Compare commits
154 Commits
2016.06.11 ... 2016.06.26
SHA1 | Author | Date | |
---|---|---|---|
92747e664a | |||
f1f336322d | |||
bf8dd79045 | |||
c6781156aa | |||
f484c5fa25 | |||
88d9f6c0c4 | |||
3c9c088f9c | |||
fc3996bfe1 | |||
5b6ad8630c | |||
30105f4ac0 | |||
1143535d76 | |||
7d52c052ef | |||
a2406fce3c | |||
3b34ab538c | |||
ac782306f1 | |||
0c00e889f3 | |||
ce96ed05f4 | |||
0463b77a1f | |||
2d185706ea | |||
b72b44318c | |||
46f59e89ea | |||
b4241e308e | |||
3d4b08dfc7 | |||
be49068d65 | |||
525cedb971 | |||
de3c7fe0d4 | |||
896cc72750 | |||
c1ff6e1ad0 | |||
fee70322d7 | |||
8065d6c55f | |||
494172d2e5 | |||
6e3c2047f8 | |||
011bd3221b | |||
b46eabecd3 | |||
0437307a41 | |||
22b7ac13ef | |||
96f88e91b7 | |||
3331a4644d | |||
adf1921dc1 | |||
97674f0419 | |||
73843ae8ac | |||
f2bb8c036a | |||
75ca6bcee2 | |||
089657ed1f | |||
b5eab86c24 | |||
c8e3e0974b | |||
dfc8f46e1c | |||
c143ddce5d | |||
169d836feb | |||
6ae938b295 | |||
cf40fdf5c1 | |||
23bdae0955 | |||
ca74c90bf5 | |||
7cfc1e2a10 | |||
1ac5705f62 | |||
e4f90ea0a7 | |||
cdfc187cd5 | |||
feef925f49 | |||
19e2d1cdea | |||
8369a4fe76 | |||
1f749b6658 | |||
819707920a | |||
43518503a6 | |||
5839d556e4 | |||
6c83e583b3 | |||
6aeb64b673 | |||
6cd64b6806 | |||
e154c65128 | |||
a50fd6e026 | |||
6a55bb66ee | |||
7c05097633 | |||
589568789f | |||
7577d849a6 | |||
cb23192bc4 | |||
41c1023300 | |||
90b6288cce | |||
c1823c8ad9 | |||
d7c6c656c5 | |||
b0b128049a | |||
e8f13f2637 | |||
b5aad37f6b | |||
6d0d4fc26d | |||
0278aa443f | |||
1f35745758 | |||
573c35272f | |||
09e3f91e40 | |||
1b6cf16be7 | |||
26264cb056 | |||
a72df5f36f | |||
c878e635de | |||
0f47cc2e92 | |||
5fc2757682 | |||
e3944c2621 | |||
667d96480b | |||
e6fe993c31 | |||
d0d93f76ea | |||
20a6a154fe | |||
f011876076 | |||
6929569403 | |||
eb451890da | |||
ded7511a70 | |||
d2161cade5 | |||
27e5fa8198 | |||
efbd1eb51a | |||
369ff75081 | |||
47212f7bcb | |||
4c93ee8d14 | |||
8bc4dbb1af | |||
6c3760292c | |||
4cef70db6c | |||
ff4af6ec59 | |||
d01fb21d4c | |||
a4ea28eee6 | |||
bc2a871f3e | |||
1759672eed | |||
fea55ef4a9 | |||
16b6bd01d2 | |||
14d0f4e0f3 | |||
778f969447 | |||
79cd8b3d8a | |||
b4663f12b1 | |||
b50e02c1e4 | |||
33b72ce64e | |||
cf2bf840ba | |||
bccdac6874 | |||
e69f9f5d68 | |||
77a9a9c295 | |||
84dcd1c4e4 | |||
971e3b7520 | |||
4e79011729 | |||
a936ac321c | |||
98960c911c | |||
329ca3bef6 | |||
2c3322e36e | |||
80ae228b34 | |||
6d28c408cf | |||
c83b35d4aa | |||
94e5d6aedb | |||
531a74968c | |||
c5edd147d1 | |||
856150d056 | |||
03ebea89b0 | |||
15d106787e | |||
7aab3696dd | |||
47787efa2b | |||
4a420119a6 | |||
33751818d3 | |||
698f127c1a | |||
fe458b6596 | |||
21ac1a8ac3 | |||
79027c0ea0 | |||
4cad2929cd | |||
62666af99f | |||
9ddc289f88 |
6  .github/ISSUE_TEMPLATE.md (vendored)
@ -6,8 +6,8 @@
|
|||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2016.06.11.1*. If it's not read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected.
|
### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2016.06.26*. If it's not read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected.
|
||||||
- [ ] I've **verified** and **I assure** that I'm running youtube-dl **2016.06.11.1**
|
- [ ] I've **verified** and **I assure** that I'm running youtube-dl **2016.06.26**
|
||||||
|
|
||||||
### Before submitting an *issue* make sure you have:
|
### Before submitting an *issue* make sure you have:
|
||||||
- [ ] At least skimmed through [README](https://github.com/rg3/youtube-dl/blob/master/README.md) and **most notably** [FAQ](https://github.com/rg3/youtube-dl#faq) and [BUGS](https://github.com/rg3/youtube-dl#bugs) sections
|
- [ ] At least skimmed through [README](https://github.com/rg3/youtube-dl/blob/master/README.md) and **most notably** [FAQ](https://github.com/rg3/youtube-dl#faq) and [BUGS](https://github.com/rg3/youtube-dl#bugs) sections
|
||||||
@ -35,7 +35,7 @@ $ youtube-dl -v <your command line>
|
|||||||
[debug] User config: []
|
[debug] User config: []
|
||||||
[debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
|
[debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
|
||||||
[debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
|
[debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
|
||||||
[debug] youtube-dl version 2016.06.11.1
|
[debug] youtube-dl version 2016.06.26
|
||||||
[debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
|
[debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
|
||||||
[debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
|
[debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
|
||||||
[debug] Proxy map: {}
|
[debug] Proxy map: {}
|
||||||
|
3  AUTHORS
@ -173,3 +173,6 @@ Kevin Deldycke
|
|||||||
inondle
|
inondle
|
||||||
Tomáš Čech
|
Tomáš Čech
|
||||||
Déstin Reed
|
Déstin Reed
|
||||||
|
Roman Tsiupa
|
||||||
|
Artur Krysiak
|
||||||
|
Jakub Adam Wieczorek
|
||||||
|
@ -142,9 +142,9 @@ After you have ensured this site is distributing it's content legally, you can f
|
|||||||
```
|
```
|
||||||
5. Add an import in [`youtube_dl/extractor/extractors.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/extractors.py).
|
5. Add an import in [`youtube_dl/extractor/extractors.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/extractors.py).
|
||||||
6. Run `python test/test_download.py TestDownload.test_YourExtractor`. This *should fail* at first, but you can continually re-run it until you're done. If you decide to add more than one test, then rename ``_TEST`` to ``_TESTS`` and make it into a list of dictionaries. The tests will then be named `TestDownload.test_YourExtractor`, `TestDownload.test_YourExtractor_1`, `TestDownload.test_YourExtractor_2`, etc.
|
6. Run `python test/test_download.py TestDownload.test_YourExtractor`. This *should fail* at first, but you can continually re-run it until you're done. If you decide to add more than one test, then rename ``_TEST`` to ``_TESTS`` and make it into a list of dictionaries. The tests will then be named `TestDownload.test_YourExtractor`, `TestDownload.test_YourExtractor_1`, `TestDownload.test_YourExtractor_2`, etc.
|
||||||
7. Have a look at [`youtube_dl/extractor/common.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py) for possible helper methods and a [detailed description of what your extractor should and may return](https://github.com/rg3/youtube-dl/blob/58525c94d547be1c8167d16c298bdd75506db328/youtube_dl/extractor/common.py#L68-L226). Add tests and code for as many as you want.
|
7. Have a look at [`youtube_dl/extractor/common.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py) for possible helper methods and a [detailed description of what your extractor should and may return](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py#L74-L252). Add tests and code for as many as you want.
|
||||||
8. Keep in mind that the only mandatory fields in info dict for successful extraction process are `id`, `title` and either `url` or `formats`, i.e. these are the critical data the extraction does not make any sense without. This means that [any field](https://github.com/rg3/youtube-dl/blob/58525c94d547be1c8167d16c298bdd75506db328/youtube_dl/extractor/common.py#L138-L226) apart from aforementioned mandatory ones should be treated **as optional** and extraction should be **tolerate** to situations when sources for these fields can potentially be unavailable (even if they always available at the moment) and **future-proof** in order not to break the extraction of general purpose mandatory fields. For example, if you have some intermediate dict `meta` that is a source of metadata and it has a key `summary` that you want to extract and put into resulting info dict as `description`, you should be ready that this key may be missing from the `meta` dict, i.e. you should extract it as `meta.get('summary')` and not `meta['summary']`. Similarly, you should pass `fatal=False` when extracting data from a webpage with `_search_regex/_html_search_regex`.
|
8. Keep in mind that the only mandatory fields in info dict for successful extraction process are `id`, `title` and either `url` or `formats`, i.e. these are the critical data the extraction does not make any sense without. This means that [any field](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py#L148-L252) apart from aforementioned mandatory ones should be treated **as optional** and extraction should be **tolerate** to situations when sources for these fields can potentially be unavailable (even if they always available at the moment) and **future-proof** in order not to break the extraction of general purpose mandatory fields. For example, if you have some intermediate dict `meta` that is a source of metadata and it has a key `summary` that you want to extract and put into resulting info dict as `description`, you should be ready that this key may be missing from the `meta` dict, i.e. you should extract it as `meta.get('summary')` and not `meta['summary']`. Similarly, you should pass `fatal=False` when extracting data from a webpage with `_search_regex/_html_search_regex`.
|
||||||
9. Check the code with [flake8](https://pypi.python.org/pypi/flake8).
|
9. Check the code with [flake8](https://pypi.python.org/pypi/flake8). Also make sure your code works under all [Python](http://www.python.org/) versions claimed supported by youtube-dl, namely 2.6, 2.7, and 3.2+.
|
||||||
10. When the tests pass, [add](http://git-scm.com/docs/git-add) the new files and [commit](http://git-scm.com/docs/git-commit) them and [push](http://git-scm.com/docs/git-push) the result, like this:
|
10. When the tests pass, [add](http://git-scm.com/docs/git-add) the new files and [commit](http://git-scm.com/docs/git-commit) them and [push](http://git-scm.com/docs/git-push) the result, like this:
|
||||||
|
|
||||||
$ git add youtube_dl/extractor/extractors.py
|
$ git add youtube_dl/extractor/extractors.py
|
||||||
|
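Tying together steps 5 to 9 above, and in particular the tolerant-extraction rules of step 8, here is a minimal sketch of a new extractor file under `youtube_dl/extractor/`. `ExampleIE`, its URL pattern and the JSON endpoint are invented for illustration only; the helpers used (`_match_id`, `_download_webpage`, `_download_json`, `_html_search_regex`, `int_or_none`) are the ones referenced in the guide.

```python
# Hypothetical extractor fragment illustrating step 8: only the mandatory
# fields (id, title, url) are accessed with plain indexing.
from .common import InfoExtractor
from ..utils import int_or_none


class ExampleIE(InfoExtractor):  # invented name, for illustration only
    _VALID_URL = r'https?://(?:www\.)?example\.com/videos/(?P<id>[0-9]+)'

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        # Assumed metadata endpoint; a real site would differ.
        meta = self._download_json(
            'https://example.com/api/videos/%s.json' % video_id, video_id)

        return {
            'id': video_id,
            # Mandatory fields: plain indexing is fine, a missing key should
            # abort the extraction anyway.
            'title': meta['title'],
            'url': meta['video_url'],
            # Optional fields: use .get() and fatal=False so that a missing
            # source degrades gracefully instead of raising.
            'description': meta.get('summary'),
            'duration': int_or_none(meta.get('duration')),
            'uploader': self._html_search_regex(
                r'class="uploader">([^<]+)', webpage, 'uploader',
                fatal=False),
        }
```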
17  README.md
@ -44,7 +44,7 @@ Or with [MacPorts](https://www.macports.org/):
|
|||||||
Alternatively, refer to the [developer instructions](#developer-instructions) for how to check out and work with the git repository. For further options, including PGP signatures, see the [youtube-dl Download Page](https://rg3.github.io/youtube-dl/download.html).
|
Alternatively, refer to the [developer instructions](#developer-instructions) for how to check out and work with the git repository. For further options, including PGP signatures, see the [youtube-dl Download Page](https://rg3.github.io/youtube-dl/download.html).
|
||||||
|
|
||||||
# DESCRIPTION
|
# DESCRIPTION
|
||||||
**youtube-dl** is a small command-line program to download videos from
|
**youtube-dl** is a command-line program to download videos from
|
||||||
YouTube.com and a few more sites. It requires the Python interpreter, version
|
YouTube.com and a few more sites. It requires the Python interpreter, version
|
||||||
2.6, 2.7, or 3.2+, and it is not platform specific. It should work on
|
2.6, 2.7, or 3.2+, and it is not platform specific. It should work on
|
||||||
your Unix box, on Windows or on Mac OS X. It is released to the public domain,
|
your Unix box, on Windows or on Mac OS X. It is released to the public domain,
|
||||||
@ -511,6 +511,9 @@ The basic usage is not to set any template arguments when downloading a single f
|
|||||||
- `autonumber`: Five-digit number that will be increased with each download, starting at zero
|
- `autonumber`: Five-digit number that will be increased with each download, starting at zero
|
||||||
- `playlist`: Name or id of the playlist that contains the video
|
- `playlist`: Name or id of the playlist that contains the video
|
||||||
- `playlist_index`: Index of the video in the playlist padded with leading zeros according to the total length of the playlist
|
- `playlist_index`: Index of the video in the playlist padded with leading zeros according to the total length of the playlist
|
||||||
|
- `playlist_id`: Playlist identifier
|
||||||
|
- `playlist_title`: Playlist title
|
||||||
|
|
||||||
|
|
||||||
Available for the video that belongs to some logical chapter or section:
|
Available for the video that belongs to some logical chapter or section:
|
||||||
- `chapter`: Name or title of the chapter the video belongs to
|
- `chapter`: Name or title of the chapter the video belongs to
|
||||||
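The new `playlist_id` and `playlist_title` fields slot into output templates like any other field. A hedged sketch using the embedding API (the playlist URL is only a placeholder); the same template can be passed on the command line with `-o`:

```python
from __future__ import unicode_literals
import youtube_dl

ydl_opts = {
    # Group downloads per playlist using the fields added above.
    'outtmpl': '%(playlist_title)s [%(playlist_id)s]/%(playlist_index)s - %(title)s.%(ext)s',
}
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
    ydl.download(['https://www.youtube.com/playlist?list=PLACEHOLDER'])
```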
@ -550,6 +553,10 @@ The current default template is `%(title)s-%(id)s.%(ext)s`.
|
|||||||
|
|
||||||
In some cases, you don't want special characters such as 中, spaces, or &, such as when transferring the downloaded filename to a Windows system or the filename through an 8bit-unsafe channel. In these cases, add the `--restrict-filenames` flag to get a shorter title:
|
In some cases, you don't want special characters such as 中, spaces, or &, such as when transferring the downloaded filename to a Windows system or the filename through an 8bit-unsafe channel. In these cases, add the `--restrict-filenames` flag to get a shorter title:
|
||||||
|
|
||||||
|
#### Output template and Windows batch files
|
||||||
|
|
||||||
|
If you are using output template inside a Windows batch file then you must escape plain percent characters (`%`) by doubling, so that `-o "%(title)s-%(id)s.%(ext)s"` should become `-o "%%(title)s-%%(id)s.%%(ext)s"`. However you should not touch `%`'s that are not plain characters, e.g. environment variables for expansion should stay intact: `-o "C:\%HOMEPATH%\Desktop\%%(title)s.%%(ext)s"`.
|
||||||
|
|
||||||
#### Output template examples
|
#### Output template examples
|
||||||
|
|
||||||
Note on Windows you may need to use double quotes instead of single.
|
Note on Windows you may need to use double quotes instead of single.
|
||||||
@ -928,9 +935,9 @@ After you have ensured this site is distributing it's content legally, you can f
|
|||||||
```
|
```
|
||||||
5. Add an import in [`youtube_dl/extractor/extractors.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/extractors.py).
|
5. Add an import in [`youtube_dl/extractor/extractors.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/extractors.py).
|
||||||
6. Run `python test/test_download.py TestDownload.test_YourExtractor`. This *should fail* at first, but you can continually re-run it until you're done. If you decide to add more than one test, then rename ``_TEST`` to ``_TESTS`` and make it into a list of dictionaries. The tests will then be named `TestDownload.test_YourExtractor`, `TestDownload.test_YourExtractor_1`, `TestDownload.test_YourExtractor_2`, etc.
|
6. Run `python test/test_download.py TestDownload.test_YourExtractor`. This *should fail* at first, but you can continually re-run it until you're done. If you decide to add more than one test, then rename ``_TEST`` to ``_TESTS`` and make it into a list of dictionaries. The tests will then be named `TestDownload.test_YourExtractor`, `TestDownload.test_YourExtractor_1`, `TestDownload.test_YourExtractor_2`, etc.
|
||||||
7. Have a look at [`youtube_dl/extractor/common.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py) for possible helper methods and a [detailed description of what your extractor should and may return](https://github.com/rg3/youtube-dl/blob/58525c94d547be1c8167d16c298bdd75506db328/youtube_dl/extractor/common.py#L68-L226). Add tests and code for as many as you want.
|
7. Have a look at [`youtube_dl/extractor/common.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py) for possible helper methods and a [detailed description of what your extractor should and may return](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py#L74-L252). Add tests and code for as many as you want.
|
||||||
8. Keep in mind that the only mandatory fields in info dict for successful extraction process are `id`, `title` and either `url` or `formats`, i.e. these are the critical data the extraction does not make any sense without. This means that [any field](https://github.com/rg3/youtube-dl/blob/58525c94d547be1c8167d16c298bdd75506db328/youtube_dl/extractor/common.py#L138-L226) apart from aforementioned mandatory ones should be treated **as optional** and extraction should be **tolerate** to situations when sources for these fields can potentially be unavailable (even if they always available at the moment) and **future-proof** in order not to break the extraction of general purpose mandatory fields. For example, if you have some intermediate dict `meta` that is a source of metadata and it has a key `summary` that you want to extract and put into resulting info dict as `description`, you should be ready that this key may be missing from the `meta` dict, i.e. you should extract it as `meta.get('summary')` and not `meta['summary']`. Similarly, you should pass `fatal=False` when extracting data from a webpage with `_search_regex/_html_search_regex`.
|
8. Keep in mind that the only mandatory fields in info dict for successful extraction process are `id`, `title` and either `url` or `formats`, i.e. these are the critical data the extraction does not make any sense without. This means that [any field](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py#L148-L252) apart from aforementioned mandatory ones should be treated **as optional** and extraction should be **tolerate** to situations when sources for these fields can potentially be unavailable (even if they always available at the moment) and **future-proof** in order not to break the extraction of general purpose mandatory fields. For example, if you have some intermediate dict `meta` that is a source of metadata and it has a key `summary` that you want to extract and put into resulting info dict as `description`, you should be ready that this key may be missing from the `meta` dict, i.e. you should extract it as `meta.get('summary')` and not `meta['summary']`. Similarly, you should pass `fatal=False` when extracting data from a webpage with `_search_regex/_html_search_regex`.
|
||||||
9. Check the code with [flake8](https://pypi.python.org/pypi/flake8).
|
9. Check the code with [flake8](https://pypi.python.org/pypi/flake8). Also make sure your code works under all [Python](http://www.python.org/) versions claimed supported by youtube-dl, namely 2.6, 2.7, and 3.2+.
|
||||||
10. When the tests pass, [add](http://git-scm.com/docs/git-add) the new files and [commit](http://git-scm.com/docs/git-commit) them and [push](http://git-scm.com/docs/git-push) the result, like this:
|
10. When the tests pass, [add](http://git-scm.com/docs/git-add) the new files and [commit](http://git-scm.com/docs/git-commit) them and [push](http://git-scm.com/docs/git-push) the result, like this:
|
||||||
|
|
||||||
$ git add youtube_dl/extractor/extractors.py
|
$ git add youtube_dl/extractor/extractors.py
|
||||||
@ -957,7 +964,7 @@ with youtube_dl.YoutubeDL(ydl_opts) as ydl:
|
|||||||
ydl.download(['http://www.youtube.com/watch?v=BaW_jenozKc'])
|
ydl.download(['http://www.youtube.com/watch?v=BaW_jenozKc'])
|
||||||
```
|
```
|
||||||
|
|
||||||
Most likely, you'll want to use various options. For a list of what can be done, have a look at [`youtube_dl/YoutubeDL.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/YoutubeDL.py#L121-L269). For a start, if you want to intercept youtube-dl's output, set a `logger` object.
|
Most likely, you'll want to use various options. For a list of options available, have a look at [`youtube_dl/YoutubeDL.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/YoutubeDL.py#L128-L278). For a start, if you want to intercept youtube-dl's output, set a `logger` object.
|
||||||
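A minimal sketch of the `logger` option mentioned above; the class name and the choice to print only errors are illustrative:

```python
from __future__ import unicode_literals
import youtube_dl


class MyLogger(object):
    def debug(self, msg):
        pass  # ignore routine debug output

    def warning(self, msg):
        pass

    def error(self, msg):
        print(msg)  # surface only errors


with youtube_dl.YoutubeDL({'logger': MyLogger()}) as ydl:
    ydl.download(['http://www.youtube.com/watch?v=BaW_jenozKc'])
```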
|
|
||||||
Here's a more complete example of a program that outputs only errors (and a short message after the download is finished), and downloads/converts the video to an mp3 file:
|
Here's a more complete example of a program that outputs only errors (and a short message after the download is finished), and downloads/converts the video to an mp3 file:
|
||||||
|
|
||||||
|
@ -14,15 +14,17 @@ if os.path.exists(lazy_extractors_filename):
|
|||||||
os.remove(lazy_extractors_filename)
|
os.remove(lazy_extractors_filename)
|
||||||
|
|
||||||
from youtube_dl.extractor import _ALL_CLASSES
|
from youtube_dl.extractor import _ALL_CLASSES
|
||||||
from youtube_dl.extractor.common import InfoExtractor
|
from youtube_dl.extractor.common import InfoExtractor, SearchInfoExtractor
|
||||||
|
|
||||||
with open('devscripts/lazy_load_template.py', 'rt') as f:
|
with open('devscripts/lazy_load_template.py', 'rt') as f:
|
||||||
module_template = f.read()
|
module_template = f.read()
|
||||||
|
|
||||||
module_contents = [module_template + '\n' + getsource(InfoExtractor.suitable)]
|
module_contents = [
|
||||||
|
module_template + '\n' + getsource(InfoExtractor.suitable) + '\n',
|
||||||
|
'class LazyLoadSearchExtractor(LazyLoadExtractor):\n pass\n']
|
||||||
|
|
||||||
ie_template = '''
|
ie_template = '''
|
||||||
class {name}(LazyLoadExtractor):
|
class {name}({bases}):
|
||||||
_VALID_URL = {valid_url!r}
|
_VALID_URL = {valid_url!r}
|
||||||
_module = '{module}'
|
_module = '{module}'
|
||||||
'''
|
'''
|
||||||
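For orientation, the template above expands to entries roughly like the following in the generated lazy-extractors module. The extractor name, pattern and module are invented; an extractor deriving from `SearchInfoExtractor` would instead list `LazyLoadSearchExtractor` among its bases, which is what the new `{bases}` placeholder enables.

```python
# Hypothetical excerpt of generated output (illustration only).
class ExampleIE(LazyLoadExtractor):
    _VALID_URL = 'https?://(?:www\\.)?example\\.com/videos/(?P<id>[0-9]+)'
    _module = 'youtube_dl.extractor.example'
```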
@ -34,10 +36,20 @@ make_valid_template = '''
|
|||||||
'''
|
'''
|
||||||
|
|
||||||
|
|
||||||
|
def get_base_name(base):
|
||||||
|
if base is InfoExtractor:
|
||||||
|
return 'LazyLoadExtractor'
|
||||||
|
elif base is SearchInfoExtractor:
|
||||||
|
return 'LazyLoadSearchExtractor'
|
||||||
|
else:
|
||||||
|
return base.__name__
|
||||||
|
|
||||||
|
|
||||||
def build_lazy_ie(ie, name):
|
def build_lazy_ie(ie, name):
|
||||||
valid_url = getattr(ie, '_VALID_URL', None)
|
valid_url = getattr(ie, '_VALID_URL', None)
|
||||||
s = ie_template.format(
|
s = ie_template.format(
|
||||||
name=name,
|
name=name,
|
||||||
|
bases=', '.join(map(get_base_name, ie.__bases__)),
|
||||||
valid_url=valid_url,
|
valid_url=valid_url,
|
||||||
module=ie.__module__)
|
module=ie.__module__)
|
||||||
if ie.suitable.__func__ is not InfoExtractor.suitable.__func__:
|
if ie.suitable.__func__ is not InfoExtractor.suitable.__func__:
|
||||||
@ -47,11 +59,34 @@ def build_lazy_ie(ie, name):
|
|||||||
s += make_valid_template.format(valid_url=ie._make_valid_url())
|
s += make_valid_template.format(valid_url=ie._make_valid_url())
|
||||||
return s
|
return s
|
||||||
|
|
||||||
|
# find the correct sorting and add the required base classes so that sublcasses
|
||||||
|
# can be correctly created
|
||||||
|
classes = _ALL_CLASSES[:-1]
|
||||||
|
ordered_cls = []
|
||||||
|
while classes:
|
||||||
|
for c in classes[:]:
|
||||||
|
bases = set(c.__bases__) - set((object, InfoExtractor, SearchInfoExtractor))
|
||||||
|
stop = False
|
||||||
|
for b in bases:
|
||||||
|
if b not in classes and b not in ordered_cls:
|
||||||
|
if b.__name__ == 'GenericIE':
|
||||||
|
exit()
|
||||||
|
classes.insert(0, b)
|
||||||
|
stop = True
|
||||||
|
if stop:
|
||||||
|
break
|
||||||
|
if all(b in ordered_cls for b in bases):
|
||||||
|
ordered_cls.append(c)
|
||||||
|
classes.remove(c)
|
||||||
|
break
|
||||||
|
ordered_cls.append(_ALL_CLASSES[-1])
|
||||||
|
|
||||||
names = []
|
names = []
|
||||||
for ie in list(sorted(_ALL_CLASSES[:-1], key=lambda cls: cls.ie_key())) + _ALL_CLASSES[-1:]:
|
for ie in ordered_cls:
|
||||||
name = ie.ie_key() + 'IE'
|
name = ie.__name__
|
||||||
src = build_lazy_ie(ie, name)
|
src = build_lazy_ie(ie, name)
|
||||||
module_contents.append(src)
|
module_contents.append(src)
|
||||||
|
if ie in _ALL_CLASSES:
|
||||||
names.append(name)
|
names.append(name)
|
||||||
|
|
||||||
module_contents.append(
|
module_contents.append(
|
||||||
|
@ -15,6 +15,7 @@
|
|||||||
set -e
|
set -e
|
||||||
|
|
||||||
skip_tests=true
|
skip_tests=true
|
||||||
|
gpg_sign_commits=""
|
||||||
buildserver='localhost:8142'
|
buildserver='localhost:8142'
|
||||||
|
|
||||||
while true
|
while true
|
||||||
@ -24,6 +25,10 @@ case "$1" in
|
|||||||
skip_tests=false
|
skip_tests=false
|
||||||
shift
|
shift
|
||||||
;;
|
;;
|
||||||
|
--gpg-sign-commits|-S)
|
||||||
|
gpg_sign_commits="-S"
|
||||||
|
shift
|
||||||
|
;;
|
||||||
--buildserver)
|
--buildserver)
|
||||||
buildserver="$2"
|
buildserver="$2"
|
||||||
shift 2
|
shift 2
|
||||||
@ -69,7 +74,7 @@ sed -i "s/__version__ = '.*'/__version__ = '$version'/" youtube_dl/version.py
|
|||||||
/bin/echo -e "\n### Committing documentation, templates and youtube_dl/version.py..."
|
/bin/echo -e "\n### Committing documentation, templates and youtube_dl/version.py..."
|
||||||
make README.md CONTRIBUTING.md .github/ISSUE_TEMPLATE.md supportedsites
|
make README.md CONTRIBUTING.md .github/ISSUE_TEMPLATE.md supportedsites
|
||||||
git add README.md CONTRIBUTING.md .github/ISSUE_TEMPLATE.md docs/supportedsites.md youtube_dl/version.py
|
git add README.md CONTRIBUTING.md .github/ISSUE_TEMPLATE.md docs/supportedsites.md youtube_dl/version.py
|
||||||
git commit -m "release $version"
|
git commit $gpg_sign_commits -m "release $version"
|
||||||
|
|
||||||
/bin/echo -e "\n### Now tagging, signing and pushing..."
|
/bin/echo -e "\n### Now tagging, signing and pushing..."
|
||||||
git tag -s -m "Release $version" "$version"
|
git tag -s -m "Release $version" "$version"
|
||||||
@ -116,7 +121,7 @@ git clone --branch gh-pages --single-branch . build/gh-pages
|
|||||||
"$ROOT/devscripts/gh-pages/update-copyright.py"
|
"$ROOT/devscripts/gh-pages/update-copyright.py"
|
||||||
"$ROOT/devscripts/gh-pages/update-sites.py"
|
"$ROOT/devscripts/gh-pages/update-sites.py"
|
||||||
git add *.html *.html.in update
|
git add *.html *.html.in update
|
||||||
git commit -m "release $version"
|
git commit $gpg_sign_commits -m "release $version"
|
||||||
git push "$ROOT" gh-pages
|
git push "$ROOT" gh-pages
|
||||||
git push "$ORIGIN_URL" gh-pages
|
git push "$ORIGIN_URL" gh-pages
|
||||||
)
|
)
|
||||||
|
@ -74,6 +74,8 @@
|
|||||||
- **bbc**: BBC
|
- **bbc**: BBC
|
||||||
- **bbc.co.uk**: BBC iPlayer
|
- **bbc.co.uk**: BBC iPlayer
|
||||||
- **bbc.co.uk:article**: BBC articles
|
- **bbc.co.uk:article**: BBC articles
|
||||||
|
- **bbc.co.uk:iplayer:playlist**
|
||||||
|
- **bbc.co.uk:playlist**
|
||||||
- **BeatportPro**
|
- **BeatportPro**
|
||||||
- **Beeg**
|
- **Beeg**
|
||||||
- **BehindKink**
|
- **BehindKink**
|
||||||
@ -104,6 +106,8 @@
|
|||||||
- **canalc2.tv**
|
- **canalc2.tv**
|
||||||
- **Canalplus**: canalplus.fr, piwiplus.fr and d8.tv
|
- **Canalplus**: canalplus.fr, piwiplus.fr and d8.tv
|
||||||
- **Canvas**
|
- **Canvas**
|
||||||
|
- **CarambaTV**
|
||||||
|
- **CarambaTVPage**
|
||||||
- **CBC**
|
- **CBC**
|
||||||
- **CBCPlayer**
|
- **CBCPlayer**
|
||||||
- **CBS**
|
- **CBS**
|
||||||
@ -124,6 +128,7 @@
|
|||||||
- **cliphunter**
|
- **cliphunter**
|
||||||
- **ClipRs**
|
- **ClipRs**
|
||||||
- **Clipsyndicate**
|
- **Clipsyndicate**
|
||||||
|
- **CloserToTruth**
|
||||||
- **cloudtime**: CloudTime
|
- **cloudtime**: CloudTime
|
||||||
- **Cloudy**
|
- **Cloudy**
|
||||||
- **Clubic**
|
- **Clubic**
|
||||||
@ -243,7 +248,6 @@
|
|||||||
- **Gamersyde**
|
- **Gamersyde**
|
||||||
- **GameSpot**
|
- **GameSpot**
|
||||||
- **GameStar**
|
- **GameStar**
|
||||||
- **Gametrailers**
|
|
||||||
- **Gazeta**
|
- **Gazeta**
|
||||||
- **GDCVault**
|
- **GDCVault**
|
||||||
- **generic**: Generic downloader that works on some sites
|
- **generic**: Generic downloader that works on some sites
|
||||||
@ -381,7 +385,7 @@
|
|||||||
- **MovieFap**
|
- **MovieFap**
|
||||||
- **Moviezine**
|
- **Moviezine**
|
||||||
- **MPORA**
|
- **MPORA**
|
||||||
- **MSNBC**
|
- **MSN**
|
||||||
- **MTV**
|
- **MTV**
|
||||||
- **mtv.de**
|
- **mtv.de**
|
||||||
- **mtviggy.com**
|
- **mtviggy.com**
|
||||||
@ -432,6 +436,7 @@
|
|||||||
- **nhl.com:videocenter**
|
- **nhl.com:videocenter**
|
||||||
- **nhl.com:videocenter:category**: NHL videocenter category
|
- **nhl.com:videocenter:category**: NHL videocenter category
|
||||||
- **nick.com**
|
- **nick.com**
|
||||||
|
- **nick.de**
|
||||||
- **niconico**: ニコニコ動画
|
- **niconico**: ニコニコ動画
|
||||||
- **NiconicoPlaylist**
|
- **NiconicoPlaylist**
|
||||||
- **njoy**: N-JOY
|
- **njoy**: N-JOY
|
||||||
@ -497,6 +502,7 @@
|
|||||||
- **plus.google**: Google Plus
|
- **plus.google**: Google Plus
|
||||||
- **pluzz.francetv.fr**
|
- **pluzz.francetv.fr**
|
||||||
- **podomatic**
|
- **podomatic**
|
||||||
|
- **PolskieRadio**
|
||||||
- **PornHd**
|
- **PornHd**
|
||||||
- **PornHub**
|
- **PornHub**
|
||||||
- **PornHubPlaylist**
|
- **PornHubPlaylist**
|
||||||
@ -516,6 +522,7 @@
|
|||||||
- **qqmusic:singer**: QQ音乐 - 歌手
|
- **qqmusic:singer**: QQ音乐 - 歌手
|
||||||
- **qqmusic:toplist**: QQ音乐 - 排行榜
|
- **qqmusic:toplist**: QQ音乐 - 排行榜
|
||||||
- **R7**
|
- **R7**
|
||||||
|
- **R7Article**
|
||||||
- **radio.de**
|
- **radio.de**
|
||||||
- **radiobremen**
|
- **radiobremen**
|
||||||
- **radiocanada**
|
- **radiocanada**
|
||||||
@ -535,6 +542,7 @@
|
|||||||
- **revision3:embed**
|
- **revision3:embed**
|
||||||
- **RICE**
|
- **RICE**
|
||||||
- **RingTV**
|
- **RingTV**
|
||||||
|
- **RockstarGames**
|
||||||
- **RottenTomatoes**
|
- **RottenTomatoes**
|
||||||
- **Roxwel**
|
- **Roxwel**
|
||||||
- **RTBF**
|
- **RTBF**
|
||||||
@ -647,6 +655,7 @@
|
|||||||
- **Telegraaf**
|
- **Telegraaf**
|
||||||
- **TeleMB**
|
- **TeleMB**
|
||||||
- **TeleTask**
|
- **TeleTask**
|
||||||
|
- **Telewebion**
|
||||||
- **TF1**
|
- **TF1**
|
||||||
- **TheIntercept**
|
- **TheIntercept**
|
||||||
- **ThePlatform**
|
- **ThePlatform**
|
||||||
@ -698,6 +707,7 @@
|
|||||||
- **TVPlay**: TV3Play and related services
|
- **TVPlay**: TV3Play and related services
|
||||||
- **Tweakers**
|
- **Tweakers**
|
||||||
- **twitch:chapter**
|
- **twitch:chapter**
|
||||||
|
- **twitch:clips**
|
||||||
- **twitch:past_broadcasts**
|
- **twitch:past_broadcasts**
|
||||||
- **twitch:profile**
|
- **twitch:profile**
|
||||||
- **twitch:stream**
|
- **twitch:stream**
|
||||||
@ -728,6 +738,7 @@
|
|||||||
- **vh1.com**
|
- **vh1.com**
|
||||||
- **Vice**
|
- **Vice**
|
||||||
- **ViceShow**
|
- **ViceShow**
|
||||||
|
- **Vidbit**
|
||||||
- **Viddler**
|
- **Viddler**
|
||||||
- **video.google:search**: Google Video search
|
- **video.google:search**: Google Video search
|
||||||
- **video.mit.edu**
|
- **video.mit.edu**
|
||||||
@ -792,10 +803,11 @@
|
|||||||
- **WNL**
|
- **WNL**
|
||||||
- **WorldStarHipHop**
|
- **WorldStarHipHop**
|
||||||
- **wrzuta.pl**
|
- **wrzuta.pl**
|
||||||
|
- **wrzuta.pl:playlist**
|
||||||
- **WSJ**: Wall Street Journal
|
- **WSJ**: Wall Street Journal
|
||||||
- **XBef**
|
- **XBef**
|
||||||
- **XboxClips**
|
- **XboxClips**
|
||||||
- **XFileShare**: XFileShare based sites: DaClips, FileHoot, GorillaVid, MovPod, PowerWatch, Rapidvideo.ws, TheVideoBee, Vidto, Streamin.To
|
- **XFileShare**: XFileShare based sites: DaClips, FileHoot, GorillaVid, MovPod, PowerWatch, Rapidvideo.ws, TheVideoBee, Vidto, Streamin.To, XVIDSTAGE
|
||||||
- **XHamster**
|
- **XHamster**
|
||||||
- **XHamsterEmbed**
|
- **XHamsterEmbed**
|
||||||
- **xiami:album**: 虾米音乐 - 专辑
|
- **xiami:album**: 虾米音乐 - 专辑
|
||||||
|
62  setup.py
@ -21,25 +21,37 @@ try:
|
|||||||
import py2exe
|
import py2exe
|
||||||
except ImportError:
|
except ImportError:
|
||||||
if len(sys.argv) >= 2 and sys.argv[1] == 'py2exe':
|
if len(sys.argv) >= 2 and sys.argv[1] == 'py2exe':
|
||||||
print("Cannot import py2exe", file=sys.stderr)
|
print('Cannot import py2exe', file=sys.stderr)
|
||||||
exit(1)
|
exit(1)
|
||||||
|
|
||||||
py2exe_options = {
|
py2exe_options = {
|
||||||
"bundle_files": 1,
|
'bundle_files': 1,
|
||||||
"compressed": 1,
|
'compressed': 1,
|
||||||
"optimize": 2,
|
'optimize': 2,
|
||||||
"dist_dir": '.',
|
'dist_dir': '.',
|
||||||
"dll_excludes": ['w9xpopen.exe', 'crypt32.dll'],
|
'dll_excludes': ['w9xpopen.exe', 'crypt32.dll'],
|
||||||
}
|
}
|
||||||
|
|
||||||
|
# Get the version from youtube_dl/version.py without importing the package
|
||||||
|
exec(compile(open('youtube_dl/version.py').read(),
|
||||||
|
'youtube_dl/version.py', 'exec'))
|
||||||
|
|
||||||
|
DESCRIPTION = 'YouTube video downloader'
|
||||||
|
LONG_DESCRIPTION = 'Command-line program to download videos from YouTube.com and other video sites'
|
||||||
|
|
||||||
py2exe_console = [{
|
py2exe_console = [{
|
||||||
"script": "./youtube_dl/__main__.py",
|
'script': './youtube_dl/__main__.py',
|
||||||
"dest_base": "youtube-dl",
|
'dest_base': 'youtube-dl',
|
||||||
|
'version': __version__,
|
||||||
|
'description': DESCRIPTION,
|
||||||
|
'comments': LONG_DESCRIPTION,
|
||||||
|
'product_name': 'youtube-dl',
|
||||||
|
'product_version': __version__,
|
||||||
}]
|
}]
|
||||||
|
|
||||||
py2exe_params = {
|
py2exe_params = {
|
||||||
'console': py2exe_console,
|
'console': py2exe_console,
|
||||||
'options': {"py2exe": py2exe_options},
|
'options': {'py2exe': py2exe_options},
|
||||||
'zipfile': None
|
'zipfile': None
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -72,7 +84,7 @@ else:
|
|||||||
params['scripts'] = ['bin/youtube-dl']
|
params['scripts'] = ['bin/youtube-dl']
|
||||||
|
|
||||||
class build_lazy_extractors(Command):
|
class build_lazy_extractors(Command):
|
||||||
description = "Build the extractor lazy loading module"
|
description = 'Build the extractor lazy loading module'
|
||||||
user_options = []
|
user_options = []
|
||||||
|
|
||||||
def initialize_options(self):
|
def initialize_options(self):
|
||||||
@ -87,16 +99,11 @@ class build_lazy_extractors(Command):
|
|||||||
dry_run=self.dry_run,
|
dry_run=self.dry_run,
|
||||||
)
|
)
|
||||||
|
|
||||||
# Get the version from youtube_dl/version.py without importing the package
|
|
||||||
exec(compile(open('youtube_dl/version.py').read(),
|
|
||||||
'youtube_dl/version.py', 'exec'))
|
|
||||||
|
|
||||||
setup(
|
setup(
|
||||||
name='youtube_dl',
|
name='youtube_dl',
|
||||||
version=__version__,
|
version=__version__,
|
||||||
description='YouTube video downloader',
|
description=DESCRIPTION,
|
||||||
long_description='Small command-line program to download videos from'
|
long_description=LONG_DESCRIPTION,
|
||||||
' YouTube.com and other video sites.',
|
|
||||||
url='https://github.com/rg3/youtube-dl',
|
url='https://github.com/rg3/youtube-dl',
|
||||||
author='Ricardo Garcia',
|
author='Ricardo Garcia',
|
||||||
author_email='ytdl@yt-dl.org',
|
author_email='ytdl@yt-dl.org',
|
||||||
@ -112,16 +119,17 @@ setup(
|
|||||||
# test_requires = ['nosetest'],
|
# test_requires = ['nosetest'],
|
||||||
|
|
||||||
classifiers=[
|
classifiers=[
|
||||||
"Topic :: Multimedia :: Video",
|
'Topic :: Multimedia :: Video',
|
||||||
"Development Status :: 5 - Production/Stable",
|
'Development Status :: 5 - Production/Stable',
|
||||||
"Environment :: Console",
|
'Environment :: Console',
|
||||||
"License :: Public Domain",
|
'License :: Public Domain',
|
||||||
"Programming Language :: Python :: 2.6",
|
'Programming Language :: Python :: 2.6',
|
||||||
"Programming Language :: Python :: 2.7",
|
'Programming Language :: Python :: 2.7',
|
||||||
"Programming Language :: Python :: 3",
|
'Programming Language :: Python :: 3',
|
||||||
"Programming Language :: Python :: 3.2",
|
'Programming Language :: Python :: 3.2',
|
||||||
"Programming Language :: Python :: 3.3",
|
'Programming Language :: Python :: 3.3',
|
||||||
"Programming Language :: Python :: 3.4",
|
'Programming Language :: Python :: 3.4',
|
||||||
|
'Programming Language :: Python :: 3.5',
|
||||||
],
|
],
|
||||||
|
|
||||||
cmdclass={'build_lazy_extractors': build_lazy_extractors},
|
cmdclass={'build_lazy_extractors': build_lazy_extractors},
|
||||||
|
@ -11,7 +11,7 @@ sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
|||||||
from test.helper import FakeYDL
|
from test.helper import FakeYDL
|
||||||
from youtube_dl.extractor.common import InfoExtractor
|
from youtube_dl.extractor.common import InfoExtractor
|
||||||
from youtube_dl.extractor import YoutubeIE, get_info_extractor
|
from youtube_dl.extractor import YoutubeIE, get_info_extractor
|
||||||
from youtube_dl.utils import encode_data_uri, strip_jsonp, ExtractorError
|
from youtube_dl.utils import encode_data_uri, strip_jsonp, ExtractorError, RegexNotFoundError
|
||||||
|
|
||||||
|
|
||||||
class TestIE(InfoExtractor):
|
class TestIE(InfoExtractor):
|
||||||
@ -66,6 +66,11 @@ class TestInfoExtractor(unittest.TestCase):
|
|||||||
self.assertEqual(ie._html_search_meta('d', html), '4')
|
self.assertEqual(ie._html_search_meta('d', html), '4')
|
||||||
self.assertEqual(ie._html_search_meta('e', html), '5')
|
self.assertEqual(ie._html_search_meta('e', html), '5')
|
||||||
self.assertEqual(ie._html_search_meta('f', html), '6')
|
self.assertEqual(ie._html_search_meta('f', html), '6')
|
||||||
|
self.assertEqual(ie._html_search_meta(('a', 'b', 'c'), html), '1')
|
||||||
|
self.assertEqual(ie._html_search_meta(('c', 'b', 'a'), html), '3')
|
||||||
|
self.assertEqual(ie._html_search_meta(('z', 'x', 'c'), html), '3')
|
||||||
|
self.assertRaises(RegexNotFoundError, ie._html_search_meta, 'z', html, None, fatal=True)
|
||||||
|
self.assertRaises(RegexNotFoundError, ie._html_search_meta, ('z', 'x'), html, None, fatal=True)
|
||||||
|
|
||||||
def test_download_json(self):
|
def test_download_json(self):
|
||||||
uri = encode_data_uri(b'{"foo": "blah"}', 'application/json')
|
uri = encode_data_uri(b'{"foo": "blah"}', 'application/json')
|
||||||
|
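The assertions above show that `_html_search_meta` now also accepts a tuple of meta names and returns the content of the first name that matches. A small standalone sketch, assuming it is run from a youtube-dl source checkout so that `test.helper` is importable:

```python
from test.helper import FakeYDL
from youtube_dl.extractor.common import InfoExtractor


class TestIE(InfoExtractor):
    pass


ie = TestIE(FakeYDL())
html = '<meta name="a" content="1"><meta name="b" content="2">'
# Names are tried in the order given; 'x' does not match, 'b' does.
print(ie._html_search_meta(('x', 'b', 'a'), html))  # -> '2'
```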
@ -60,11 +60,13 @@ from youtube_dl.utils import (
|
|||||||
timeconvert,
|
timeconvert,
|
||||||
unescapeHTML,
|
unescapeHTML,
|
||||||
unified_strdate,
|
unified_strdate,
|
||||||
|
unified_timestamp,
|
||||||
unsmuggle_url,
|
unsmuggle_url,
|
||||||
uppercase_escape,
|
uppercase_escape,
|
||||||
lowercase_escape,
|
lowercase_escape,
|
||||||
url_basename,
|
url_basename,
|
||||||
urlencode_postdata,
|
urlencode_postdata,
|
||||||
|
urshift,
|
||||||
update_url_query,
|
update_url_query,
|
||||||
version_tuple,
|
version_tuple,
|
||||||
xpath_with_ns,
|
xpath_with_ns,
|
||||||
@ -283,8 +285,28 @@ class TestUtil(unittest.TestCase):
|
|||||||
'20150202')
|
'20150202')
|
||||||
self.assertEqual(unified_strdate('Feb 14th 2016 5:45PM'), '20160214')
|
self.assertEqual(unified_strdate('Feb 14th 2016 5:45PM'), '20160214')
|
||||||
self.assertEqual(unified_strdate('25-09-2014'), '20140925')
|
self.assertEqual(unified_strdate('25-09-2014'), '20140925')
|
||||||
|
self.assertEqual(unified_strdate('27.02.2016 17:30'), '20160227')
|
||||||
self.assertEqual(unified_strdate('UNKNOWN DATE FORMAT'), None)
|
self.assertEqual(unified_strdate('UNKNOWN DATE FORMAT'), None)
|
||||||
|
|
||||||
|
def test_unified_timestamps(self):
|
||||||
|
self.assertEqual(unified_timestamp('December 21, 2010'), 1292889600)
|
||||||
|
self.assertEqual(unified_timestamp('8/7/2009'), 1247011200)
|
||||||
|
self.assertEqual(unified_timestamp('Dec 14, 2012'), 1355443200)
|
||||||
|
self.assertEqual(unified_timestamp('2012/10/11 01:56:38 +0000'), 1349920598)
|
||||||
|
self.assertEqual(unified_timestamp('1968 12 10'), -33436800)
|
||||||
|
self.assertEqual(unified_timestamp('1968-12-10'), -33436800)
|
||||||
|
self.assertEqual(unified_timestamp('28/01/2014 21:00:00 +0100'), 1390939200)
|
||||||
|
self.assertEqual(
|
||||||
|
unified_timestamp('11/26/2014 11:30:00 AM PST', day_first=False),
|
||||||
|
1417001400)
|
||||||
|
self.assertEqual(
|
||||||
|
unified_timestamp('2/2/2015 6:47:40 PM', day_first=False),
|
||||||
|
1422902860)
|
||||||
|
self.assertEqual(unified_timestamp('Feb 14th 2016 5:45PM'), 1455471900)
|
||||||
|
self.assertEqual(unified_timestamp('25-09-2014'), 1411603200)
|
||||||
|
self.assertEqual(unified_timestamp('27.02.2016 17:30'), 1456594200)
|
||||||
|
self.assertEqual(unified_timestamp('UNKNOWN DATE FORMAT'), None)
|
||||||
|
|
||||||
def test_determine_ext(self):
|
def test_determine_ext(self):
|
||||||
self.assertEqual(determine_ext('http://example.com/foo/bar.mp4/?download'), 'mp4')
|
self.assertEqual(determine_ext('http://example.com/foo/bar.mp4/?download'), 'mp4')
|
||||||
self.assertEqual(determine_ext('http://example.com/foo/bar/?download', None), None)
|
self.assertEqual(determine_ext('http://example.com/foo/bar/?download', None), None)
|
||||||
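For reference, the new `unified_timestamp` helper exercised above converts assorted date strings to Unix timestamps. A minimal usage sketch, with return values taken from the tests:

```python
from youtube_dl.utils import unified_timestamp

assert unified_timestamp('27.02.2016 17:30') == 1456594200
assert unified_timestamp('25-09-2014') == 1411603200
assert unified_timestamp('UNKNOWN DATE FORMAT') is None
```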
@ -640,6 +662,9 @@ class TestUtil(unittest.TestCase):
|
|||||||
"1":{"src":"skipped", "type": "application/vnd.apple.mpegURL"}
|
"1":{"src":"skipped", "type": "application/vnd.apple.mpegURL"}
|
||||||
}''')
|
}''')
|
||||||
|
|
||||||
|
inp = '''{"foo":101}'''
|
||||||
|
self.assertEqual(js_to_json(inp), '''{"foo":101}''')
|
||||||
|
|
||||||
def test_js_to_json_edgecases(self):
|
def test_js_to_json_edgecases(self):
|
||||||
on = js_to_json("{abc_def:'1\\'\\\\2\\\\\\'3\"4'}")
|
on = js_to_json("{abc_def:'1\\'\\\\2\\\\\\'3\"4'}")
|
||||||
self.assertEqual(json.loads(on), {"abc_def": "1'\\2\\'3\"4"})
|
self.assertEqual(json.loads(on), {"abc_def": "1'\\2\\'3\"4"})
|
||||||
@ -956,5 +981,9 @@ The first line
|
|||||||
self.assertRaises(ValueError, encode_base_n, 0, 70)
|
self.assertRaises(ValueError, encode_base_n, 0, 70)
|
||||||
self.assertRaises(ValueError, encode_base_n, 0, 60, custom_table)
|
self.assertRaises(ValueError, encode_base_n, 0, 60, custom_table)
|
||||||
|
|
||||||
|
def test_urshift(self):
|
||||||
|
self.assertEqual(urshift(3, 1), 1)
|
||||||
|
self.assertEqual(urshift(-3, 1), 2147483646)
|
||||||
|
|
||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
|
||||||
unittest.main()
|
unittest.main()
|
||||||
|
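The `urshift` assertions above describe a 32-bit unsigned right shift. A sketch that is consistent with those values, not necessarily the library's exact implementation:

```python
def urshift(val, n):
    # Treat val as an unsigned 32-bit integer before shifting.
    return val >> n if val >= 0 else (val + 0x100000000) >> n


assert urshift(3, 1) == 1
assert urshift(-3, 1) == 2147483646
```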
@ -85,7 +85,7 @@ class ExternalFD(FileDownloader):
|
|||||||
cmd, stderr=subprocess.PIPE)
|
cmd, stderr=subprocess.PIPE)
|
||||||
_, stderr = p.communicate()
|
_, stderr = p.communicate()
|
||||||
if p.returncode != 0:
|
if p.returncode != 0:
|
||||||
self.to_stderr(stderr)
|
self.to_stderr(stderr.decode('utf-8', 'replace'))
|
||||||
return p.returncode
|
return p.returncode
|
||||||
|
|
||||||
|
|
||||||
|
@ -2,14 +2,24 @@ from __future__ import unicode_literals
|
|||||||
|
|
||||||
import os.path
|
import os.path
|
||||||
import re
|
import re
|
||||||
|
import binascii
|
||||||
|
try:
|
||||||
|
from Crypto.Cipher import AES
|
||||||
|
can_decrypt_frag = True
|
||||||
|
except ImportError:
|
||||||
|
can_decrypt_frag = False
|
||||||
|
|
||||||
from .fragment import FragmentFD
|
from .fragment import FragmentFD
|
||||||
from .external import FFmpegFD
|
from .external import FFmpegFD
|
||||||
|
|
||||||
from ..compat import compat_urlparse
|
from ..compat import (
|
||||||
|
compat_urlparse,
|
||||||
|
compat_struct_pack,
|
||||||
|
)
|
||||||
from ..utils import (
|
from ..utils import (
|
||||||
encodeFilename,
|
encodeFilename,
|
||||||
sanitize_open,
|
sanitize_open,
|
||||||
|
parse_m3u8_attributes,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
@ -21,7 +31,7 @@ class HlsFD(FragmentFD):
|
|||||||
@staticmethod
|
@staticmethod
|
||||||
def can_download(manifest):
|
def can_download(manifest):
|
||||||
UNSUPPORTED_FEATURES = (
|
UNSUPPORTED_FEATURES = (
|
||||||
r'#EXT-X-KEY:METHOD=(?!NONE)', # encrypted streams [1]
|
r'#EXT-X-KEY:METHOD=(?!NONE|AES-128)', # encrypted streams [1]
|
||||||
r'#EXT-X-BYTERANGE', # playlists composed of byte ranges of media files [2]
|
r'#EXT-X-BYTERANGE', # playlists composed of byte ranges of media files [2]
|
||||||
|
|
||||||
# Live streams heuristic does not always work (e.g. geo restricted to Germany
|
# Live streams heuristic does not always work (e.g. geo restricted to Germany
|
||||||
@ -39,7 +49,9 @@ class HlsFD(FragmentFD):
|
|||||||
# 3. https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.3.2
|
# 3. https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.3.2
|
||||||
# 4. https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.3.5
|
# 4. https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.3.5
|
||||||
)
|
)
|
||||||
return all(not re.search(feature, manifest) for feature in UNSUPPORTED_FEATURES)
|
check_results = [not re.search(feature, manifest) for feature in UNSUPPORTED_FEATURES]
|
||||||
|
check_results.append(can_decrypt_frag or '#EXT-X-KEY:METHOD=AES-128' not in manifest)
|
||||||
|
return all(check_results)
|
||||||
|
|
||||||
def real_download(self, filename, info_dict):
|
def real_download(self, filename, info_dict):
|
||||||
man_url = info_dict['url']
|
man_url = info_dict['url']
|
||||||
@ -57,36 +69,60 @@ class HlsFD(FragmentFD):
|
|||||||
fd.add_progress_hook(ph)
|
fd.add_progress_hook(ph)
|
||||||
return fd.real_download(filename, info_dict)
|
return fd.real_download(filename, info_dict)
|
||||||
|
|
||||||
fragment_urls = []
|
total_frags = 0
|
||||||
for line in s.splitlines():
|
for line in s.splitlines():
|
||||||
line = line.strip()
|
line = line.strip()
|
||||||
if line and not line.startswith('#'):
|
if line and not line.startswith('#'):
|
||||||
segment_url = (
|
total_frags += 1
|
||||||
line
|
|
||||||
if re.match(r'^https?://', line)
|
|
||||||
else compat_urlparse.urljoin(man_url, line))
|
|
||||||
fragment_urls.append(segment_url)
|
|
||||||
# We only download the first fragment during the test
|
|
||||||
if self.params.get('test', False):
|
|
||||||
break
|
|
||||||
|
|
||||||
ctx = {
|
ctx = {
|
||||||
'filename': filename,
|
'filename': filename,
|
||||||
'total_frags': len(fragment_urls),
|
'total_frags': total_frags,
|
||||||
}
|
}
|
||||||
|
|
||||||
self._prepare_and_start_frag_download(ctx)
|
self._prepare_and_start_frag_download(ctx)
|
||||||
|
|
||||||
|
i = 0
|
||||||
|
media_sequence = 0
|
||||||
|
decrypt_info = {'METHOD': 'NONE'}
|
||||||
frags_filenames = []
|
frags_filenames = []
|
||||||
for i, frag_url in enumerate(fragment_urls):
|
for line in s.splitlines():
|
||||||
|
line = line.strip()
|
||||||
|
if line:
|
||||||
|
if not line.startswith('#'):
|
||||||
|
frag_url = (
|
||||||
|
line
|
||||||
|
if re.match(r'^https?://', line)
|
||||||
|
else compat_urlparse.urljoin(man_url, line))
|
||||||
frag_filename = '%s-Frag%d' % (ctx['tmpfilename'], i)
|
frag_filename = '%s-Frag%d' % (ctx['tmpfilename'], i)
|
||||||
success = ctx['dl'].download(frag_filename, {'url': frag_url})
|
success = ctx['dl'].download(frag_filename, {'url': frag_url})
|
||||||
if not success:
|
if not success:
|
||||||
return False
|
return False
|
||||||
down, frag_sanitized = sanitize_open(frag_filename, 'rb')
|
down, frag_sanitized = sanitize_open(frag_filename, 'rb')
|
||||||
ctx['dest_stream'].write(down.read())
|
frag_content = down.read()
|
||||||
down.close()
|
down.close()
|
||||||
|
if decrypt_info['METHOD'] == 'AES-128':
|
||||||
|
iv = decrypt_info.get('IV') or compat_struct_pack('>8xq', media_sequence)
|
||||||
|
frag_content = AES.new(
|
||||||
|
decrypt_info['KEY'], AES.MODE_CBC, iv).decrypt(frag_content)
|
||||||
|
ctx['dest_stream'].write(frag_content)
|
||||||
frags_filenames.append(frag_sanitized)
|
frags_filenames.append(frag_sanitized)
|
||||||
|
# We only download the first fragment during the test
|
||||||
|
if self.params.get('test', False):
|
||||||
|
break
|
||||||
|
i += 1
|
||||||
|
media_sequence += 1
|
||||||
|
elif line.startswith('#EXT-X-KEY'):
|
||||||
|
decrypt_info = parse_m3u8_attributes(line[11:])
|
||||||
|
if decrypt_info['METHOD'] == 'AES-128':
|
||||||
|
if 'IV' in decrypt_info:
|
||||||
|
decrypt_info['IV'] = binascii.unhexlify(decrypt_info['IV'][2:])
|
||||||
|
if not re.match(r'^https?://', decrypt_info['URI']):
|
||||||
|
decrypt_info['URI'] = compat_urlparse.urljoin(
|
||||||
|
man_url, decrypt_info['URI'])
|
||||||
|
decrypt_info['KEY'] = self.ydl.urlopen(decrypt_info['URI']).read()
|
||||||
|
elif line.startswith('#EXT-X-MEDIA-SEQUENCE'):
|
||||||
|
media_sequence = int(line[22:])
|
||||||
|
|
||||||
self._finish_frag_download(ctx)
|
self._finish_frag_download(ctx)
|
||||||
|
|
||||||
|
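A standalone sketch of the AES-128 fragment handling added above, assuming pycrypto (`Crypto.Cipher.AES`) is available. Per the HLS draft, when the `#EXT-X-KEY` tag carries no `IV` attribute the IV is the media sequence number as a 16-byte big-endian integer, which is what `compat_struct_pack('>8xq', media_sequence)` builds:

```python
import struct

from Crypto.Cipher import AES  # optional dependency, as in the code above


def decrypt_fragment(frag_content, key, media_sequence, iv=None):
    # key is the 16-byte key fetched from the URI attribute of #EXT-X-KEY.
    # Default IV: 8 zero bytes followed by the media sequence number as a
    # big-endian 64-bit integer (16 bytes total), mirroring '>8xq' above.
    if iv is None:
        iv = struct.pack('>8xq', media_sequence)
    return AES.new(key, AES.MODE_CBC, iv).decrypt(frag_content)
```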
@ -156,7 +156,10 @@ class AdobeTVVideoIE(InfoExtractor):
|
|||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
video_id = self._match_id(url)
|
video_id = self._match_id(url)
|
||||||
video_data = self._download_json(url + '?format=json', video_id)
|
webpage = self._download_webpage(url, video_id)
|
||||||
|
|
||||||
|
video_data = self._parse_json(self._search_regex(
|
||||||
|
r'var\s+bridge\s*=\s*([^;]+);', webpage, 'bridged data'), video_id)
|
||||||
|
|
||||||
formats = [{
|
formats = [{
|
||||||
'format_id': '%s-%s' % (determine_ext(source['src']), source.get('height')),
|
'format_id': '%s-%s' % (determine_ext(source['src']), source.get('height')),
|
||||||
|
@ -24,10 +24,10 @@ class AftonbladetIE(InfoExtractor):
|
|||||||
webpage = self._download_webpage(url, video_id)
|
webpage = self._download_webpage(url, video_id)
|
||||||
|
|
||||||
# find internal video meta data
|
# find internal video meta data
|
||||||
meta_url = 'http://aftonbladet-play.drlib.aptoma.no/video/%s.json'
|
meta_url = 'http://aftonbladet-play-metadata.cdn.drvideo.aptoma.no/video/%s.json'
|
||||||
player_config = self._parse_json(self._html_search_regex(
|
player_config = self._parse_json(self._html_search_regex(
|
||||||
r'data-player-config="([^"]+)"', webpage, 'player config'), video_id)
|
r'data-player-config="([^"]+)"', webpage, 'player config'), video_id)
|
||||||
internal_meta_id = player_config['videoId']
|
internal_meta_id = player_config['aptomaVideoId']
|
||||||
internal_meta_url = meta_url % internal_meta_id
|
internal_meta_url = meta_url % internal_meta_id
|
||||||
internal_meta_json = self._download_json(
|
internal_meta_json = self._download_json(
|
||||||
internal_meta_url, video_id, 'Downloading video meta data')
|
internal_meta_url, video_id, 'Downloading video meta data')
|
||||||
|
@ -7,6 +7,8 @@ from .common import InfoExtractor
|
|||||||
from ..compat import compat_urlparse
|
from ..compat import compat_urlparse
|
||||||
from ..utils import (
|
from ..utils import (
|
||||||
int_or_none,
|
int_or_none,
|
||||||
|
parse_duration,
|
||||||
|
unified_strdate,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
@ -16,7 +18,8 @@ class AppleTrailersIE(InfoExtractor):
|
|||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
'url': 'http://trailers.apple.com/trailers/wb/manofsteel/',
|
'url': 'http://trailers.apple.com/trailers/wb/manofsteel/',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': 'manofsteel',
|
'id': '5111',
|
||||||
|
'title': 'Man of Steel',
|
||||||
},
|
},
|
||||||
'playlist': [
|
'playlist': [
|
||||||
{
|
{
|
||||||
@ -70,6 +73,15 @@ class AppleTrailersIE(InfoExtractor):
|
|||||||
'id': 'blackthorn',
|
'id': 'blackthorn',
|
||||||
},
|
},
|
||||||
'playlist_mincount': 2,
|
'playlist_mincount': 2,
|
||||||
|
'expected_warnings': ['Unable to download JSON metadata'],
|
||||||
|
}, {
|
||||||
|
# json data only available from http://trailers.apple.com/trailers/feeds/data/15881.json
|
||||||
|
'url': 'http://trailers.apple.com/trailers/fox/kungfupanda3/',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '15881',
|
||||||
|
'title': 'Kung Fu Panda 3',
|
||||||
|
},
|
||||||
|
'playlist_mincount': 4,
|
||||||
}, {
|
}, {
|
||||||
'url': 'http://trailers.apple.com/ca/metropole/autrui/',
|
'url': 'http://trailers.apple.com/ca/metropole/autrui/',
|
||||||
'only_matching': True,
|
'only_matching': True,
|
||||||
@ -85,6 +97,45 @@ class AppleTrailersIE(InfoExtractor):
|
|||||||
movie = mobj.group('movie')
|
movie = mobj.group('movie')
|
||||||
uploader_id = mobj.group('company')
|
uploader_id = mobj.group('company')
|
||||||
|
|
||||||
|
webpage = self._download_webpage(url, movie)
|
||||||
|
film_id = self._search_regex(r"FilmId\s*=\s*'(\d+)'", webpage, 'film id')
|
||||||
|
film_data = self._download_json(
|
||||||
|
'http://trailers.apple.com/trailers/feeds/data/%s.json' % film_id,
|
||||||
|
film_id, fatal=False)
|
||||||
|
|
||||||
|
if film_data:
|
||||||
|
entries = []
|
||||||
|
for clip in film_data.get('clips', []):
|
||||||
|
clip_title = clip['title']
|
||||||
|
|
||||||
|
formats = []
|
||||||
|
for version, version_data in clip.get('versions', {}).items():
|
||||||
|
for size, size_data in version_data.get('sizes', {}).items():
|
||||||
|
src = size_data.get('src')
|
||||||
|
if not src:
|
||||||
|
continue
|
||||||
|
formats.append({
|
||||||
|
'format_id': '%s-%s' % (version, size),
|
||||||
|
'url': re.sub(r'_(\d+p.mov)', r'_h\1', src),
|
||||||
|
'width': int_or_none(size_data.get('width')),
|
||||||
|
'height': int_or_none(size_data.get('height')),
|
||||||
|
'language': version[:2],
|
||||||
|
})
|
||||||
|
self._sort_formats(formats)
|
||||||
|
|
||||||
|
entries.append({
|
||||||
|
'id': movie + '-' + re.sub(r'[^a-zA-Z0-9]', '', clip_title).lower(),
|
||||||
|
'formats': formats,
|
||||||
|
'title': clip_title,
|
||||||
|
'thumbnail': clip.get('screen') or clip.get('thumb'),
|
||||||
|
'duration': parse_duration(clip.get('runtime') or clip.get('faded')),
|
||||||
|
'upload_date': unified_strdate(clip.get('posted')),
|
||||||
|
'uploader_id': uploader_id,
|
||||||
|
})
|
||||||
|
|
||||||
|
page_data = film_data.get('page', {})
|
||||||
|
return self.playlist_result(entries, film_id, page_data.get('movie_title'))
|
||||||
|
|
||||||
playlist_url = compat_urlparse.urljoin(url, 'includes/playlists/itunes.inc')
|
playlist_url = compat_urlparse.urljoin(url, 'includes/playlists/itunes.inc')
|
||||||
|
|
||||||
def fix_html(s):
|
def fix_html(s):
|
||||||
|
@ -8,7 +8,6 @@ from .generic import GenericIE
|
|||||||
from ..utils import (
|
from ..utils import (
|
||||||
determine_ext,
|
determine_ext,
|
||||||
ExtractorError,
|
ExtractorError,
|
||||||
get_element_by_attribute,
|
|
||||||
qualities,
|
qualities,
|
||||||
int_or_none,
|
int_or_none,
|
||||||
parse_duration,
|
parse_duration,
|
||||||
@ -274,41 +273,3 @@ class ARDIE(InfoExtractor):
|
|||||||
'upload_date': upload_date,
|
'upload_date': upload_date,
|
||||||
'thumbnail': thumbnail,
|
'thumbnail': thumbnail,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
class SportschauIE(ARDMediathekIE):
|
|
||||||
IE_NAME = 'Sportschau'
|
|
||||||
_VALID_URL = r'(?P<baseurl>https?://(?:www\.)?sportschau\.de/(?:[^/]+/)+video(?P<id>[^/#?]+))\.html'
|
|
||||||
_TESTS = [{
|
|
||||||
'url': 'http://www.sportschau.de/tourdefrance/videoseppeltkokainhatnichtsmitklassischemdopingzutun100.html',
|
|
||||||
'info_dict': {
|
|
||||||
'id': 'seppeltkokainhatnichtsmitklassischemdopingzutun100',
|
|
||||||
'ext': 'mp4',
|
|
||||||
'title': 'Seppelt: "Kokain hat nichts mit klassischem Doping zu tun"',
|
|
||||||
'thumbnail': 're:^https?://.*\.jpg$',
|
|
||||||
'description': 'Der ARD-Doping Experte Hajo Seppelt gibt seine Einschätzung zum ersten Dopingfall der diesjährigen Tour de France um den Italiener Luca Paolini ab.',
|
|
||||||
},
|
|
||||||
'params': {
|
|
||||||
# m3u8 download
|
|
||||||
'skip_download': True,
|
|
||||||
},
|
|
||||||
}]
|
|
||||||
|
|
||||||
def _real_extract(self, url):
|
|
||||||
mobj = re.match(self._VALID_URL, url)
|
|
||||||
video_id = mobj.group('id')
|
|
||||||
base_url = mobj.group('baseurl')
|
|
||||||
|
|
||||||
webpage = self._download_webpage(url, video_id)
|
|
||||||
title = get_element_by_attribute('class', 'headline', webpage)
|
|
||||||
description = self._html_search_meta('description', webpage, 'description')
|
|
||||||
|
|
||||||
info = self._extract_media_info(
|
|
||||||
base_url + '-mc_defaultQuality-h.json', webpage, video_id)
|
|
||||||
|
|
||||||
info.update({
|
|
||||||
'title': title,
|
|
||||||
'description': description,
|
|
||||||
})
|
|
||||||
|
|
||||||
return info
|
|
||||||
|
youtube_dl/extractor/arte.py
@@ -180,11 +180,14 @@ class ArteTVBaseIE(InfoExtractor):
 
 class ArteTVPlus7IE(ArteTVBaseIE):
     IE_NAME = 'arte.tv:+7'
-    _VALID_URL = r'https?://(?:www\.)?arte\.tv/guide/(?P<lang>fr|de|en|es)/(?:(?:sendungen|emissions|embed)/)?(?P<id>[^/]+)/(?P<name>[^/?#&]+)'
+    _VALID_URL = r'https?://(?:(?:www|sites)\.)?arte\.tv/[^/]+/(?P<lang>fr|de|en|es)/(?:[^/]+/)*(?P<id>[^/?#&]+)'
 
     _TESTS = [{
         'url': 'http://www.arte.tv/guide/de/sendungen/XEN/xenius/?vid=055918-015_PLUS7-D',
         'only_matching': True,
+    }, {
+        'url': 'http://sites.arte.tv/karambolage/de/video/karambolage-22',
+        'only_matching': True,
     }]
 
     @classmethod
@@ -240,10 +243,10 @@ class ArteTVPlus7IE(ArteTVBaseIE):
             return self._extract_from_json_url(json_url, video_id, lang, title=title)
         # Different kind of embed URL (e.g.
         # http://www.arte.tv/magazine/trepalium/fr/episode-0406-replay-trepalium)
-        embed_url = self._search_regex(
-            r'<iframe[^>]+src=(["\'])(?P<url>.+?)\1',
-            webpage, 'embed url', group='url')
-        return self.url_result(embed_url)
+        entries = [
+            self.url_result(url)
+            for _, url in re.findall(r'<iframe[^>]+src=(["\'])(?P<url>.+?)\1', webpage)]
+        return self.playlist_result(entries)
 
 
 # It also uses the arte_vp_url url from the webpage to extract the information
@@ -252,22 +255,17 @@ class ArteTVCreativeIE(ArteTVPlus7IE):
     _VALID_URL = r'https?://creative\.arte\.tv/(?P<lang>fr|de|en|es)/(?:[^/]+/)*(?P<id>[^/?#&]+)'
 
     _TESTS = [{
-        'url': 'http://creative.arte.tv/de/magazin/agentur-amateur-corporate-design',
+        'url': 'http://creative.arte.tv/fr/episode/osmosis-episode-1',
         'info_dict': {
-            'id': '72176',
+            'id': '057405-001-A',
             'ext': 'mp4',
-            'title': 'Folge 2 - Corporate Design',
-            'upload_date': '20131004',
+            'title': 'OSMOSIS - N\'AYEZ PLUS PEUR D\'AIMER (1)',
+            'upload_date': '20150716',
         },
     }, {
         'url': 'http://creative.arte.tv/fr/Monty-Python-Reunion',
-        'info_dict': {
-            'id': '160676',
-            'ext': 'mp4',
-            'title': 'Monty Python live (mostly)',
-            'description': 'Événement ! Quarante-cinq ans après leurs premiers succès, les légendaires Monty Python remontent sur scène.\n',
-            'upload_date': '20140805',
-        }
+        'playlist_count': 11,
+        'add_ie': ['Youtube'],
     }, {
         'url': 'http://creative.arte.tv/de/episode/agentur-amateur-4-der-erste-kunde',
         'only_matching': True,
@@ -349,14 +347,13 @@ class ArteTVCinemaIE(ArteTVPlus7IE):
     _VALID_URL = r'https?://cinema\.arte\.tv/(?P<lang>fr|de|en|es)/(?P<id>.+)'
 
     _TESTS = [{
-        'url': 'http://cinema.arte.tv/de/node/38291',
-        'md5': '6b275511a5107c60bacbeeda368c3aa1',
+        'url': 'http://cinema.arte.tv/fr/article/les-ailes-du-desir-de-julia-reck',
+        'md5': 'a5b9dd5575a11d93daf0e3f404f45438',
         'info_dict': {
-            'id': '055876-000_PWA12025-D',
+            'id': '062494-000-A',
             'ext': 'mp4',
-            'title': 'Tod auf dem Nil',
-            'upload_date': '20160122',
-            'description': 'md5:7f749bbb77d800ef2be11d54529b96bc',
+            'title': 'Film lauréat du concours web - "Les ailes du désir" de Julia Reck',
+            'upload_date': '20150807',
         },
     }]
 
youtube_dl/extractor/azubu.py
@@ -46,6 +46,7 @@ class AzubuIE(InfoExtractor):
                 'uploader_id': 272749,
                 'view_count': int,
             },
+            'skip': 'Channel offline',
         },
     ]
 
@@ -56,22 +57,26 @@ class AzubuIE(InfoExtractor):
             'http://www.azubu.tv/api/video/%s' % video_id, video_id)['data']
 
         title = data['title'].strip()
-        description = data['description']
-        thumbnail = data['thumbnail']
-        view_count = data['view_count']
-        uploader = data['user']['username']
-        uploader_id = data['user']['id']
+        description = data.get('description')
+        thumbnail = data.get('thumbnail')
+        view_count = data.get('view_count')
+        user = data.get('user', {})
+        uploader = user.get('username')
+        uploader_id = user.get('id')
 
         stream_params = json.loads(data['stream_params'])
 
-        timestamp = float_or_none(stream_params['creationDate'], 1000)
-        duration = float_or_none(stream_params['length'], 1000)
+        timestamp = float_or_none(stream_params.get('creationDate'), 1000)
+        duration = float_or_none(stream_params.get('length'), 1000)
 
         renditions = stream_params.get('renditions') or []
         video = stream_params.get('FLVFullLength') or stream_params.get('videoFullLength')
         if video:
             renditions.append(video)
 
+        if not renditions and not user.get('channel', {}).get('is_live', True):
+            raise ExtractorError('%s said: channel is offline.' % self.IE_NAME, expected=True)
+
         formats = [{
             'url': fmt['url'],
             'width': fmt['frameWidth'],
youtube_dl/extractor/bbc.py
@@ -31,7 +31,7 @@ class BBCCoUkIE(InfoExtractor):
                             music/clips[/#]|
                             radio/player/
                         )
-                        (?P<id>%s)
+                        (?P<id>%s)(?!/(?:episodes|broadcasts|clips))
                     ''' % _ID_REGEX
 
     _MEDIASELECTOR_URLS = [
@@ -192,6 +192,7 @@ class BBCCoUkIE(InfoExtractor):
             # rtmp download
             'skip_download': True,
         },
+        'skip': 'Now it\'s really geo-restricted',
     }, {
         # compact player (https://github.com/rg3/youtube-dl/issues/8147)
         'url': 'http://www.bbc.co.uk/programmes/p028bfkf/player',
@@ -698,7 +699,9 @@ class BBCIE(BBCCoUkIE):
 
     @classmethod
     def suitable(cls, url):
-        return False if BBCCoUkIE.suitable(url) or BBCCoUkArticleIE.suitable(url) else super(BBCIE, cls).suitable(url)
+        EXCLUDE_IE = (BBCCoUkIE, BBCCoUkArticleIE, BBCCoUkIPlayerPlaylistIE, BBCCoUkPlaylistIE)
+        return (False if any(ie.suitable(url) for ie in EXCLUDE_IE)
+                else super(BBCIE, cls).suitable(url))
 
     def _extract_from_media_meta(self, media_meta, video_id):
         # Direct links to media in media metadata (e.g.
@@ -975,3 +978,72 @@ class BBCCoUkArticleIE(InfoExtractor):
             r'<div[^>]+typeof="Clip"[^>]+resource="([^"]+)"', webpage)]
 
         return self.playlist_result(entries, playlist_id, title, description)
+
+
+class BBCCoUkPlaylistBaseIE(InfoExtractor):
+    def _real_extract(self, url):
+        playlist_id = self._match_id(url)
+
+        webpage = self._download_webpage(url, playlist_id)
+
+        entries = [
+            self.url_result(self._URL_TEMPLATE % video_id, BBCCoUkIE.ie_key())
+            for video_id in re.findall(
+                self._VIDEO_ID_TEMPLATE % BBCCoUkIE._ID_REGEX, webpage)]
+
+        title, description = self._extract_title_and_description(webpage)
+
+        return self.playlist_result(entries, playlist_id, title, description)
+
+
+class BBCCoUkIPlayerPlaylistIE(BBCCoUkPlaylistBaseIE):
+    IE_NAME = 'bbc.co.uk:iplayer:playlist'
+    _VALID_URL = r'https?://(?:www\.)?bbc\.co\.uk/iplayer/episodes/(?P<id>%s)' % BBCCoUkIE._ID_REGEX
+    _URL_TEMPLATE = 'http://www.bbc.co.uk/iplayer/episode/%s'
+    _VIDEO_ID_TEMPLATE = r'data-ip-id=["\'](%s)'
+    _TEST = {
+        'url': 'http://www.bbc.co.uk/iplayer/episodes/b05rcz9v',
+        'info_dict': {
+            'id': 'b05rcz9v',
+            'title': 'The Disappearance',
+            'description': 'French thriller serial about a missing teenager.',
+        },
+        'playlist_mincount': 6,
+    }
+
+    def _extract_title_and_description(self, webpage):
+        title = self._search_regex(r'<h1>([^<]+)</h1>', webpage, 'title', fatal=False)
+        description = self._search_regex(
+            r'<p[^>]+class=(["\'])subtitle\1[^>]*>(?P<value>[^<]+)</p>',
+            webpage, 'description', fatal=False, group='value')
+        return title, description
+
+
+class BBCCoUkPlaylistIE(BBCCoUkPlaylistBaseIE):
+    IE_NAME = 'bbc.co.uk:playlist'
+    _VALID_URL = r'https?://(?:www\.)?bbc\.co\.uk/programmes/(?P<id>%s)/(?:episodes|broadcasts|clips)' % BBCCoUkIE._ID_REGEX
+    _URL_TEMPLATE = 'http://www.bbc.co.uk/programmes/%s'
+    _VIDEO_ID_TEMPLATE = r'data-pid=["\'](%s)'
+    _TESTS = [{
+        'url': 'http://www.bbc.co.uk/programmes/b05rcz9v/clips',
+        'info_dict': {
+            'id': 'b05rcz9v',
+            'title': 'The Disappearance - Clips - BBC Four',
+            'description': 'French thriller serial about a missing teenager.',
+        },
+        'playlist_mincount': 7,
+    }, {
+        'url': 'http://www.bbc.co.uk/programmes/b05rcz9v/broadcasts/2016/06',
+        'only_matching': True,
+    }, {
+        'url': 'http://www.bbc.co.uk/programmes/b05rcz9v/clips',
+        'only_matching': True,
+    }, {
+        'url': 'http://www.bbc.co.uk/programmes/b055jkys/episodes/player',
+        'only_matching': True,
+    }]
+
+    def _extract_title_and_description(self, webpage):
+        title = self._og_search_title(webpage, fatal=False)
+        description = self._og_search_description(webpage)
+        return title, description
youtube_dl/extractor/bet.py
@@ -1,31 +1,27 @@
 from __future__ import unicode_literals
 
-from .common import InfoExtractor
-from ..compat import compat_urllib_parse_unquote
-from ..utils import (
-    xpath_text,
-    xpath_with_ns,
-    int_or_none,
-    parse_iso8601,
-)
+from .mtv import MTVServicesInfoExtractor
+from ..utils import unified_strdate
+from ..compat import compat_urllib_parse_urlencode
 
 
-class BetIE(InfoExtractor):
+class BetIE(MTVServicesInfoExtractor):
     _VALID_URL = r'https?://(?:www\.)?bet\.com/(?:[^/]+/)+(?P<id>.+?)\.html'
     _TESTS = [
         {
             'url': 'http://www.bet.com/news/politics/2014/12/08/in-bet-exclusive-obama-talks-race-and-racism.html',
             'info_dict': {
-                'id': 'news/national/2014/a-conversation-with-president-obama',
+                'id': '07e96bd3-8850-3051-b856-271b457f0ab8',
                 'display_id': 'in-bet-exclusive-obama-talks-race-and-racism',
                 'ext': 'flv',
                 'title': 'A Conversation With President Obama',
-                'description': 'md5:699d0652a350cf3e491cd15cc745b5da',
+                'description': 'President Obama urges persistence in confronting racism and bias.',
                 'duration': 1534,
-                'timestamp': 1418075340,
                 'upload_date': '20141208',
-                'uploader': 'admin',
                 'thumbnail': 're:(?i)^https?://.*\.jpg$',
+                'subtitles': {
+                    'en': 'mincount:2',
+                }
             },
             'params': {
                 # rtmp download
@@ -35,16 +31,17 @@ class BetIE(InfoExtractor):
         {
             'url': 'http://www.bet.com/video/news/national/2014/justice-for-ferguson-a-community-reacts.html',
             'info_dict': {
-                'id': 'news/national/2014/justice-for-ferguson-a-community-reacts',
+                'id': '9f516bf1-7543-39c4-8076-dd441b459ba9',
                 'display_id': 'justice-for-ferguson-a-community-reacts',
                 'ext': 'flv',
                 'title': 'Justice for Ferguson: A Community Reacts',
                 'description': 'A BET News special.',
                 'duration': 1696,
-                'timestamp': 1416942360,
                 'upload_date': '20141125',
-                'uploader': 'admin',
                 'thumbnail': 're:(?i)^https?://.*\.jpg$',
+                'subtitles': {
+                    'en': 'mincount:2',
+                }
             },
             'params': {
                 # rtmp download
@@ -53,57 +50,32 @@ class BetIE(InfoExtractor):
         }
     ]
 
+    _FEED_URL = "http://feeds.mtvnservices.com/od/feed/bet-mrss-player"
+
+    def _get_feed_query(self, uri):
+        return compat_urllib_parse_urlencode({
+            'uuid': uri,
+        })
+
+    def _extract_mgid(self, webpage):
+        return self._search_regex(r'data-uri="([^"]+)', webpage, 'mgid')
+
     def _real_extract(self, url):
         display_id = self._match_id(url)
 
         webpage = self._download_webpage(url, display_id)
+        mgid = self._extract_mgid(webpage)
+        videos_info = self._get_videos_info(mgid)
 
-        media_url = compat_urllib_parse_unquote(self._search_regex(
-            [r'mediaURL\s*:\s*"([^"]+)"', r"var\s+mrssMediaUrl\s*=\s*'([^']+)'"],
-            webpage, 'media URL'))
+        info_dict = videos_info['entries'][0]
 
-        video_id = self._search_regex(
-            r'/video/(.*)/_jcr_content/', media_url, 'video id')
+        upload_date = unified_strdate(self._html_search_meta('date', webpage))
+        description = self._html_search_meta('description', webpage)
 
-        mrss = self._download_xml(media_url, display_id)
-
-        item = mrss.find('./channel/item')
-
-        NS_MAP = {
-            'dc': 'http://purl.org/dc/elements/1.1/',
-            'media': 'http://search.yahoo.com/mrss/',
-            'ka': 'http://kickapps.com/karss',
-        }
-
-        title = xpath_text(item, './title', 'title')
-        description = xpath_text(
-            item, './description', 'description', fatal=False)
-
-        timestamp = parse_iso8601(xpath_text(
-            item, xpath_with_ns('./dc:date', NS_MAP),
-            'upload date', fatal=False))
-        uploader = xpath_text(
-            item, xpath_with_ns('./dc:creator', NS_MAP),
-            'uploader', fatal=False)
-
-        media_content = item.find(
-            xpath_with_ns('./media:content', NS_MAP))
-        duration = int_or_none(media_content.get('duration'))
-        smil_url = media_content.get('url')
-
-        thumbnail = media_content.find(
-            xpath_with_ns('./media:thumbnail', NS_MAP)).get('url')
-
-        formats = self._extract_smil_formats(smil_url, display_id)
-        self._sort_formats(formats)
-
-        return {
-            'id': video_id,
+        info_dict.update({
             'display_id': display_id,
-            'title': title,
             'description': description,
-            'thumbnail': thumbnail,
-            'timestamp': timestamp,
-            'uploader': uploader,
-            'duration': duration,
-            'formats': formats,
-        }
+            'upload_date': upload_date,
+        })
+
+        return info_dict
youtube_dl/extractor/br.py
@@ -29,7 +29,8 @@ class BRIE(InfoExtractor):
                 'duration': 180,
                 'uploader': 'Reinhard Weber',
                 'upload_date': '20150422',
-            }
+            },
+            'skip': '404 not found',
         },
         {
             'url': 'http://www.br.de/nachrichten/oberbayern/inhalt/muenchner-polizeipraesident-schreiber-gestorben-100.html',
@@ -40,7 +41,8 @@ class BRIE(InfoExtractor):
                 'title': 'Manfred Schreiber ist tot',
                 'description': 'md5:b454d867f2a9fc524ebe88c3f5092d97',
                 'duration': 26,
-            }
+            },
+            'skip': '404 not found',
         },
         {
             'url': 'https://www.br-klassik.de/audio/peeping-tom-premierenkritik-dance-festival-muenchen-100.html',
@@ -51,7 +53,8 @@ class BRIE(InfoExtractor):
                 'title': 'Kurzweilig und sehr bewegend',
                 'description': 'md5:0351996e3283d64adeb38ede91fac54e',
                 'duration': 296,
-            }
+            },
+            'skip': '404 not found',
         },
         {
             'url': 'http://www.br.de/radio/bayern1/service/team/videos/team-video-erdelt100.html',
youtube_dl/extractor/carambatv.py (new file, 88 lines)
@@ -0,0 +1,88 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..compat import compat_str
+from ..utils import (
+    float_or_none,
+    int_or_none,
+    try_get,
+)
+
+
+class CarambaTVIE(InfoExtractor):
+    _VALID_URL = r'(?:carambatv:|https?://video1\.carambatv\.ru/v/)(?P<id>\d+)'
+    _TESTS = [{
+        'url': 'http://video1.carambatv.ru/v/191910501',
+        'md5': '2f4a81b7cfd5ab866ee2d7270cb34a2a',
+        'info_dict': {
+            'id': '191910501',
+            'ext': 'mp4',
+            'title': '[BadComedian] - Разборка в Маниле (Абсолютный обзор)',
+            'thumbnail': 're:^https?://.*\.jpg',
+            'duration': 2678.31,
+        },
+    }, {
+        'url': 'carambatv:191910501',
+        'only_matching': True,
+    }]
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+
+        video = self._download_json(
+            'http://video1.carambatv.ru/v/%s/videoinfo.js' % video_id,
+            video_id)
+
+        title = video['title']
+
+        base_url = video.get('video') or 'http://video1.carambatv.ru/v/%s/' % video_id
+
+        formats = [{
+            'url': base_url + f['fn'],
+            'height': int_or_none(f.get('height')),
+            'format_id': '%sp' % f['height'] if f.get('height') else None,
+        } for f in video['qualities'] if f.get('fn')]
+        self._sort_formats(formats)
+
+        thumbnail = video.get('splash')
+        duration = float_or_none(try_get(
+            video, lambda x: x['annotations'][0]['end_time'], compat_str))
+
+        return {
+            'id': video_id,
+            'title': title,
+            'thumbnail': thumbnail,
+            'duration': duration,
+            'formats': formats,
+        }
+
+
+class CarambaTVPageIE(InfoExtractor):
+    _VALID_URL = r'https?://carambatv\.ru/(?:[^/]+/)+(?P<id>[^/?#&]+)'
+    _TEST = {
+        'url': 'http://carambatv.ru/movie/bad-comedian/razborka-v-manile/',
+        'md5': '',
+        'info_dict': {
+            'id': '191910501',
+            'ext': 'mp4',
+            'title': '[BadComedian] - Разборка в Маниле (Абсолютный обзор)',
+            'thumbnail': 're:^https?://.*\.jpg$',
+            'duration': 2678.31,
+        },
+    }
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+
+        webpage = self._download_webpage(url, video_id)
+
+        video_url = self._og_search_property('video:iframe', webpage, default=None)
+
+        if not video_url:
+            video_id = self._search_regex(
+                r'(?:video_id|crmb_vuid)\s*[:=]\s*["\']?(\d+)',
+                webpage, 'video id')
+            video_url = 'carambatv:%s' % video_id
+
+        return self.url_result(video_url, CarambaTVIE.ie_key())
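Aside from the diff itself: the new CarambaTVIE pulls the duration out of a nested structure with try_get from ..utils, whose body is not part of this compare view. The snippet below is only a rough sketch of how such a helper typically behaves (a guess, not the youtube-dl source), and the sample data mirroring the videoinfo.js shape is invented.

# Rough, assumed sketch of the try_get helper used by CarambaTVIE above:
# run a getter over a nested structure, return None on any lookup error,
# and optionally check the type of the result.
def try_get_sketch(src, getter, expected_type=None):
    try:
        value = getter(src)
    except (AttributeError, KeyError, TypeError, IndexError):
        return None
    if expected_type is not None and not isinstance(value, expected_type):
        return None
    return value

video = {'annotations': [{'end_time': '2678.31'}]}  # made-up example payload
print(try_get_sketch(video, lambda x: x['annotations'][0]['end_time'], str))  # '2678.31'
print(try_get_sketch({}, lambda x: x['annotations'][0]['end_time'], str))     # None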
youtube_dl/extractor/cbs.py
@@ -1,17 +1,13 @@
 from __future__ import unicode_literals
 
-import re
-
-from .theplatform import ThePlatformIE
+from .theplatform import ThePlatformFeedIE
 from ..utils import (
-    xpath_text,
-    xpath_element,
     int_or_none,
     find_xpath_attr,
 )
 
 
-class CBSBaseIE(ThePlatformIE):
+class CBSBaseIE(ThePlatformFeedIE):
     def _parse_smil_subtitles(self, smil, namespace=None, subtitles_lang='en'):
         closed_caption_e = find_xpath_attr(smil, self._xpath_ns('.//param', namespace), 'name', 'ClosedCaptionURL')
         return {
@@ -21,9 +17,22 @@ class CBSBaseIE(ThePlatformFeedIE):
             }]
     } if closed_caption_e is not None and closed_caption_e.attrib.get('value') else []
 
+    def _extract_video_info(self, filter_query, video_id):
+        return self._extract_feed_info(
+            'dJ5BDC', 'VxxJg8Ymh8sE', filter_query, video_id, lambda entry: {
+                'series': entry.get('cbs$SeriesTitle'),
+                'season_number': int_or_none(entry.get('cbs$SeasonNumber')),
+                'episode': entry.get('cbs$EpisodeTitle'),
+                'episode_number': int_or_none(entry.get('cbs$EpisodeNumber')),
+            }, {
+                'StreamPack': {
+                    'manifest': 'm3u',
+                }
+            })
+
 
 class CBSIE(CBSBaseIE):
-    _VALID_URL = r'(?:cbs:(?P<content_id>\w+)|https?://(?:www\.)?(?:cbs\.com/shows/[^/]+/(?:video|artist)|colbertlateshow\.com/(?:video|podcasts))/[^/]+/(?P<display_id>[^/]+))'
+    _VALID_URL = r'(?:cbs:|https?://(?:www\.)?(?:cbs\.com/shows/[^/]+/video|colbertlateshow\.com/(?:video|podcasts))/)(?P<id>[\w-]+)'
 
     _TESTS = [{
         'url': 'http://www.cbs.com/shows/garth-brooks/video/_u7W953k6la293J7EPTd9oHkSPs6Xn6_/connect-chat-feat-garth-brooks/',
@@ -38,25 +47,7 @@ class CBSIE(CBSBaseIE):
             'upload_date': '20131127',
             'uploader': 'CBSI-NEW',
         },
-        'params': {
-            # rtmp download
-            'skip_download': True,
-        },
-        '_skip': 'Blocked outside the US',
-    }, {
-        'url': 'http://www.cbs.com/shows/liveonletterman/artist/221752/st-vincent/',
-        'info_dict': {
-            'id': 'WWF_5KqY3PK1',
-            'display_id': 'st-vincent',
-            'ext': 'flv',
-            'title': 'Live on Letterman - St. Vincent',
-            'description': 'Live On Letterman: St. Vincent in concert from New York\'s Ed Sullivan Theater on Tuesday, July 16, 2014.',
-            'duration': 3221,
-        },
-        'params': {
-            # rtmp download
-            'skip_download': True,
-        },
+        'expected_warnings': ['Failed to download m3u8 information'],
         '_skip': 'Blocked outside the US',
     }, {
         'url': 'http://colbertlateshow.com/video/8GmB0oY0McANFvp2aEffk9jZZZ2YyXxy/the-colbeard/',
@@ -68,44 +59,5 @@ class CBSIE(CBSBaseIE):
     TP_RELEASE_URL_TEMPLATE = 'http://link.theplatform.com/s/dJ5BDC/%s?mbr=true'
 
     def _real_extract(self, url):
-        content_id, display_id = re.match(self._VALID_URL, url).groups()
-        if not content_id:
-            webpage = self._download_webpage(url, display_id)
-            content_id = self._search_regex(
-                [r"video\.settings\.content_id\s*=\s*'([^']+)';", r"cbsplayer\.contentId\s*=\s*'([^']+)';"],
-                webpage, 'content id')
-        items_data = self._download_xml(
-            'http://can.cbs.com/thunder/player/videoPlayerService.php',
-            content_id, query={'partner': 'cbs', 'contentId': content_id})
-        video_data = xpath_element(items_data, './/item')
-        title = xpath_text(video_data, 'videoTitle', 'title', True)
-
-        subtitles = {}
-        formats = []
-        for item in items_data.findall('.//item'):
-            pid = xpath_text(item, 'pid')
-            if not pid:
-                continue
-            tp_release_url = self.TP_RELEASE_URL_TEMPLATE % pid
-            if '.m3u8' in xpath_text(item, 'contentUrl', default=''):
-                tp_release_url += '&manifest=m3u'
-            tp_formats, tp_subtitles = self._extract_theplatform_smil(
-                tp_release_url, content_id, 'Downloading %s SMIL data' % pid)
-            formats.extend(tp_formats)
-            subtitles = self._merge_subtitles(subtitles, tp_subtitles)
-        self._sort_formats(formats)
-
-        info = self.get_metadata('dJ5BDC/media/guid/2198311517/%s' % content_id, content_id)
-        info.update({
-            'id': content_id,
-            'display_id': display_id,
-            'title': title,
-            'series': xpath_text(video_data, 'seriesTitle'),
-            'season_number': int_or_none(xpath_text(video_data, 'seasonNumber')),
-            'episode_number': int_or_none(xpath_text(video_data, 'episodeNumber')),
-            'duration': int_or_none(xpath_text(video_data, 'videoLength'), 1000),
-            'thumbnail': xpath_text(video_data, 'previewImageURL'),
-            'formats': formats,
-            'subtitles': subtitles,
-        })
-        return info
+        content_id = self._match_id(url)
+        return self._extract_video_info('byGuid=%s' % content_id, content_id)
youtube_dl/extractor/cbsnews.py
@@ -30,9 +30,12 @@ class CBSNewsIE(CBSBaseIE):
         {
             'url': 'http://www.cbsnews.com/videos/fort-hood-shooting-army-downplays-mental-illness-as-cause-of-attack/',
             'info_dict': {
-                'id': 'fort-hood-shooting-army-downplays-mental-illness-as-cause-of-attack',
+                'id': 'SNJBOYzXiWBOvaLsdzwH8fmtP1SCd91Y',
                 'ext': 'mp4',
                 'title': 'Fort Hood shooting: Army downplays mental illness as cause of attack',
+                'description': 'md5:4a6983e480542d8b333a947bfc64ddc7',
+                'upload_date': '19700101',
+                'uploader': 'CBSI-NEW',
                 'thumbnail': 're:^https?://.*\.jpg$',
                 'duration': 205,
                 'subtitles': {
@@ -58,30 +61,8 @@ class CBSNewsIE(CBSBaseIE):
             webpage, 'video JSON info'), video_id)
 
         item = video_info['item'] if 'item' in video_info else video_info
-        title = item.get('articleTitle') or item.get('hed')
-        duration = item.get('duration')
-        thumbnail = item.get('mediaImage') or item.get('thumbnail')
-
-        subtitles = {}
-        formats = []
-        for format_id in ['RtmpMobileLow', 'RtmpMobileHigh', 'Hls', 'RtmpDesktop']:
-            pid = item.get('media' + format_id)
-            if not pid:
-                continue
-            release_url = 'http://link.theplatform.com/s/dJ5BDC/%s?mbr=true' % pid
-            tp_formats, tp_subtitles = self._extract_theplatform_smil(release_url, video_id, 'Downloading %s SMIL data' % pid)
-            formats.extend(tp_formats)
-            subtitles = self._merge_subtitles(subtitles, tp_subtitles)
-        self._sort_formats(formats)
-
-        return {
-            'id': video_id,
-            'title': title,
-            'thumbnail': thumbnail,
-            'duration': duration,
-            'formats': formats,
-            'subtitles': subtitles,
-        }
+        guid = item['mpxRefId']
+        return self._extract_video_info('byGuid=%s' % guid, guid)
 
 
 class CBSNewsLiveVideoIE(InfoExtractor):
youtube_dl/extractor/cbssports.py
@@ -1,30 +1,28 @@
 from __future__ import unicode_literals
 
-import re
-
-from .common import InfoExtractor
+from .cbs import CBSBaseIE
 
 
-class CBSSportsIE(InfoExtractor):
-    _VALID_URL = r'https?://www\.cbssports\.com/video/player/(?P<section>[^/]+)/(?P<id>[^/]+)'
+class CBSSportsIE(CBSBaseIE):
+    _VALID_URL = r'https?://www\.cbssports\.com/video/player/[^/]+/(?P<id>\d+)'
 
-    _TEST = {
-        'url': 'http://www.cbssports.com/video/player/tennis/318462531970/0/us-open-flashbacks-1990s',
+    _TESTS = [{
+        'url': 'http://www.cbssports.com/video/player/videos/708337219968/0/ben-simmons-the-next-lebron?-not-so-fast',
         'info_dict': {
-            'id': '_d5_GbO8p1sT',
-            'ext': 'flv',
-            'title': 'US Open flashbacks: 1990s',
-            'description': 'Bill Macatee relives the best moments in US Open history from the 1990s.',
+            'id': '708337219968',
+            'ext': 'mp4',
+            'title': 'Ben Simmons the next LeBron? Not so fast',
+            'description': 'md5:854294f627921baba1f4b9a990d87197',
+            'timestamp': 1466293740,
+            'upload_date': '20160618',
+            'uploader': 'CBSI-NEW',
         },
+        'params': {
+            # m3u8 download
+            'skip_download': True,
         }
+    }]
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        section = mobj.group('section')
-        video_id = mobj.group('id')
-        all_videos = self._download_json(
-            'http://www.cbssports.com/data/video/player/getVideos/%s?as=json' % section,
-            video_id)
-        # The json file contains the info of all the videos in the section
-        video_info = next(v for v in all_videos if v['pcid'] == video_id)
-        return self.url_result('theplatform:%s' % video_info['pid'], 'ThePlatform')
+        video_id = self._match_id(url)
+        return self._extract_video_info('byId=%s' % video_id, video_id)
youtube_dl/extractor/cda.py
@@ -58,7 +58,8 @@ class CDAIE(InfoExtractor):
         def extract_format(page, version):
             unpacked = decode_packed_codes(page)
             format_url = self._search_regex(
-                r"url:\\'(.+?)\\'", unpacked, '%s url' % version, fatal=False)
+                r"(?:file|url)\s*:\s*(\\?[\"'])(?P<url>http.+?)\1", unpacked,
+                '%s url' % version, fatal=False, group='url')
             if not format_url:
                 return
             f = {
@@ -75,7 +76,8 @@ class CDAIE(InfoExtractor):
             info_dict['formats'].append(f)
             if not info_dict['duration']:
                 info_dict['duration'] = parse_duration(self._search_regex(
-                    r"duration:\\'(.+?)\\'", unpacked, 'duration', fatal=False))
+                    r"duration\s*:\s*(\\?[\"'])(?P<duration>.+?)\1",
+                    unpacked, 'duration', fatal=False, group='duration'))
 
         extract_format(webpage, 'default')
 
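For context on the widened CDA patterns above: the old expression only matched url:\'...\' with escaped single quotes, while the new one also accepts a file: key, optional whitespace, and plain double quotes. A minimal, illustrative check follows; the player snippets in it are invented sample strings, only the regex is taken from the diff.

import re

CDA_URL_RE = r"(?:file|url)\s*:\s*(\\?[\"'])(?P<url>http.+?)\1"

samples = [
    r"player.init({url:\'http://example.com/video.mp4\'});",   # old style, escaped single quotes
    'player.init({file:"http://example.com/video.mp4"});',     # new style, plain double quotes
]
for snippet in samples:
    match = re.search(CDA_URL_RE, snippet)
    print(match.group('url') if match else None)  # both print the URL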
youtube_dl/extractor/closertotruth.py (new file, 92 lines)
@@ -0,0 +1,92 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+
+
+class CloserToTruthIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?closertotruth\.com/(?:[^/]+/)*(?P<id>[^/?#&]+)'
+    _TESTS = [{
+        'url': 'http://closertotruth.com/series/solutions-the-mind-body-problem#video-3688',
+        'info_dict': {
+            'id': '0_zof1ktre',
+            'display_id': 'solutions-the-mind-body-problem',
+            'ext': 'mov',
+            'title': 'Solutions to the Mind-Body Problem?',
+            'upload_date': '20140221',
+            'timestamp': 1392956007,
+            'uploader_id': 'CTTXML'
+        },
+        'params': {
+            'skip_download': True,
+        },
+    }, {
+        'url': 'http://closertotruth.com/episodes/how-do-brains-work',
+        'info_dict': {
+            'id': '0_iuxai6g6',
+            'display_id': 'how-do-brains-work',
+            'ext': 'mov',
+            'title': 'How do Brains Work?',
+            'upload_date': '20140221',
+            'timestamp': 1392956024,
+            'uploader_id': 'CTTXML'
+        },
+        'params': {
+            'skip_download': True,
+        },
+    }, {
+        'url': 'http://closertotruth.com/interviews/1725',
+        'info_dict': {
+            'id': '1725',
+            'title': 'AyaFr-002',
+        },
+        'playlist_mincount': 2,
+    }]
+
+    def _real_extract(self, url):
+        display_id = self._match_id(url)
+
+        webpage = self._download_webpage(url, display_id)
+
+        partner_id = self._search_regex(
+            r'<script[^>]+src=["\'].*?\b(?:partner_id|p)/(\d+)',
+            webpage, 'kaltura partner_id')
+
+        title = self._search_regex(
+            r'<title>(.+?)\s*\|\s*.+?</title>', webpage, 'video title')
+
+        select = self._search_regex(
+            r'(?s)<select[^>]+id="select-version"[^>]*>(.+?)</select>',
+            webpage, 'select version', default=None)
+        if select:
+            entry_ids = set()
+            entries = []
+            for mobj in re.finditer(
+                    r'<option[^>]+value=(["\'])(?P<id>[0-9a-z_]+)(?:#.+?)?\1[^>]*>(?P<title>[^<]+)',
+                    webpage):
+                entry_id = mobj.group('id')
+                if entry_id in entry_ids:
+                    continue
+                entry_ids.add(entry_id)
+                entries.append({
+                    '_type': 'url_transparent',
+                    'url': 'kaltura:%s:%s' % (partner_id, entry_id),
+                    'ie_key': 'Kaltura',
+                    'title': mobj.group('title'),
+                })
+            if entries:
+                return self.playlist_result(entries, display_id, title)
+
+        entry_id = self._search_regex(
+            r'<a[^>]+id=(["\'])embed-kaltura\1[^>]+data-kaltura=(["\'])(?P<id>[0-9a-z_]+)\2',
+            webpage, 'kaltura entry_id', group='id')
+
+        return {
+            '_type': 'url_transparent',
+            'display_id': display_id,
+            'url': 'kaltura:%s:%s' % (partner_id, entry_id),
+            'ie_key': 'Kaltura',
+            'title': title
+        }
youtube_dl/extractor/common.py
@@ -53,6 +53,7 @@ from ..utils import (
     mimetype2ext,
     update_Request,
     update_url_query,
+    parse_m3u8_attributes,
 )
 
 
@@ -748,10 +749,12 @@ class InfoExtractor(object):
         return self._og_search_property('url', html, **kargs)
 
     def _html_search_meta(self, name, html, display_name=None, fatal=False, **kwargs):
+        if not isinstance(name, (list, tuple)):
+            name = [name]
         if display_name is None:
-            display_name = name
+            display_name = name[0]
         return self._html_search_regex(
-            self._meta_regex(name),
+            [self._meta_regex(n) for n in name],
             html, display_name, fatal=fatal, group='content', **kwargs)
 
     def _dc_search_uploader(self, html):
@@ -875,7 +878,11 @@ class InfoExtractor(object):
                 f['ext'] = determine_ext(f['url'])
 
             if isinstance(field_preference, (list, tuple)):
-                return tuple(f.get(field) if f.get(field) is not None else -1 for field in field_preference)
+                return tuple(
+                    f.get(field)
+                    if f.get(field) is not None
+                    else ('' if field == 'format_id' else -1)
+                    for field in field_preference)
 
             preference = f.get('preference')
             if preference is None:
@@ -1150,23 +1157,11 @@ class InfoExtractor(object):
         }]
         last_info = None
         last_media = None
-        kv_rex = re.compile(
-            r'(?P<key>[a-zA-Z_-]+)=(?P<val>"[^"]+"|[^",]+)(?:,|$)')
         for line in m3u8_doc.splitlines():
             if line.startswith('#EXT-X-STREAM-INF:'):
-                last_info = {}
-                for m in kv_rex.finditer(line):
-                    v = m.group('val')
-                    if v.startswith('"'):
-                        v = v[1:-1]
-                    last_info[m.group('key')] = v
+                last_info = parse_m3u8_attributes(line)
            elif line.startswith('#EXT-X-MEDIA:'):
-                last_media = {}
-                for m in kv_rex.finditer(line):
-                    v = m.group('val')
-                    if v.startswith('"'):
-                        v = v[1:-1]
-                    last_media[m.group('key')] = v
+                last_media = parse_m3u8_attributes(line)
             elif line.startswith('#') or not line.strip():
                 continue
             else:
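The common.py hunk above swaps the inline kv_rex loops for a parse_m3u8_attributes helper imported from ..utils, whose body is not shown in this compare view. The sketch below is an approximation of what such a helper would do (split one #EXT-X-* line into a key/value dict, stripping surrounding quotes); it is an assumption, not the actual utils implementation.

import re

def parse_m3u8_attributes_sketch(attrib):
    # Turn e.g. '#EXT-X-STREAM-INF:BANDWIDTH=640000,CODECS="avc1,mp4a"' into a dict,
    # dropping the double quotes around quoted values.
    info = {}
    for key, val in re.findall(r'(?P<key>[A-Z0-9-]+)=(?P<val>"[^"]+"|[^",]+)(?:,|$)', attrib):
        if val.startswith('"'):
            val = val[1:-1]
        info[key] = val
    return info

# parse_m3u8_attributes_sketch('#EXT-X-STREAM-INF:BANDWIDTH=640000,RESOLUTION=640x360,CODECS="avc1.4d001e,mp4a.40.2"')
# -> {'BANDWIDTH': '640000', 'RESOLUTION': '640x360', 'CODECS': 'avc1.4d001e,mp4a.40.2'}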
youtube_dl/extractor/dcn.py
@@ -20,7 +20,7 @@ from ..utils import (
 
 
 class DCNIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?dcndigital\.ae/(?:#/)?show/(?P<show_id>\d+)/[^/]+(?:/(?P<video_id>\d+)/(?P<season_id>\d+))?'
+    _VALID_URL = r'https?://(?:www\.)?(?:awaan|dcndigital)\.ae/(?:#/)?show/(?P<show_id>\d+)/[^/]+(?:/(?P<video_id>\d+)/(?P<season_id>\d+))?'
 
     def _real_extract(self, url):
         show_id, video_id, season_id = re.match(self._VALID_URL, url).groups()
@@ -55,30 +55,32 @@ class DCNBaseIE(InfoExtractor):
             'is_live': is_live,
         }
 
-    def _extract_video_formats(self, webpage, video_id, entry_protocol):
+    def _extract_video_formats(self, webpage, video_id, m3u8_entry_protocol):
         formats = []
-        m3u8_url = self._html_search_regex(
-            r'file\s*:\s*"([^"]+)', webpage, 'm3u8 url', fatal=False)
-        if m3u8_url:
-            formats.extend(self._extract_m3u8_formats(
-                m3u8_url, video_id, 'mp4', entry_protocol, m3u8_id='hls', fatal=None))
-
-        rtsp_url = self._search_regex(
-            r'<a[^>]+href="(rtsp://[^"]+)"', webpage, 'rtsp url', fatal=False)
-        if rtsp_url:
-            formats.append({
-                'url': rtsp_url,
-                'format_id': 'rtsp',
-            })
-
+        format_url_base = 'http' + self._html_search_regex(
+            [
+                r'file\s*:\s*"https?(://[^"]+)/playlist.m3u8',
+                r'<a[^>]+href="rtsp(://[^"]+)"'
+            ], webpage, 'format url')
+        # TODO: Current DASH formats are broken - $Time$ pattern in
+        # <SegmentTemplate> not implemented yet
+        # formats.extend(self._extract_mpd_formats(
+        #     format_url_base + '/manifest.mpd',
+        #     video_id, mpd_id='dash', fatal=False))
+        formats.extend(self._extract_m3u8_formats(
+            format_url_base + '/playlist.m3u8', video_id, 'mp4',
+            m3u8_entry_protocol, m3u8_id='hls', fatal=False))
+        formats.extend(self._extract_f4m_formats(
+            format_url_base + '/manifest.f4m',
+            video_id, f4m_id='hds', fatal=False))
         self._sort_formats(formats)
         return formats
 
 
 class DCNVideoIE(DCNBaseIE):
     IE_NAME = 'dcn:video'
-    _VALID_URL = r'https?://(?:www\.)?dcndigital\.ae/(?:#/)?(?:video/[^/]+|media|catchup/[^/]+/[^/]+)/(?P<id>\d+)'
-    _TEST = {
+    _VALID_URL = r'https?://(?:www\.)?(?:awaan|dcndigital)\.ae/(?:#/)?(?:video(?:/[^/]+)?|media|catchup/[^/]+/[^/]+)/(?P<id>\d+)'
+    _TESTS = [{
         'url': 'http://www.dcndigital.ae/#/video/%D8%B1%D8%AD%D9%84%D8%A9-%D8%A7%D9%84%D8%B9%D9%85%D8%B1-%D8%A7%D9%84%D8%AD%D9%84%D9%82%D8%A9-1/17375',
         'info_dict':
         {
@@ -94,7 +96,10 @@ class DCNVideoIE(DCNBaseIE):
             # m3u8 download
             'skip_download': True,
         },
-    }
+    }, {
+        'url': 'http://awaan.ae/video/26723981/%D8%AF%D8%A7%D8%B1-%D8%A7%D9%84%D8%B3%D9%84%D8%A7%D9%85:-%D8%AE%D9%8A%D8%B1-%D8%AF%D9%88%D8%B1-%D8%A7%D9%84%D8%A3%D9%86%D8%B5%D8%A7%D8%B1',
+        'only_matching': True,
+    }]
 
     def _real_extract(self, url):
         video_id = self._match_id(url)
@@ -120,7 +125,7 @@ class DCNVideoIE(DCNBaseIE):
 
 class DCNLiveIE(DCNBaseIE):
     IE_NAME = 'dcn:live'
-    _VALID_URL = r'https?://(?:www\.)?dcndigital\.ae/(?:#/)?live/(?P<id>\d+)'
+    _VALID_URL = r'https?://(?:www\.)?(?:awaan|dcndigital)\.ae/(?:#/)?live/(?P<id>\d+)'
 
     def _real_extract(self, url):
         channel_id = self._match_id(url)
@@ -147,7 +152,7 @@ class DCNLiveIE(DCNBaseIE):
 
 class DCNSeasonIE(InfoExtractor):
     IE_NAME = 'dcn:season'
-    _VALID_URL = r'https?://(?:www\.)?dcndigital\.ae/(?:#/)?program/(?:(?P<show_id>\d+)|season/(?P<season_id>\d+))'
+    _VALID_URL = r'https?://(?:www\.)?(?:awaan|dcndigital)\.ae/(?:#/)?program/(?:(?P<show_id>\d+)|season/(?P<season_id>\d+))'
     _TEST = {
         'url': 'http://dcndigital.ae/#/program/205024/%D9%85%D8%AD%D8%A7%D8%B6%D8%B1%D8%A7%D8%AA-%D8%A7%D9%84%D8%B4%D9%8A%D8%AE-%D8%A7%D9%84%D8%B4%D8%B9%D8%B1%D8%A7%D9%88%D9%8A',
         'info_dict':
youtube_dl/extractor/extractors.py
@@ -44,7 +44,6 @@ from .archiveorg import ArchiveOrgIE
 from .ard import (
     ARDIE,
     ARDMediathekIE,
-    SportschauIE,
 )
 from .arte import (
     ArteTvIE,
@@ -71,6 +70,8 @@ from .bandcamp import BandcampIE, BandcampAlbumIE
 from .bbc import (
     BBCCoUkIE,
     BBCCoUkArticleIE,
+    BBCCoUkIPlayerPlaylistIE,
+    BBCCoUkPlaylistIE,
     BBCIE,
 )
 from .beeg import BeegIE
@@ -108,6 +109,10 @@ from .camwithher import CamWithHerIE
 from .canalplus import CanalplusIE
 from .canalc2 import Canalc2IE
 from .canvas import CanvasIE
+from .carambatv import (
+    CarambaTVIE,
+    CarambaTVPageIE,
+)
 from .cbc import (
     CBCIE,
     CBCPlayerIE,
@@ -135,6 +140,7 @@ from .cliprs import ClipRsIE
 from .clipfish import ClipfishIE
 from .cliphunter import CliphunterIE
 from .clipsyndicate import ClipsyndicateIE
+from .closertotruth import CloserToTruthIE
 from .cloudy import CloudyIE
 from .clubic import ClubicIE
 from .clyp import ClypIE
@@ -279,7 +285,6 @@ from .gameone import (
 from .gamersyde import GamersydeIE
 from .gamespot import GameSpotIE
 from .gamestar import GameStarIE
-from .gametrailers import GametrailersIE
 from .gazeta import GazetaIE
 from .gdcvault import GDCVaultIE
 from .generic import GenericIE
@@ -449,6 +454,7 @@ from .motherless import MotherlessIE
 from .motorsport import MotorsportIE
 from .movieclips import MovieClipsIE
 from .moviezine import MoviezineIE
+from .msn import MSNIE
 from .mtv import (
     MTVIE,
     MTVServicesEmbeddedIE,
@@ -475,7 +481,6 @@ from .nbc import (
     NBCNewsIE,
     NBCSportsIE,
     NBCSportsVPlayerIE,
-    MSNBCIE,
 )
 from .ndr import (
     NDRIE,
@@ -512,7 +517,10 @@ from .nhl import (
     NHLVideocenterCategoryIE,
     NHLIE,
 )
-from .nick import NickIE
+from .nick import (
+    NickIE,
+    NickDeIE,
+)
 from .niconico import NiconicoIE, NiconicoPlaylistIE
 from .ninegag import NineGagIE
 from .noco import NocoIE
@@ -599,6 +607,7 @@ from .pluralsight import (
     PluralsightCourseIE,
 )
 from .podomatic import PodomaticIE
+from .polskieradio import PolskieRadioIE
 from .porn91 import Porn91IE
 from .pornhd import PornHdIE
 from .pornhub import (
@@ -622,7 +631,10 @@ from .qqmusic import (
     QQMusicToplistIE,
     QQMusicPlaylistIE,
 )
-from .r7 import R7IE
+from .r7 import (
+    R7IE,
+    R7ArticleIE,
+)
 from .radiocanada import (
     RadioCanadaIE,
     RadioCanadaAudioVideoIE,
@@ -649,6 +661,7 @@ from .revision3 import (
 from .rice import RICEIE
 from .ringtv import RingTVIE
 from .ro220 import Ro220IE
+from .rockstargames import RockstarGamesIE
 from .rottentomatoes import RottenTomatoesIE
 from .roxwel import RoxwelIE
 from .rtbf import RTBFIE
@@ -737,6 +750,7 @@ from .sportbox import (
     SportBoxEmbedIE,
 )
 from .sportdeutschland import SportDeutschlandIE
+from .sportschau import SportschauIE
 from .srgssr import (
     SRGSSRIE,
     SRGSSRPlayIE,
@@ -777,6 +791,7 @@ from .telecinco import TelecincoIE
 from .telegraaf import TelegraafIE
 from .telemb import TeleMBIE
 from .teletask import TeleTaskIE
+from .telewebion import TelewebionIE
 from .testurl import TestURLIE
 from .tf1 import TF1IE
 from .theintercept import TheInterceptIE
@@ -861,6 +876,7 @@ from .twitch import (
     TwitchProfileIE,
     TwitchPastBroadcastsIE,
     TwitchStreamIE,
+    TwitchClipsIE,
 )
 from .twitter import (
     TwitterCardIE,
@@ -901,6 +917,7 @@ from .vice import (
     ViceIE,
     ViceShowIE,
 )
+from .vidbit import VidbitIE
 from .viddler import ViddlerIE
 from .videodetective import VideoDetectiveIE
 from .videofyme import VideofyMeIE
@@ -977,7 +994,10 @@ from .weiqitv import WeiqiTVIE
 from .wimp import WimpIE
 from .wistia import WistiaIE
 from .worldstarhiphop import WorldStarHipHopIE
-from .wrzuta import WrzutaIE
+from .wrzuta import (
+    WrzutaIE,
+    WrzutaPlaylistIE,
+)
 from .wsj import WSJIE
 from .xbef import XBefIE
 from .xboxclips import XboxClipsIE
youtube_dl/extractor/facebook.py
@@ -239,6 +239,8 @@ class FacebookIE(InfoExtractor):
 
         formats = []
         for format_id, f in video_data.items():
+            if f and isinstance(f, dict):
+                f = [f]
             if not f or not isinstance(f, list):
                 continue
             for quality in ('sd', 'hd'):
youtube_dl/extractor/foxsports.py
@@ -1,7 +1,10 @@
 from __future__ import unicode_literals
 
 from .common import InfoExtractor
-from ..utils import smuggle_url
+from ..utils import (
+    smuggle_url,
+    update_url_query,
+)
 
 
 class FoxSportsIE(InfoExtractor):
@@ -9,11 +12,15 @@ class FoxSportsIE(InfoExtractor):
 
     _TEST = {
         'url': 'http://www.foxsports.com/video?vid=432609859715',
+        'md5': 'b49050e955bebe32c301972e4012ac17',
         'info_dict': {
-            'id': 'gA0bHB3Ladz3',
-            'ext': 'flv',
+            'id': 'i0qKWsk3qJaM',
+            'ext': 'mp4',
             'title': 'Courtney Lee on going up 2-0 in series vs. Blazers',
             'description': 'Courtney Lee talks about Memphis being focused.',
+            'upload_date': '20150423',
+            'timestamp': 1429761109,
+            'uploader': 'NEWA-FNG-FOXSPORTS',
         },
         'add_ie': ['ThePlatform'],
     }
@@ -28,5 +35,8 @@ class FoxSportsIE(InfoExtractor):
                 r"data-player-config='([^']+)'", webpage, 'data player config'),
             video_id)
 
-        return self.url_result(smuggle_url(
-            config['releaseURL'] + '&manifest=f4m', {'force_smil_url': True}))
+        return self.url_result(smuggle_url(update_url_query(
+            config['releaseURL'], {
+                'mbr': 'true',
+                'switch': 'http',
+            }), {'force_smil_url': True}))
youtube_dl/extractor/gamespot.py
@@ -1,19 +1,19 @@
 from __future__ import unicode_literals
 
 import re
-import json
 
-from .common import InfoExtractor
+from .once import OnceIE
 from ..compat import (
     compat_urllib_parse_unquote,
-    compat_urlparse,
 )
 from ..utils import (
     unescapeHTML,
+    url_basename,
+    dict_get,
 )
 
 
-class GameSpotIE(InfoExtractor):
+class GameSpotIE(OnceIE):
     _VALID_URL = r'https?://(?:www\.)?gamespot\.com/.*-(?P<id>\d+)/?'
     _TESTS = [{
         'url': 'http://www.gamespot.com/videos/arma-3-community-guide-sitrep-i/2300-6410818/',
@@ -39,29 +39,73 @@ class GameSpotIE(InfoExtractor):
         webpage = self._download_webpage(url, page_id)
         data_video_json = self._search_regex(
             r'data-video=["\'](.*?)["\']', webpage, 'data video')
-        data_video = json.loads(unescapeHTML(data_video_json))
+        data_video = self._parse_json(unescapeHTML(data_video_json), page_id)
         streams = data_video['videoStreams']
 
+        manifest_url = None
         formats = []
         f4m_url = streams.get('f4m_stream')
-        if f4m_url is not None:
-            # Transform the manifest url to a link to the mp4 files
-            # they are used in mobile devices.
-            f4m_path = compat_urlparse.urlparse(f4m_url).path
-            QUALITIES_RE = r'((,\d+)+,?)'
-            qualities = self._search_regex(QUALITIES_RE, f4m_path, 'qualities').strip(',').split(',')
-            http_path = f4m_path[1:].split('/', 1)[1]
-            http_template = re.sub(QUALITIES_RE, r'%s', http_path)
-            http_template = http_template.replace('.csmil/manifest.f4m', '')
-            http_template = compat_urlparse.urljoin(
-                'http://video.gamespotcdn.com/', http_template)
-            for q in qualities:
-                formats.append({
-                    'url': http_template % q,
-                    'ext': 'mp4',
-                    'format_id': q,
-                })
-        else:
+        if f4m_url:
+            manifest_url = f4m_url
+            formats.extend(self._extract_f4m_formats(
+                f4m_url + '?hdcore=3.7.0', page_id, f4m_id='hds', fatal=False))
+        m3u8_url = streams.get('m3u8_stream')
+        if m3u8_url:
+            manifest_url = m3u8_url
+            m3u8_formats = self._extract_m3u8_formats(
+                m3u8_url, page_id, 'mp4', 'm3u8_native',
+                m3u8_id='hls', fatal=False)
+            formats.extend(m3u8_formats)
+        progressive_url = dict_get(
+            streams, ('progressive_hd', 'progressive_high', 'progressive_low'))
+        if progressive_url and manifest_url:
+            qualities_basename = self._search_regex(
+                '/([^/]+)\.csmil/',
+                manifest_url, 'qualities basename', default=None)
+            if qualities_basename:
+                QUALITIES_RE = r'((,\d+)+,?)'
+                qualities = self._search_regex(
+                    QUALITIES_RE, qualities_basename,
+                    'qualities', default=None)
+                if qualities:
+                    qualities = list(map(lambda q: int(q), qualities.strip(',').split(',')))
+                    qualities.sort()
+                    http_template = re.sub(QUALITIES_RE, r'%d', qualities_basename)
+                    http_url_basename = url_basename(progressive_url)
+                    if m3u8_formats:
+                        self._sort_formats(m3u8_formats)
+                        m3u8_formats = list(filter(
+                            lambda f: f.get('vcodec') != 'none' and f.get('resolution') != 'multiple',
+                            m3u8_formats))
+                    if len(qualities) == len(m3u8_formats):
+                        for q, m3u8_format in zip(qualities, m3u8_formats):
+                            f = m3u8_format.copy()
+                            f.update({
+                                'url': progressive_url.replace(
+                                    http_url_basename, http_template % q),
+                                'format_id': f['format_id'].replace('hls', 'http'),
+                                'protocol': 'http',
+                            })
+                            formats.append(f)
+                    else:
+                        for q in qualities:
+                            formats.append({
+                                'url': progressive_url.replace(
+                                    http_url_basename, http_template % q),
+                                'ext': 'mp4',
+                                'format_id': 'http-%d' % q,
+                                'tbr': q,
+                            })
+
+        onceux_json = self._search_regex(
+            r'data-onceux-options=["\'](.*?)["\']', webpage, 'data video', default=None)
+        if onceux_json:
+            onceux_url = self._parse_json(unescapeHTML(onceux_json), page_id).get('metadataUri')
+            if onceux_url:
+                formats.extend(self._extract_once_formats(re.sub(
+                    r'https?://[^/]+', 'http://once.unicornmedia.com', onceux_url).replace('ads/vmap/', '')))
+
+        if not formats:
             for quality in ['sd', 'hd']:
                 # It's actually a link to a flv file
                 flv_url = streams.get('f4m_{0}'.format(quality))
@ -71,6 +115,7 @@ class GameSpotIE(InfoExtractor):
|
|||||||
'ext': 'flv',
|
'ext': 'flv',
|
||||||
'format_id': quality,
|
'format_id': quality,
|
||||||
})
|
})
|
||||||
|
self._sort_formats(formats)
|
||||||
|
|
||||||
return {
|
return {
|
||||||
'id': data_video['guid'],
|
'id': data_video['guid'],
|
||||||
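
The new GameSpot code above derives progressive HTTP formats by reading the comma-separated bitrate list out of the `....csmil` basename of the HLS/HDS manifest URL and substituting each bitrate back into the progressive URL. A standalone sketch of that substitution, with invented example URLs:

# Standalone sketch of the quality-template substitution above; the URLs are invented examples.
import re
from os.path import basename

manifest_url = 'http://example.com/videos/hls/d5/2017/,400,800,1800,3200,.mp4.csmil/master.m3u8'
progressive_url = 'http://example.com/videos/d5/2017/800.mp4'

QUALITIES_RE = r'((,\d+)+,?)'
qualities_basename = re.search(r'/([^/]+)\.csmil/', manifest_url).group(1)   # ',400,800,1800,3200,.mp4'
qualities = sorted(int(q) for q in re.search(
    QUALITIES_RE, qualities_basename).group(1).strip(',').split(','))
http_template = re.sub(QUALITIES_RE, r'%d', qualities_basename)              # '%d.mp4'
http_url_basename = basename(progressive_url)                                # '800.mp4'

for q in qualities:
    # one progressive URL per advertised bitrate
    print(progressive_url.replace(http_url_basename, http_template % q))
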
@@ -1,62 +0,0 @@
-from __future__ import unicode_literals
-
-from .common import InfoExtractor
-from ..utils import (
-    int_or_none,
-    parse_age_limit,
-    url_basename,
-)
-
-
-class GametrailersIE(InfoExtractor):
-    _VALID_URL = r'https?://www\.gametrailers\.com/videos/view/[^/]+/(?P<id>.+)'
-
-    _TEST = {
-        'url': 'http://www.gametrailers.com/videos/view/gametrailers-com/116437-Just-Cause-3-Review',
-        'md5': 'f28c4efa0bdfaf9b760f6507955b6a6a',
-        'info_dict': {
-            'id': '2983958',
-            'ext': 'mp4',
-            'display_id': '116437-Just-Cause-3-Review',
-            'title': 'Just Cause 3 - Review',
-            'description': 'It\'s a lot of fun to shoot at things and then watch them explode in Just Cause 3, but should there be more to the experience than that?',
-        },
-    }
-
-    def _real_extract(self, url):
-        display_id = self._match_id(url)
-        webpage = self._download_webpage(url, display_id)
-        title = self._html_search_regex(
-            r'<title>(.+?)\|', webpage, 'title').strip()
-        embed_url = self._proto_relative_url(
-            self._search_regex(
-                r'src=\'(//embed.gametrailers.com/embed/[^\']+)\'', webpage,
-                'embed url'),
-            scheme='http:')
-        video_id = url_basename(embed_url)
-        embed_page = self._download_webpage(embed_url, video_id)
-        embed_vars_json = self._search_regex(
-            r'(?s)var embedVars = (\{.*?\})\s*</script>', embed_page,
-            'embed vars')
-        info = self._parse_json(embed_vars_json, video_id)
-
-        formats = []
-        for media in info['media']:
-            if media['mediaPurpose'] == 'play':
-                formats.append({
-                    'url': media['uri'],
-                    'height': media['height'],
-                    'width:': media['width'],
-                })
-        self._sort_formats(formats)
-
-        return {
-            'id': video_id,
-            'display_id': display_id,
-            'title': title,
-            'formats': formats,
-            'thumbnail': info.get('thumbUri'),
-            'description': self._og_search_description(webpage),
-            'duration': int_or_none(info.get('videoLengthInSeconds')),
-            'age_limit': parse_age_limit(info.get('audienceRating')),
-        }
@@ -1073,20 +1073,6 @@ class GenericIE(InfoExtractor):
                 'skip_download': True,
             }
         },
-        # Contains a SMIL manifest
-        {
-            'url': 'http://www.telewebion.com/fa/1263668/%D9%82%D8%B1%D8%B9%D9%87%E2%80%8C%DA%A9%D8%B4%DB%8C-%D9%84%DB%8C%DA%AF-%D9%82%D9%87%D8%B1%D9%85%D8%A7%D9%86%D8%A7%D9%86-%D8%A7%D8%B1%D9%88%D9%BE%D8%A7/%2B-%D9%81%D9%88%D8%AA%D8%A8%D8%A7%D9%84.html',
-            'info_dict': {
-                'id': 'file',
-                'ext': 'flv',
-                'title': '+ Football: Lottery Champions League Europe',
-                'uploader': 'www.telewebion.com',
-            },
-            'params': {
-                # rtmpe downloads
-                'skip_download': True,
-            }
-        },
         # Brightcove URL in single quotes
         {
             'url': 'http://www.sportsnet.ca/baseball/mlb/sn-presents-russell-martin-world-citizen/',
@@ -1105,12 +1091,17 @@ class GenericIE(InfoExtractor):
         # Dailymotion Cloud video
         {
             'url': 'http://replay.publicsenat.fr/vod/le-debat/florent-kolandjian,dominique-cena,axel-decourtye,laurence-abeille,bruno-parmentier/175910',
-            'md5': '49444254273501a64675a7e68c502681',
+            'md5': 'dcaf23ad0c67a256f4278bce6e0bae38',
             'info_dict': {
-                'id': '5585de919473990de4bee11b',
+                'id': 'x2uy8t3',
                 'ext': 'mp4',
-                'title': 'Le débat',
+                'title': 'Sauvons les abeilles ! - Le débat',
+                'description': 'md5:d9082128b1c5277987825d684939ca26',
                 'thumbnail': 're:^https?://.*\.jpe?g$',
+                'timestamp': 1434970506,
+                'upload_date': '20150622',
+                'uploader': 'Public Sénat',
+                'uploader_id': 'xa9gza',
             }
         },
         # OnionStudios embed
@@ -12,7 +12,7 @@ from ..utils import (
 class ImdbIE(InfoExtractor):
     IE_NAME = 'imdb'
     IE_DESC = 'Internet Movie Database trailers'
-    _VALID_URL = r'https?://(?:www|m)\.imdb\.com/video/[^/]+/vi(?P<id>\d+)'
+    _VALID_URL = r'https?://(?:www|m)\.imdb\.com/(?:video/[^/]+/|title/tt\d+.*?#lb-)vi(?P<id>\d+)'
 
     _TESTS = [{
         'url': 'http://www.imdb.com/video/imdb/vi2524815897',
@@ -25,6 +25,12 @@ class ImdbIE(InfoExtractor):
     }, {
         'url': 'http://www.imdb.com/video/_/vi2524815897',
         'only_matching': True,
+    }, {
+        'url': 'http://www.imdb.com/title/tt1667889/?ref_=ext_shr_eml_vi#lb-vi2524815897',
+        'only_matching': True,
+    }, {
+        'url': 'http://www.imdb.com/title/tt1667889/#lb-vi2524815897',
+        'only_matching': True,
     }]
 
     def _real_extract(self, url):
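
The widened IMDb pattern above accepts both the classic /video/.../viNNN pages and title pages where the clip id follows a #lb- fragment. A quick sanity check against the two URL shapes taken from the tests:

# Quick check of the widened IMDb pattern against both URL shapes from the tests above.
import re

_VALID_URL = r'https?://(?:www|m)\.imdb\.com/(?:video/[^/]+/|title/tt\d+.*?#lb-)vi(?P<id>\d+)'
for url in (
        'http://www.imdb.com/video/imdb/vi2524815897',
        'http://www.imdb.com/title/tt1667889/?ref_=ext_shr_eml_vi#lb-vi2524815897'):
    print(re.match(_VALID_URL, url).group('id'))  # 2524815897 in both cases
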
@@ -60,7 +60,8 @@ class IndavideoEmbedIE(InfoExtractor):
 
         formats = [{
             'url': video_url,
-            'height': self._search_regex(r'\.(\d{3,4})\.mp4$', video_url, 'height', default=None),
+            'height': int_or_none(self._search_regex(
+                r'\.(\d{3,4})\.mp4(?:\?|$)', video_url, 'height', default=None)),
         } for video_url in video_urls]
         self._sort_formats(formats)
 
@@ -8,6 +8,7 @@ from ..utils import (
     int_or_none,
     limit_length,
     lowercase_escape,
+    try_get,
 )
 
 
@@ -19,10 +20,16 @@ class InstagramIE(InfoExtractor):
         'info_dict': {
             'id': 'aye83DjauH',
             'ext': 'mp4',
-            'uploader_id': 'naomipq',
             'title': 'Video by naomipq',
             'description': 'md5:1f17f0ab29bd6fe2bfad705f58de3cb8',
-        }
+            'thumbnail': 're:^https?://.*\.jpg',
+            'timestamp': 1371748545,
+            'upload_date': '20130620',
+            'uploader_id': 'naomipq',
+            'uploader': 'Naomi Leonor Phan-Quang',
+            'like_count': int,
+            'comment_count': int,
+        },
     }, {
         # missing description
         'url': 'https://www.instagram.com/p/BA-pQFBG8HZ/?taken-by=britneyspears',
@@ -31,6 +38,13 @@ class InstagramIE(InfoExtractor):
             'ext': 'mp4',
             'uploader_id': 'britneyspears',
             'title': 'Video by britneyspears',
+            'thumbnail': 're:^https?://.*\.jpg',
+            'timestamp': 1453760977,
+            'upload_date': '20160125',
+            'uploader_id': 'britneyspears',
+            'uploader': 'Britney Spears',
+            'like_count': int,
+            'comment_count': int,
         },
         'params': {
             'skip_download': True,
@@ -67,21 +81,57 @@ class InstagramIE(InfoExtractor):
         url = mobj.group('url')
 
         webpage = self._download_webpage(url, video_id)
-        uploader_id = self._search_regex(r'"owner":{"username":"(.+?)"',
+
+        (video_url, description, thumbnail, timestamp, uploader,
+         uploader_id, like_count, comment_count) = [None] * 8
+
+        shared_data = self._parse_json(
+            self._search_regex(
+                r'window\._sharedData\s*=\s*({.+?});',
+                webpage, 'shared data', default='{}'),
+            video_id, fatal=False)
+        if shared_data:
+            media = try_get(
+                shared_data, lambda x: x['entry_data']['PostPage'][0]['media'], dict)
+            if media:
+                video_url = media.get('video_url')
+                description = media.get('caption')
+                thumbnail = media.get('display_src')
+                timestamp = int_or_none(media.get('date'))
+                uploader = media.get('owner', {}).get('full_name')
+                uploader_id = media.get('owner', {}).get('username')
+                like_count = int_or_none(media.get('likes', {}).get('count'))
+                comment_count = int_or_none(media.get('comments', {}).get('count'))
+
+        if not video_url:
+            video_url = self._og_search_video_url(webpage, secure=False)
+
+        if not uploader_id:
+            uploader_id = self._search_regex(
+                r'"owner"\s*:\s*{\s*"username"\s*:\s*"(.+?)"',
                 webpage, 'uploader id', fatal=False)
-        desc = self._search_regex(
-            r'"caption":"(.+?)"', webpage, 'description', default=None)
-        if desc is not None:
-            desc = lowercase_escape(desc)
+
+        if not description:
+            description = self._search_regex(
+                r'"caption"\s*:\s*"(.+?)"', webpage, 'description', default=None)
+            if description is not None:
+                description = lowercase_escape(description)
+
+        if not thumbnail:
+            thumbnail = self._og_search_thumbnail(webpage)
 
         return {
             'id': video_id,
-            'url': self._og_search_video_url(webpage, secure=False),
+            'url': video_url,
             'ext': 'mp4',
             'title': 'Video by %s' % uploader_id,
-            'thumbnail': self._og_search_thumbnail(webpage),
+            'description': description,
+            'thumbnail': thumbnail,
+            'timestamp': timestamp,
             'uploader_id': uploader_id,
-            'description': desc,
+            'uploader': uploader,
+            'like_count': like_count,
+            'comment_count': comment_count,
         }
 
 
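
The Instagram change above prefers the window._sharedData JSON embedded in the page and walks it defensively with try_get. A standalone sketch of that lookup; the embedded JSON is a fabricated example, and try_get is imported from youtube_dl.utils as in the diff:

# Standalone sketch of the window._sharedData lookup; the JSON blob is a fabricated example.
import json
import re

from youtube_dl.utils import try_get

webpage = '''<script>window._sharedData = {"entry_data": {"PostPage": [{"media":
    {"video_url": "https://example.com/v.mp4", "owner": {"username": "naomipq"}}}]}};</script>'''

shared_data = json.loads(re.search(
    r'window\._sharedData\s*=\s*({.+?});', webpage, re.DOTALL).group(1))
media = try_get(
    shared_data, lambda x: x['entry_data']['PostPage'][0]['media'], dict)
print(media.get('video_url'))  # https://example.com/v.mp4
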
@@ -1,30 +1,25 @@
 # coding: utf-8
 from __future__ import unicode_literals
 
+import binascii
 import hashlib
 import itertools
 import math
-import os
-import random
 import re
 import time
-import uuid
 
 from .common import InfoExtractor
 from ..compat import (
-    compat_parse_qs,
     compat_str,
     compat_urllib_parse_urlencode,
-    compat_urllib_parse_urlparse,
 )
 from ..utils import (
     decode_packed_codes,
     ExtractorError,
+    intlist_to_bytes,
     ohdave_rsa_encrypt,
     remove_start,
-    sanitized_Request,
-    urlencode_postdata,
-    url_basename,
+    urshift,
 )
 
 
@@ -171,70 +166,21 @@ class IqiyiIE(InfoExtractor):
 
     _TESTS = [{
         'url': 'http://www.iqiyi.com/v_19rrojlavg.html',
-        'md5': '2cb594dc2781e6c941a110d8f358118b',
+        'md5': '470a6c160618577166db1a7aac5a3606',
         'info_dict': {
             'id': '9c1fb1b99d192b21c559e5a1a2cb3c73',
+            'ext': 'mp4',
             'title': '美国德州空中惊现奇异云团 酷似UFO',
-            'ext': 'f4v',
         }
     }, {
         'url': 'http://www.iqiyi.com/v_19rrhnnclk.html',
+        'md5': 'f09f0a6a59b2da66a26bf4eda669a4cc',
         'info_dict': {
             'id': 'e3f585b550a280af23c98b6cb2be19fb',
-            'title': '名侦探柯南第752集',
-        },
-        'playlist': [{
-            'info_dict': {
-                'id': 'e3f585b550a280af23c98b6cb2be19fb_part1',
-                'ext': 'f4v',
-                'title': '名侦探柯南第752集',
-            },
-        }, {
-            'info_dict': {
-                'id': 'e3f585b550a280af23c98b6cb2be19fb_part2',
-                'ext': 'f4v',
-                'title': '名侦探柯南第752集',
-            },
-        }, {
-            'info_dict': {
-                'id': 'e3f585b550a280af23c98b6cb2be19fb_part3',
-                'ext': 'f4v',
-                'title': '名侦探柯南第752集',
-            },
-        }, {
-            'info_dict': {
-                'id': 'e3f585b550a280af23c98b6cb2be19fb_part4',
-                'ext': 'f4v',
-                'title': '名侦探柯南第752集',
-            },
-        }, {
-            'info_dict': {
-                'id': 'e3f585b550a280af23c98b6cb2be19fb_part5',
-                'ext': 'f4v',
-                'title': '名侦探柯南第752集',
-            },
-        }, {
-            'info_dict': {
-                'id': 'e3f585b550a280af23c98b6cb2be19fb_part6',
-                'ext': 'f4v',
-                'title': '名侦探柯南第752集',
-            },
-        }, {
-            'info_dict': {
-                'id': 'e3f585b550a280af23c98b6cb2be19fb_part7',
-                'ext': 'f4v',
-                'title': '名侦探柯南第752集',
-            },
-        }, {
-            'info_dict': {
-                'id': 'e3f585b550a280af23c98b6cb2be19fb_part8',
-                'ext': 'f4v',
-                'title': '名侦探柯南第752集',
-            },
-        }],
-        'params': {
-            'skip_download': True,
+            'ext': 'mp4',
+            'title': '名侦探柯南 国语版',
         },
+        'skip': 'Geo-restricted to China',
     }, {
         'url': 'http://www.iqiyi.com/w_19rt6o8t9p.html',
         'only_matching': True,
@@ -287,13 +233,6 @@ class IqiyiIE(InfoExtractor):
         ('10', 'h1'),
     ]
 
-    AUTH_API_ERRORS = {
-        # No preview available (不允许试看鉴权失败)
-        'Q00505': 'This video requires a VIP account',
-        # End of preview time (试看结束鉴权失败)
-        'Q00506': 'Needs a VIP account for full video',
-    }
-
     def _real_initialize(self):
         self._login()
 
@@ -352,177 +291,101 @@ class IqiyiIE(InfoExtractor):
 
         return True
 
-    def _authenticate_vip_video(self, api_video_url, video_id, tvid, _uuid, do_report_warning):
-        auth_params = {
-            # version and platform hard-coded in com/qiyi/player/core/model/remote/AuthenticationRemote.as
-            'version': '2.0',
-            'platform': 'b6c13e26323c537d',
-            'aid': tvid,
+    @staticmethod
+    def _gen_sc(tvid, timestamp):
+        M = [1732584193, -271733879]
+        M.extend([~M[0], ~M[1]])
+        I_table = [7, 12, 17, 22, 5, 9, 14, 20, 4, 11, 16, 23, 6, 10, 15, 21]
+        C_base = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8388608, 432]
+
+        def L(n, t):
+            if t is None:
+                t = 0
+            return trunc(((n >> 1) + (t >> 1) << 1) + (n & 1) + (t & 1))
+
+        def trunc(n):
+            n = n % 0x100000000
+            if n > 0x7fffffff:
+                n -= 0x100000000
+            return n
+
+        def transform(string, mod):
+            num = int(string, 16)
+            return (num >> 8 * (i % 4) & 255 ^ i % mod) << ((a & 3) << 3)
+
+        C = list(C_base)
+        o = list(M)
+        k = str(timestamp - 7)
+        for i in range(13):
+            a = i
+            C[a >> 2] |= ord(k[a]) << 8 * (a % 4)
+
+        for i in range(16):
+            a = i + 13
+            start = (i >> 2) * 8
+            r = '03967743b643f66763d623d637e30733'
+            C[a >> 2] |= transform(''.join(reversed(r[start:start + 8])), 7)
+
+        for i in range(16):
+            a = i + 29
+            start = (i >> 2) * 8
+            r = '7038766939776a32776a32706b337139'
+            C[a >> 2] |= transform(r[start:start + 8], 1)
+
+        for i in range(9):
+            a = i + 45
+            if i < len(tvid):
+                C[a >> 2] |= ord(tvid[i]) << 8 * (a % 4)
+
+        for a in range(64):
+            i = a
+            I = i >> 4
+            C_index = [i, 5 * i + 1, 3 * i + 5, 7 * i][I] % 16 + urshift(a, 6)
+            m = L(L(o[0], [
+                trunc(o[1] & o[2]) | trunc(~o[1] & o[3]),
+                trunc(o[3] & o[1]) | trunc(~o[3] & o[2]),
+                o[1] ^ o[2] ^ o[3],
+                o[2] ^ trunc(o[1] | ~o[3])
+            ][I]), L(
+                trunc(int(abs(math.sin(i + 1)) * 4294967296)),
+                C[C_index] if C_index < len(C) else None))
+            I = I_table[4 * I + i % 4]
+            o = [o[3],
+                 L(o[1], trunc(trunc(m << I) | urshift(m, 32 - I))),
+                 o[1],
+                 o[2]]
+
+        new_M = [L(o[0], M[0]), L(o[1], M[1]), L(o[2], M[2]), L(o[3], M[3])]
+        s = [new_M[a >> 3] >> (1 ^ a & 7) * 4 & 15 for a in range(32)]
+        return binascii.hexlify(intlist_to_bytes(s))[1::2].decode('ascii')
+
+    def get_raw_data(self, tvid, video_id):
+        tm = int(time.time() * 1000)
+
+        sc = self._gen_sc(tvid, tm)
+        params = {
+            'platForm': 'h5',
+            'rate': 1,
             'tvid': tvid,
-            'uid': '',
-            'deviceId': _uuid,
-            'playType': 'main',  # XXX: always main?
-            'filename': os.path.splitext(url_basename(api_video_url))[0],
-        }
-
-        qd_items = compat_parse_qs(compat_urllib_parse_urlparse(api_video_url).query)
-        for key, val in qd_items.items():
-            auth_params[key] = val[0]
-
-        auth_req = sanitized_Request(
-            'http://api.vip.iqiyi.com/services/ckn.action',
-            urlencode_postdata(auth_params))
-        # iQiyi server throws HTTP 405 error without the following header
-        auth_req.add_header('Content-Type', 'application/x-www-form-urlencoded')
-        auth_result = self._download_json(
-            auth_req, video_id,
-            note='Downloading video authentication JSON',
-            errnote='Unable to download video authentication JSON')
-
-        code = auth_result.get('code')
-        msg = self.AUTH_API_ERRORS.get(code) or auth_result.get('msg') or code
-        if code == 'Q00506':
-            if do_report_warning:
-                self.report_warning(msg)
-            return False
-        if 'data' not in auth_result:
-            if msg is not None:
-                raise ExtractorError('%s said: %s' % (self.IE_NAME, msg), expected=True)
-            raise ExtractorError('Unexpected error from Iqiyi auth API')
-
-        return auth_result['data']
-
-    def construct_video_urls(self, data, video_id, _uuid, tvid):
-        def do_xor(x, y):
-            a = y % 3
-            if a == 1:
-                return x ^ 121
-            if a == 2:
-                return x ^ 72
-            return x ^ 103
-
-        def get_encode_code(l):
-            a = 0
-            b = l.split('-')
-            c = len(b)
-            s = ''
-            for i in range(c - 1, -1, -1):
-                a = do_xor(int(b[c - i - 1], 16), i)
-                s += chr(a)
-            return s[::-1]
-
-        def get_path_key(x, format_id, segment_index):
-            mg = ')(*&^flash@#$%a'
-            tm = self._download_json(
-                'http://data.video.qiyi.com/t?tn=' + str(random.random()), video_id,
-                note='Download path key of segment %d for format %s' % (segment_index + 1, format_id)
-            )['t']
-            t = str(int(math.floor(int(tm) / (600.0))))
-            return md5_text(t + mg + x)
-
-        video_urls_dict = {}
-        need_vip_warning_report = True
-        for format_item in data['vp']['tkl'][0]['vs']:
-            if 0 < int(format_item['bid']) <= 10:
-                format_id = self.get_format(format_item['bid'])
-            else:
-                continue
-
-            video_urls = []
-
-            video_urls_info = format_item['fs']
-            if not format_item['fs'][0]['l'].startswith('/'):
-                t = get_encode_code(format_item['fs'][0]['l'])
-                if t.endswith('mp4'):
-                    video_urls_info = format_item['flvs']
-
-            for segment_index, segment in enumerate(video_urls_info):
-                vl = segment['l']
-                if not vl.startswith('/'):
-                    vl = get_encode_code(vl)
-                is_vip_video = '/vip/' in vl
-                filesize = segment['b']
-                base_url = data['vp']['du'].split('/')
-                if not is_vip_video:
-                    key = get_path_key(
-                        vl.split('/')[-1].split('.')[0], format_id, segment_index)
-                    base_url.insert(-1, key)
-                base_url = '/'.join(base_url)
-                param = {
-                    'su': _uuid,
-                    'qyid': uuid.uuid4().hex,
-                    'client': '',
-                    'z': '',
-                    'bt': '',
-                    'ct': '',
-                    'tn': str(int(time.time()))
-                }
-                api_video_url = base_url + vl
-                if is_vip_video:
-                    api_video_url = api_video_url.replace('.f4v', '.hml')
-                    auth_result = self._authenticate_vip_video(
-                        api_video_url, video_id, tvid, _uuid, need_vip_warning_report)
-                    if auth_result is False:
-                        need_vip_warning_report = False
-                        break
-                    param.update({
-                        't': auth_result['t'],
-                        # cid is hard-coded in com/qiyi/player/core/player/RuntimeData.as
-                        'cid': 'afbe8fd3d73448c9',
             'vid': video_id,
-                        'QY00001': auth_result['u'],
-                    })
-                api_video_url += '?' if '?' not in api_video_url else '&'
-                api_video_url += compat_urllib_parse_urlencode(param)
-                js = self._download_json(
-                    api_video_url, video_id,
-                    note='Download video info of segment %d for format %s' % (segment_index + 1, format_id))
-                video_url = js['l']
-                video_urls.append(
-                    (video_url, filesize))
-
-            video_urls_dict[format_id] = video_urls
-        return video_urls_dict
-
-    def get_format(self, bid):
-        matched_format_ids = [_format_id for _bid, _format_id in self._FORMATS_MAP if _bid == str(bid)]
-        return matched_format_ids[0] if len(matched_format_ids) else None
-
-    def get_bid(self, format_id):
-        matched_bids = [_bid for _bid, _format_id in self._FORMATS_MAP if _format_id == format_id]
-        return matched_bids[0] if len(matched_bids) else None
-
-    def get_raw_data(self, tvid, video_id, enc_key, _uuid):
-        tm = str(int(time.time()))
-        tail = tm + tvid
-        param = {
-            'key': 'fvip',
-            'src': md5_text('youtube-dl'),
-            'tvId': tvid,
-            'vid': video_id,
-            'vinfo': 1,
-            'tm': tm,
-            'enc': md5_text(enc_key + tail),
-            'qyid': _uuid,
-            'tn': random.random(),
-            # In iQiyi's flash player, um is set to 1 if there's a logged user
-            # Some 1080P formats are only available with a logged user.
-            # Here force um=1 to trick the iQiyi server
-            'um': 1,
-            'authkey': md5_text(md5_text('') + tail),
-            'k_tag': 1,
+            'cupid': 'qc_100001_100186',
+            'type': 'mp4',
+            'nolimit': 0,
+            'agenttype': 13,
+            'src': 'd846d0c32d664d32b6b54ea48997a589',
+            'sc': sc,
+            't': tm - 7,
+            '__jsT': None,
         }
 
-        api_url = 'http://cache.video.qiyi.com/vms' + '?' + \
-            compat_urllib_parse_urlencode(param)
-        raw_data = self._download_json(api_url, video_id)
-        return raw_data
-
-    def get_enc_key(self, video_id):
-        # TODO: automatic key extraction
-        # last update at 2016-01-22 for Zombie::bite
-        enc_key = '4a1caba4b4465345366f28da7c117d20'
-        return enc_key
+        headers = {}
+        cn_verification_proxy = self._downloader.params.get('cn_verification_proxy')
+        if cn_verification_proxy:
+            headers['Ytdl-request-proxy'] = cn_verification_proxy
+        return self._download_json(
+            'http://cache.m.iqiyi.com/jp/tmts/%s/%s/' % (tvid, video_id),
+            video_id, transform_source=lambda s: remove_start(s, 'var tvInfoJs='),
+            query=params, headers=headers)
 
     def _extract_playlist(self, webpage):
         PAGE_SIZE = 50
@@ -571,58 +434,27 @@ class IqiyiIE(InfoExtractor):
             r'data-player-tvid\s*=\s*[\'"](\d+)', webpage, 'tvid')
         video_id = self._search_regex(
             r'data-player-videoid\s*=\s*[\'"]([a-f\d]+)', webpage, 'video_id')
-        _uuid = uuid.uuid4().hex
 
-        enc_key = self.get_enc_key(video_id)
+        for _ in range(5):
+            raw_data = self.get_raw_data(tvid, video_id)
 
-        raw_data = self.get_raw_data(tvid, video_id, enc_key, _uuid)
-        if raw_data['code'] != 'A000000':
+            if raw_data['code'] != 'A00000':
+                if raw_data['code'] == 'A00111':
+                    self.raise_geo_restricted()
                 raise ExtractorError('Unable to load data. Error code: ' + raw_data['code'])
 
             data = raw_data['data']
 
-        title = data['vi']['vn']
-        # generate video_urls_dict
-        video_urls_dict = self.construct_video_urls(
-            data, video_id, _uuid, tvid)
+            # iQiYi sometimes returns Ads
+            if not isinstance(data['playInfo'], dict):
+                self._sleep(5, video_id)
+                continue
+
+            title = data['playInfo']['an']
+            break
 
-        # construct info
-        entries = []
-        for format_id in video_urls_dict:
-            video_urls = video_urls_dict[format_id]
-            for i, video_url_info in enumerate(video_urls):
-                if len(entries) < i + 1:
-                    entries.append({'formats': []})
-                entries[i]['formats'].append(
-                    {
-                        'url': video_url_info[0],
-                        'filesize': video_url_info[-1],
-                        'format_id': format_id,
-                        'preference': int(self.get_bid(format_id))
-                    }
-                )
-
-        for i in range(len(entries)):
-            self._sort_formats(entries[i]['formats'])
-            entries[i].update(
-                {
-                    'id': '%s_part%d' % (video_id, i + 1),
-                    'title': title,
-                }
-            )
-
-        if len(entries) > 1:
-            info = {
-                '_type': 'multi_video',
+        return {
             'id': video_id,
             'title': title,
-                'entries': entries,
+            'url': data['m3u'],
         }
-        else:
-            info = entries[0]
-            info['id'] = video_id
-            info['title'] = title
-
-        return info
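
The rewritten iQiyi extractor above drops the old VIP-auth and per-segment machinery and instead queries the mobile tmts endpoint, signing each request with an sc checksum that _gen_sc() derives from the tvid and a millisecond timestamp. A rough sketch of how such a request would be assembled; the sc value is stubbed rather than recomputed, and both ids are invented examples:

# Sketch of assembling the mobile tmts request used above; 'sc' is a placeholder here,
# in the extractor it comes from _gen_sc(tvid, tm). The tvid/vid values are invented.
import time

try:
    from urllib.parse import urlencode  # Python 3
except ImportError:
    from urllib import urlencode        # Python 2

tvid, video_id = '123456789', '9c1fb1b99d192b21c559e5a1a2cb3c73'
tm = int(time.time() * 1000)
sc = '0123456789abcdef0123456789abcdef'  # stand-in for _gen_sc(tvid, tm)

params = {
    'platForm': 'h5',
    'rate': 1,
    'tvid': tvid,
    'vid': video_id,
    'cupid': 'qc_100001_100186',
    'type': 'mp4',
    'nolimit': 0,
    'agenttype': 13,
    'src': 'd846d0c32d664d32b6b54ea48997a589',
    'sc': sc,
    't': tm - 7,
}
api_url = 'http://cache.m.iqiyi.com/jp/tmts/%s/%s/?%s' % (tvid, video_id, urlencode(params))
print(api_url)  # the extractor downloads this as JSON (after stripping the 'var tvInfoJs=' prefix)
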
@@ -12,9 +12,35 @@ from ..utils import (
 
 
 class JWPlatformBaseIE(InfoExtractor):
+    @staticmethod
+    def _find_jwplayer_data(webpage):
+        # TODO: Merge this with JWPlayer-related codes in generic.py
+
+        mobj = re.search(
+            'jwplayer\((?P<quote>[\'"])[^\'" ]+(?P=quote)\)\.setup\((?P<options>[^)]+)\)',
+            webpage)
+        if mobj:
+            return mobj.group('options')
+
+    def _extract_jwplayer_data(self, webpage, video_id, *args, **kwargs):
+        jwplayer_data = self._parse_json(
+            self._find_jwplayer_data(webpage), video_id)
+        return self._parse_jwplayer_data(
+            jwplayer_data, video_id, *args, **kwargs)
+
     def _parse_jwplayer_data(self, jwplayer_data, video_id, require_title=True, m3u8_id=None, rtmp_params=None):
+        # JWPlayer backward compatibility: flattened playlists
+        # https://github.com/jwplayer/jwplayer/blob/v7.4.3/src/js/api/config.js#L81-L96
+        if 'playlist' not in jwplayer_data:
+            jwplayer_data = {'playlist': [jwplayer_data]}
+
         video_data = jwplayer_data['playlist'][0]
 
+        # JWPlayer backward compatibility: flattened sources
+        # https://github.com/jwplayer/jwplayer/blob/v7.4.3/src/js/playlist/item.js#L29-L35
+        if 'sources' not in video_data:
+            video_data['sources'] = [video_data]
+
         formats = []
         for source in video_data['sources']:
             source_url = self._proto_relative_url(source['file'])
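
The _find_jwplayer_data() helper added above simply pulls the first argument of a jwplayer("...").setup({...}) call out of the page. A standalone sketch of the same regex applied to a fabricated embed snippet:

# Standalone sketch of the jwplayer(...).setup(...) lookup; the HTML snippet is a fabricated example.
import json
import re

webpage = '''<script>jwplayer("video-player").setup({"playlist": [{"sources":
    [{"file": "//cdn.example.com/clip.mp4"}]}]});</script>'''

mobj = re.search(
    r'jwplayer\((?P<quote>[\'"])[^\'" ]+(?P=quote)\)\.setup\((?P<options>[^)]+)\)',
    webpage)
options = json.loads(mobj.group('options'))
print(options['playlist'][0]['sources'][0]['file'])  # //cdn.example.com/clip.mp4
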
@@ -148,8 +148,8 @@ class KuwoAlbumIE(InfoExtractor):
         'url': 'http://www.kuwo.cn/album/502294/',
         'info_dict': {
             'id': '502294',
-            'title': 'M',
-            'description': 'md5:6a7235a84cc6400ec3b38a7bdaf1d60c',
+            'title': 'Made\xa0Series\xa0《M》',
+            'description': 'md5:d463f0d8a0ff3c3ea3d6ed7452a9483f',
         },
         'playlist_count': 2,
     }
@@ -209,7 +209,7 @@ class KuwoSingerIE(InfoExtractor):
         'url': 'http://www.kuwo.cn/mingxing/bruno+mars/',
         'info_dict': {
             'id': 'bruno+mars',
-            'title': 'Bruno Mars',
+            'title': 'Bruno\xa0Mars',
         },
         'playlist_mincount': 329,
     }, {
@@ -306,7 +306,7 @@ class KuwoMvIE(KuwoBaseIE):
             'id': '6480076',
             'ext': 'mp4',
             'title': 'My HouseMV',
-            'creator': '2PM',
+            'creator': 'PM02:00',
         },
         # In this video, music URLs (anti.s) are blocked outside China and
         # USA, while the MV URL (mvurl) is available globally, so force the MV
@@ -23,12 +23,13 @@ from ..utils import (
     sanitized_Request,
     str_or_none,
     url_basename,
+    urshift,
 )
 
 
 class LeIE(InfoExtractor):
     IE_DESC = '乐视网'
-    _VALID_URL = r'https?://www\.le\.com/ptv/vplay/(?P<id>\d+)\.html'
+    _VALID_URL = r'https?://(?:www\.le\.com/ptv/vplay|sports\.le\.com/video)/(?P<id>\d+)\.html'
 
     _URL_TEMPLATE = 'http://www.le.com/ptv/vplay/%s.html'
 
@@ -69,17 +70,16 @@ class LeIE(InfoExtractor):
             'hls_prefer_native': True,
         },
         'skip': 'Only available in China',
+    }, {
+        'url': 'http://sports.le.com/video/25737697.html',
+        'only_matching': True,
     }]
 
-    @staticmethod
-    def urshift(val, n):
-        return val >> n if val >= 0 else (val + 0x100000000) >> n
-
     # ror() and calc_time_key() are reversed from a embedded swf file in KLetvPlayer.swf
     def ror(self, param1, param2):
         _loc3_ = 0
         while _loc3_ < param2:
-            param1 = self.urshift(param1, 1) + ((param1 & 1) << 31)
+            param1 = urshift(param1, 1) + ((param1 & 1) << 31)
             _loc3_ += 1
         return param1
 
@@ -196,7 +196,7 @@ class LeIE(InfoExtractor):
 
 
 class LePlaylistIE(InfoExtractor):
-    _VALID_URL = r'https?://[a-z]+\.le\.com/[a-z]+/(?P<id>[a-z0-9_]+)'
+    _VALID_URL = r'https?://[a-z]+\.le\.com/(?!video)[a-z]+/(?P<id>[a-z0-9_]+)'
 
     _TESTS = [{
         'url': 'http://www.le.com/tv/46177.html',
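
The urshift() helper deleted above, an unsigned 32-bit right shift equivalent to ActionScript's >>> operator, now lives in youtube_dl/utils.py so that iqiyi.py can reuse it. A tiny standalone equivalent showing the behaviour:

# Tiny standalone equivalent of the urshift() helper that moved into youtube_dl/utils.py.
def urshift(val, n):
    return val >> n if val >= 0 else (val + 0x100000000) >> n

print(urshift(-1, 28))  # 15, whereas Python's plain -1 >> 28 stays -1
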
@@ -98,13 +98,19 @@ class LimelightBaseIE(InfoExtractor):
         } for thumbnail in properties.get('thumbnails', []) if thumbnail.get('url')]
 
         subtitles = {}
-        for caption in properties.get('captions', {}):
+        for caption in properties.get('captions', []):
             lang = caption.get('language_code')
             subtitles_url = caption.get('url')
             if lang and subtitles_url:
-                subtitles[lang] = [{
+                subtitles.setdefault(lang, []).append({
                     'url': subtitles_url,
-                }]
+                })
+        closed_captions_url = properties.get('closed_captions_url')
+        if closed_captions_url:
+            subtitles.setdefault('en', []).append({
+                'url': closed_captions_url,
+                'ext': 'ttml',
+            })
 
         return {
             'id': video_id,
@@ -123,7 +129,18 @@ class LimelightBaseIE(InfoExtractor):
 
 class LimelightMediaIE(LimelightBaseIE):
     IE_NAME = 'limelight'
-    _VALID_URL = r'(?:limelight:media:|https?://link\.videoplatform\.limelight\.com/media/\??\bmediaId=)(?P<id>[a-z0-9]{32})'
+    _VALID_URL = r'''(?x)
+        (?:
+            limelight:media:|
+            https?://
+                (?:
+                    link\.videoplatform\.limelight\.com/media/|
+                    assets\.delvenetworks\.com/player/loader\.swf
+                )
+                \?.*?\bmediaId=
+        )
+        (?P<id>[a-z0-9]{32})
+    '''
     _TESTS = [{
         'url': 'http://link.videoplatform.limelight.com/media/?mediaId=3ffd040b522b4485b6d84effc750cd86',
         'info_dict': {
@@ -158,6 +175,9 @@ class LimelightMediaIE(LimelightBaseIE):
             # rtmp download
             'skip_download': True,
         },
+    }, {
+        'url': 'https://assets.delvenetworks.com/player/loader.swf?mediaId=8018a574f08d416e95ceaccae4ba0452',
+        'only_matching': True,
     }]
     _PLAYLIST_SERVICE_PATH = 'media'
     _API_PATH = 'media'
@@ -176,15 +196,29 @@ class LimelightMediaIE(LimelightBaseIE):
 
 class LimelightChannelIE(LimelightBaseIE):
     IE_NAME = 'limelight:channel'
-    _VALID_URL = r'(?:limelight:channel:|https?://link\.videoplatform\.limelight\.com/media/\??\bchannelId=)(?P<id>[a-z0-9]{32})'
-    _TEST = {
+    _VALID_URL = r'''(?x)
+        (?:
+            limelight:channel:|
+            https?://
+                (?:
+                    link\.videoplatform\.limelight\.com/media/|
+                    assets\.delvenetworks\.com/player/loader\.swf
+                )
+                \?.*?\bchannelId=
+        )
+        (?P<id>[a-z0-9]{32})
+    '''
+    _TESTS = [{
         'url': 'http://link.videoplatform.limelight.com/media/?channelId=ab6a524c379342f9b23642917020c082',
         'info_dict': {
             'id': 'ab6a524c379342f9b23642917020c082',
             'title': 'Javascript Sample Code',
         },
         'playlist_mincount': 3,
-    }
+    }, {
+        'url': 'http://assets.delvenetworks.com/player/loader.swf?channelId=ab6a524c379342f9b23642917020c082',
+        'only_matching': True,
+    }]
     _PLAYLIST_SERVICE_PATH = 'channel'
     _API_PATH = 'channels'
 
@@ -207,15 +241,29 @@ class LimelightChannelIE(LimelightBaseIE):
 
 class LimelightChannelListIE(LimelightBaseIE):
     IE_NAME = 'limelight:channel_list'
-    _VALID_URL = r'(?:limelight:channel_list:|https?://link\.videoplatform\.limelight\.com/media/\?.*?\bchannelListId=)(?P<id>[a-z0-9]{32})'
-    _TEST = {
+    _VALID_URL = r'''(?x)
+        (?:
+            limelight:channel_list:|
+            https?://
+                (?:
+                    link\.videoplatform\.limelight\.com/media/|
+                    assets\.delvenetworks\.com/player/loader\.swf
+                )
+                \?.*?\bchannelListId=
+        )
+        (?P<id>[a-z0-9]{32})
+    '''
+    _TESTS = [{
         'url': 'http://link.videoplatform.limelight.com/media/?channelListId=301b117890c4465c8179ede21fd92e2b',
         'info_dict': {
             'id': '301b117890c4465c8179ede21fd92e2b',
             'title': 'Website - Hero Player',
         },
         'playlist_mincount': 2,
-    }
+    }, {
+        'url': 'https://assets.delvenetworks.com/player/loader.swf?channelListId=301b117890c4465c8179ede21fd92e2b',
+        'only_matching': True,
+    }]
     _PLAYLIST_SERVICE_PATH = 'channel_list'
 
     def _real_extract(self, url):
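
The verbose Limelight patterns above now also accept the legacy Delve player loader URLs. A quick check of the media pattern against both player URLs taken from the tests:

# Quick check of the widened Limelight media pattern against both URL shapes from the tests above.
import re

_VALID_URL = r'''(?x)
    (?:
        limelight:media:|
        https?://
            (?:
                link\.videoplatform\.limelight\.com/media/|
                assets\.delvenetworks\.com/player/loader\.swf
            )
            \?.*?\bmediaId=
    )
    (?P<id>[a-z0-9]{32})
    '''
for url in (
        'http://link.videoplatform.limelight.com/media/?mediaId=3ffd040b522b4485b6d84effc750cd86',
        'https://assets.delvenetworks.com/player/loader.swf?mediaId=8018a574f08d416e95ceaccae4ba0452'):
    print(re.match(_VALID_URL, url).group('id'))
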
@@ -95,7 +95,6 @@ class LyndaIE(LyndaBaseIE):
     IE_NAME = 'lynda'
     IE_DESC = 'lynda.com videos'
     _VALID_URL = r'https?://www\.lynda\.com/(?:[^/]+/[^/]+/\d+|player/embed)/(?P<id>\d+)'
-    _NETRC_MACHINE = 'lynda'
 
     _TIMECODE_REGEX = r'\[(?P<timecode>\d+:\d+:\d+[\.,]\d+)\]'
 
@@ -4,16 +4,12 @@ from __future__ import unicode_literals
 import random
 
 from .common import InfoExtractor
-from ..compat import compat_urllib_parse_urlencode
-from ..utils import (
-    sanitized_Request,
-    xpath_text,
-)
+from ..utils import xpath_text
 
 
 class MatchTVIE(InfoExtractor):
-    _VALID_URL = r'https?://matchtv\.ru/?#live-player'
-    _TEST = {
+    _VALID_URL = r'https?://matchtv\.ru(?:/on-air|/?#live-player)'
+    _TESTS = [{
         'url': 'http://matchtv.ru/#live-player',
         'info_dict': {
             'id': 'matchtv-live',
@@ -24,12 +20,16 @@ class MatchTVIE(InfoExtractor):
         'params': {
             'skip_download': True,
         },
-    }
+    }, {
+        'url': 'http://matchtv.ru/on-air/',
+        'only_matching': True,
+    }]
 
     def _real_extract(self, url):
         video_id = 'matchtv-live'
-        request = sanitized_Request(
-            'http://player.matchtv.ntvplus.tv/player/smil?%s' % compat_urllib_parse_urlencode({
+        video_url = self._download_json(
+            'http://player.matchtv.ntvplus.tv/player/smil', video_id,
+            query={
                 'ts': '',
                 'quality': 'SD',
                 'contentId': '561d2c0df7159b37178b4567',
@@ -40,11 +40,10 @@ class MatchTVIE(InfoExtractor):
                 'contentType': 'channel',
                 'timeShift': '0',
                 'platform': 'portal',
-            }),
+            },
             headers={
                 'Referer': 'http://player.matchtv.ntvplus.tv/embed-player/NTVEmbedPlayer.swf',
-            })
-        video_url = self._download_json(request, video_id)['data']['videoUrl']
+            })['data']['videoUrl']
         f4m_url = xpath_text(self._download_xml(video_url, video_id), './to')
         formats = self._extract_f4m_formats(f4m_url, video_id)
         self._sort_formats(formats)
@@ -1,5 +1,8 @@
+# coding: utf-8
 from __future__ import unicode_literals
 
+import re
+
 from .common import InfoExtractor
 from ..compat import (
     compat_urllib_parse_urlencode,
@@ -8,6 +11,7 @@ from ..compat import (
 from ..utils import (
     get_element_by_attribute,
     int_or_none,
+    remove_start,
 )
 
 
@@ -15,7 +19,7 @@ class MiTeleIE(InfoExtractor):
     IE_DESC = 'mitele.es'
     _VALID_URL = r'https?://www\.mitele\.es/[^/]+/[^/]+/[^/]+/(?P<id>[^/]+)/'
 
-    _TEST = {
+    _TESTS = [{
         'url': 'http://www.mitele.es/programas-tv/diario-de/la-redaccion/programa-144/',
         # MD5 is unstable
         'info_dict': {
@@ -24,10 +28,31 @@ class MiTeleIE(InfoExtractor):
             'ext': 'flv',
             'title': 'Tor, la web invisible',
             'description': 'md5:3b6fce7eaa41b2d97358726378d9369f',
+            'series': 'Diario de',
+            'season': 'La redacción',
+            'episode': 'Programa 144',
             'thumbnail': 're:(?i)^https?://.*\.jpg$',
             'duration': 2913,
         },
-    }
+    }, {
+        # no explicit title
+        'url': 'http://www.mitele.es/programas-tv/cuarto-milenio/temporada-6/programa-226/',
+        'info_dict': {
+            'id': 'eLZSwoEd1S3pVyUm8lc6F',
+            'display_id': 'programa-226',
+            'ext': 'flv',
+            'title': 'Cuarto Milenio - Temporada 6 - Programa 226',
+            'description': 'md5:50daf9fadefa4e62d9fc866d0c015701',
+            'series': 'Cuarto Milenio',
+            'season': 'Temporada 6',
+            'episode': 'Programa 226',
+            'thumbnail': 're:(?i)^https?://.*\.jpg$',
+            'duration': 7312,
+        },
+        'params': {
+            'skip_download': True,
+        },
+    }]
 
     def _real_extract(self, url):
         display_id = self._match_id(url)
@@ -70,7 +95,22 @@ class MiTeleIE(InfoExtractor):
         self._sort_formats(formats)
 
         title = self._search_regex(
-            r'class="Destacado-text"[^>]*>\s*<strong>([^<]+)</strong>', webpage, 'title')
+            r'class="Destacado-text"[^>]*>\s*<strong>([^<]+)</strong>',
+            webpage, 'title', default=None)
+
+        mobj = re.search(r'''(?sx)
+            class="Destacado-text"[^>]*>.*?<h1>\s*
+                <span>(?P<series>[^<]+)</span>\s*
+                <span>(?P<season>[^<]+)</span>\s*
+                <span>(?P<episode>[^<]+)</span>''', webpage)
+        series, season, episode = mobj.groups() if mobj else [None] * 3
+
+        if not title:
+            if mobj:
+                title = '%s - %s - %s' % (series, season, episode)
+            else:
+                title = remove_start(self._search_regex(
+                    r'<title>([^<]+)</title>', webpage, 'title'), 'Ver online ')
 
         video_id = self._search_regex(
             r'data-media-id\s*=\s*"([^"]+)"', webpage,
@@ -83,6 +123,9 @@ class MiTeleIE(InfoExtractor):
             'display_id': display_id,
             'title': title,
             'description': get_element_by_attribute('class', 'text', webpage),
+            'series': series,
+            'season': season,
+            'episode': episode,
             'thumbnail': thumbnail,
             'duration': duration,
             'formats': formats,
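
When the page carries no explicit title, the MiTele change above scrapes the series/season/episode span triple and joins the pieces. A standalone sketch of that fallback against a fabricated header snippet:

# Standalone sketch of the series/season/episode fallback title; the HTML is a fabricated example.
import re

webpage = '''<div class="Destacado-text"><a><h1>
    <span>Cuarto Milenio</span> <span>Temporada 6</span> <span>Programa 226</span></h1></a></div>'''

mobj = re.search(r'''(?sx)
    class="Destacado-text"[^>]*>.*?<h1>\s*
        <span>(?P<series>[^<]+)</span>\s*
        <span>(?P<season>[^<]+)</span>\s*
        <span>(?P<episode>[^<]+)</span>''', webpage)
series, season, episode = mobj.groups() if mobj else [None] * 3
print('%s - %s - %s' % (series, season, episode))  # Cuarto Milenio - Temporada 6 - Programa 226
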
@@ -102,11 +102,11 @@ class MixcloudIE(InfoExtractor):
         description = self._og_search_description(webpage)
         like_count = parse_count(self._search_regex(
             r'\bbutton-favorite[^>]+>.*?<span[^>]+class=["\']toggle-number[^>]+>\s*([^<]+)',
-            webpage, 'like count', fatal=False))
+            webpage, 'like count', default=None))
         view_count = str_to_int(self._search_regex(
             [r'<meta itemprop="interactionCount" content="UserPlays:([0-9]+)"',
              r'/listeners/?">([0-9,.]+)</a>'],
-            webpage, 'play count', fatal=False))
+            webpage, 'play count', default=None))
 
         return {
             'id': track_id,
youtube_dl/extractor/msn.py (new file, 119 lines)
@@ -0,0 +1,119 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..compat import compat_str
+from ..utils import (
+    determine_ext,
+    ExtractorError,
+    int_or_none,
+    unescapeHTML,
+)
+
+
+class MSNIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?msn\.com/(?:[^/]+/)+(?P<display_id>[^/]+)/[a-z]{2}-(?P<id>[\da-zA-Z]+)'
+    _TESTS = [{
+        'url': 'http://www.msn.com/en-ae/foodanddrink/joinourtable/criminal-minds-shemar-moore-shares-a-touching-goodbye-message/vp-BBqQYNE',
+        'md5': '8442f66c116cbab1ff7098f986983458',
+        'info_dict': {
+            'id': 'BBqQYNE',
+            'display_id': 'criminal-minds-shemar-moore-shares-a-touching-goodbye-message',
+            'ext': 'mp4',
+            'title': 'Criminal Minds - Shemar Moore Shares A Touching Goodbye Message',
+            'description': 'md5:e8e89b897b222eb33a6b5067a8f1bc25',
+            'duration': 104,
+            'uploader': 'CBS Entertainment',
+            'uploader_id': 'IT0X5aoJ6bJgYerJXSDCgFmYPB1__54v',
+        },
+    }, {
+        'url': 'http://www.msn.com/en-ae/news/offbeat/meet-the-nine-year-old-self-made-millionaire/ar-BBt6ZKf',
+        'only_matching': True,
+    }, {
+        'url': 'http://www.msn.com/en-ae/video/watch/obama-a-lot-of-people-will-be-disappointed/vi-AAhxUMH',
+        'only_matching': True,
+    }, {
+        # geo restricted
+        'url': 'http://www.msn.com/en-ae/foodanddrink/joinourtable/the-first-fart-makes-you-laugh-the-last-fart-makes-you-cry/vp-AAhzIBU',
+        'only_matching': True,
+    }]
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id, display_id = mobj.group('id', 'display_id')
+
+        webpage = self._download_webpage(url, display_id)
+
+        video = self._parse_json(
+            self._search_regex(
+                r'data-metadata\s*=\s*(["\'])(?P<data>.+?)\1',
+                webpage, 'video data', default='{}', group='data'),
+            display_id, transform_source=unescapeHTML)
+
+        if not video:
+            error = unescapeHTML(self._search_regex(
+                r'data-error=(["\'])(?P<error>.+?)\1',
+                webpage, 'error', group='error'))
+            raise ExtractorError('%s said: %s' % (self.IE_NAME, error), expected=True)
+
+        title = video['title']
+
+        formats = []
+        for file_ in video.get('videoFiles', []):
+            format_url = file_.get('url')
+            if not format_url:
+                continue
+            ext = determine_ext(format_url)
+            # .ism is not yet supported (see
+            # https://github.com/rg3/youtube-dl/issues/8118)
+            if ext == 'ism':
+                continue
+            if 'm3u8' in format_url:
+                # m3u8_native should not be used here until
+                # https://github.com/rg3/youtube-dl/issues/9913 is fixed
+                m3u8_formats = self._extract_m3u8_formats(
+                    format_url, display_id, 'mp4',
+                    m3u8_id='hls', fatal=False)
+                # Despite metadata in m3u8 all video+audio formats are
+                # actually video-only (no audio)
+                for f in m3u8_formats:
+                    if f.get('acodec') != 'none' and f.get('vcodec') != 'none':
+                        f['acodec'] = 'none'
+                formats.extend(m3u8_formats)
+            else:
+                formats.append({
+                    'url': format_url,
+                    'ext': 'mp4',
+                    'format_id': 'http',
+                    'width': int_or_none(file_.get('width')),
+                    'height': int_or_none(file_.get('height')),
+                })
+        self._sort_formats(formats)
+
+        subtitles = {}
+        for file_ in video.get('files', []):
+            format_url = file_.get('url')
+            format_code = file_.get('formatCode')
+            if not format_url or not format_code:
+                continue
+            if compat_str(format_code) == '3100':
+                subtitles.setdefault(file_.get('culture', 'en'), []).append({
+                    'ext': determine_ext(format_url, 'ttml'),
+                    'url': format_url,
+                })
+
+        return {
+            'id': video_id,
+            'display_id': display_id,
+            'title': title,
+            'description': video.get('description'),
+            'thumbnail': video.get('headlineImage', {}).get('url'),
+            'duration': int_or_none(video.get('durationSecs')),
+            'uploader': video.get('sourceFriendly'),
+            'uploader_id': video.get('providerId'),
+            'creator': video.get('creator'),
+            'subtitles': subtitles,
+            'formats': formats,
+        }
@@ -6,6 +6,7 @@ from .common import InfoExtractor
 from ..compat import (
     compat_urllib_parse_urlencode,
     compat_str,
+    compat_xpath,
 )
 from ..utils import (
     ExtractorError,
@@ -84,9 +85,10 @@ class MTVServicesInfoExtractor(InfoExtractor):
                 rtmp_video_url = rendition.find('./src').text
                 if rtmp_video_url.endswith('siteunavail.png'):
                     continue
+                new_url = self._transform_rtmp_url(rtmp_video_url)
                 formats.append({
-                    'ext': ext,
-                    'url': self._transform_rtmp_url(rtmp_video_url),
+                    'ext': 'flv' if new_url.startswith('rtmp') else ext,
+                    'url': new_url,
                     'format_id': rendition.get('bitrate'),
                     'width': int(rendition.get('width')),
                     'height': int(rendition.get('height')),
@@ -139,9 +141,9 @@ class MTVServicesInfoExtractor(InfoExtractor):
             itemdoc, './/{http://search.yahoo.com/mrss/}category',
             'scheme', 'urn:mtvn:video_title')
         if title_el is None:
-            title_el = itemdoc.find('.//{http://search.yahoo.com/mrss/}title')
+            title_el = itemdoc.find(compat_xpath('.//{http://search.yahoo.com/mrss/}title'))
         if title_el is None:
-            title_el = itemdoc.find('.//title') or itemdoc.find('./title')
+            title_el = itemdoc.find(compat_xpath('.//title'))
         if title_el.text is None:
             title_el = None
 
@@ -9,10 +9,6 @@ from ..utils import (
     lowercase_escape,
     smuggle_url,
     unescapeHTML,
-    update_url_query,
-    int_or_none,
-    HEADRequest,
-    parse_iso8601,
 )
 
 
@@ -192,9 +188,9 @@ class CSNNEIE(InfoExtractor):
 
 
 class NBCNewsIE(ThePlatformIE):
-    _VALID_URL = r'''(?x)https?://(?:www\.)?(?:nbcnews|today)\.com/
+    _VALID_URL = r'''(?x)https?://(?:www\.)?(?:nbcnews|today|msnbc)\.com/
         (?:video/.+?/(?P<id>\d+)|
-            ([^/]+/)*(?P<display_id>[^/?]+))
+            ([^/]+/)*(?:.*-)?(?P<mpx_id>[^/?]+))
         '''
 
     _TESTS = [
@@ -216,13 +212,16 @@ class NBCNewsIE(ThePlatformIE):
                 'ext': 'mp4',
                 'title': 'How Twitter Reacted To The Snowden Interview',
                 'description': 'md5:65a0bd5d76fe114f3c2727aa3a81fe64',
+                'uploader': 'NBCU-NEWS',
+                'timestamp': 1401363060,
+                'upload_date': '20140529',
             },
         },
         {
            'url': 'http://www.nbcnews.com/feature/dateline-full-episodes/full-episode-family-business-n285156',
            'md5': 'fdbf39ab73a72df5896b6234ff98518a',
            'info_dict': {
-                'id': 'Wjf9EDR3A_60',
+                'id': '529953347624',
                'ext': 'mp4',
                'title': 'FULL EPISODE: Family Business',
                'description': 'md5:757988edbaae9d7be1d585eb5d55cc04',
@@ -237,6 +236,9 @@ class NBCNewsIE(ThePlatformIE):
                 'ext': 'mp4',
                 'title': 'Nightly News with Brian Williams Full Broadcast (February 4)',
                 'description': 'md5:1c10c1eccbe84a26e5debb4381e2d3c5',
+                'timestamp': 1423104900,
+                'uploader': 'NBCU-NEWS',
+                'upload_date': '20150205',
             },
         },
         {
@@ -245,10 +247,12 @@ class NBCNewsIE(ThePlatformIE):
             'info_dict': {
                 'id': '529953347624',
                 'ext': 'mp4',
-                'title': 'Volkswagen U.S. Chief: We \'Totally Screwed Up\'',
-                'description': 'md5:d22d1281a24f22ea0880741bb4dd6301',
+                'title': 'Volkswagen U.S. Chief:\xa0 We Have Totally Screwed Up',
+                'description': 'md5:c8be487b2d80ff0594c005add88d8351',
+                'upload_date': '20150922',
+                'timestamp': 1442917800,
+                'uploader': 'NBCU-NEWS',
             },
-            'expected_warnings': ['http-6000 is not available']
         },
         {
             'url': 'http://www.today.com/video/see-the-aurora-borealis-from-space-in-stunning-new-nasa-video-669831235788',
@@ -260,6 +264,22 @@ class NBCNewsIE(ThePlatformIE):
                 'description': 'md5:74752b7358afb99939c5f8bb2d1d04b1',
                 'upload_date': '20160420',
                 'timestamp': 1461152093,
+                'uploader': 'NBCU-NEWS',
+            },
+        },
+        {
+            'url': 'http://www.msnbc.com/all-in-with-chris-hayes/watch/the-chaotic-gop-immigration-vote-314487875924',
+            'md5': '6d236bf4f3dddc226633ce6e2c3f814d',
+            'info_dict': {
+                'id': '314487875924',
+                'ext': 'mp4',
+                'title': 'The chaotic GOP immigration vote',
+                'description': 'The Republican House votes on a border bill that has no chance of getting through the Senate or signed by the President and is drawing criticism from all sides.',
+                'thumbnail': 're:^https?://.*\.jpg$',
+                'timestamp': 1406937606,
+                'upload_date': '20140802',
+                'uploader': 'NBCU-NEWS',
+                'categories': ['MSNBC/Topics/Franchise/Best of last night', 'MSNBC/Topics/General/Congress'],
             },
         },
         {
@@ -290,15 +310,16 @@ class NBCNewsIE(ThePlatformIE):
             }
         else:
             # "feature" and "nightly-news" pages use theplatform.com
-            display_id = mobj.group('display_id')
-            webpage = self._download_webpage(url, display_id)
+            video_id = mobj.group('mpx_id')
+            if not video_id.isdigit():
+                webpage = self._download_webpage(url, video_id)
                 info = None
                 bootstrap_json = self._search_regex(
                     [r'(?m)(?:var\s+(?:bootstrapJson|playlistData)|NEWS\.videoObj)\s*=\s*({.+});?\s*$',
                      r'videoObj\s*:\s*({.+})', r'data-video="([^"]+)"'],
                     webpage, 'bootstrap json', default=None)
                 bootstrap = self._parse_json(
-                    bootstrap_json, display_id, transform_source=unescapeHTML)
+                    bootstrap_json, video_id, transform_source=unescapeHTML)
                 if 'results' in bootstrap:
                     info = bootstrap['results'][0]['video']
                 elif 'video' in bootstrap:
@@ -306,89 +327,11 @@ class NBCNewsIE(ThePlatformIE):
                 else:
                     info = bootstrap
                 video_id = info['mpxId']
-            title = info['title']
-
-            subtitles = {}
-            caption_links = info.get('captionLinks')
-            if caption_links:
-                for (sub_key, sub_ext) in (('smpte-tt', 'ttml'), ('web-vtt', 'vtt'), ('srt', 'srt')):
-                    sub_url = caption_links.get(sub_key)
-                    if sub_url:
-                        subtitles.setdefault('en', []).append({
-                            'url': sub_url,
-                            'ext': sub_ext,
-                        })
-
-            formats = []
-            for video_asset in info['videoAssets']:
-                video_url = video_asset.get('publicUrl')
-                if not video_url:
-                    continue
-                container = video_asset.get('format')
-                asset_type = video_asset.get('assetType') or ''
-                if container == 'ISM' or asset_type == 'FireTV-Once':
-                    continue
-                elif asset_type == 'OnceURL':
-                    tp_formats, tp_subtitles = self._extract_theplatform_smil(
-                        video_url, video_id)
-                    formats.extend(tp_formats)
-                    subtitles = self._merge_subtitles(subtitles, tp_subtitles)
-                else:
-                    tbr = int_or_none(video_asset.get('bitRate') or video_asset.get('bitrate'), 1000)
-                    format_id = 'http%s' % ('-%d' % tbr if tbr else '')
-                    video_url = update_url_query(
-                        video_url, {'format': 'redirect'})
-                    # resolve the url so that we can check availability and detect the correct extension
-                    head = self._request_webpage(
-                        HEADRequest(video_url), video_id,
-                        'Checking %s url' % format_id,
-                        '%s is not available' % format_id,
-                        fatal=False)
-                    if head:
-                        video_url = head.geturl()
-                    formats.append({
-                        'format_id': format_id,
-                        'url': video_url,
-                        'width': int_or_none(video_asset.get('width')),
-                        'height': int_or_none(video_asset.get('height')),
-                        'tbr': tbr,
-                        'container': video_asset.get('format'),
-                    })
-            self._sort_formats(formats)
 
             return {
+                '_type': 'url_transparent',
                 'id': video_id,
-                'title': title,
-                'description': info.get('description'),
-                'thumbnail': info.get('thumbnail'),
-                'duration': int_or_none(info.get('duration')),
-                'timestamp': parse_iso8601(info.get('pubDate') or info.get('pub_date')),
-                'formats': formats,
-                'subtitles': subtitles,
+                # http://feed.theplatform.com/f/2E2eJC/nbcnews also works
+                'url': 'http://feed.theplatform.com/f/2E2eJC/nnd_NBCNews?byId=%s' % video_id,
+                'ie_key': 'ThePlatformFeed',
             }
-
-
-class MSNBCIE(InfoExtractor):
-    # https URLs redirect to corresponding http ones
-    _VALID_URL = r'https?://www\.msnbc\.com/[^/]+/watch/(?P<id>[^/]+)'
-    _TEST = {
-        'url': 'http://www.msnbc.com/all-in-with-chris-hayes/watch/the-chaotic-gop-immigration-vote-314487875924',
-        'md5': '6d236bf4f3dddc226633ce6e2c3f814d',
-        'info_dict': {
-            'id': 'n_hayes_Aimm_140801_272214',
-            'ext': 'mp4',
-            'title': 'The chaotic GOP immigration vote',
-            'description': 'The Republican House votes on a border bill that has no chance of getting through the Senate or signed by the President and is drawing criticism from all sides.',
-            'thumbnail': 're:^https?://.*\.jpg$',
-            'timestamp': 1406937606,
-            'upload_date': '20140802',
-            'uploader': 'NBCU-NEWS',
-            'categories': ['MSNBC/Topics/Franchise/Best of last night', 'MSNBC/Topics/General/Congress'],
-        },
-    }
-
-    def _real_extract(self, url):
-        video_id = self._match_id(url)
-        webpage = self._download_webpage(url, video_id)
-        embed_url = self._html_search_meta('embedURL', webpage)
-        return self.url_result(embed_url)
@@ -3,6 +3,7 @@ from __future__ import unicode_literals
 
 from .mtv import MTVServicesInfoExtractor
 from ..compat import compat_urllib_parse_urlencode
+from ..utils import update_url_query
 
 
 class NickIE(MTVServicesInfoExtractor):
@@ -61,3 +62,26 @@ class NickIE(MTVServicesInfoExtractor):
 
     def _extract_mgid(self, webpage):
         return self._search_regex(r'data-contenturi="([^"]+)', webpage, 'mgid')
+
+
+class NickDeIE(MTVServicesInfoExtractor):
+    IE_NAME = 'nick.de'
+    _VALID_URL = r'https?://(?:www\.)?nick\.de/(?:playlist|shows)/(?:[^/]+/)*(?P<id>[^/?#&]+)'
+    _TESTS = [{
+        'url': 'http://www.nick.de/playlist/3773-top-videos/videos/episode/17306-zu-wasser-und-zu-land-rauchende-erdnusse',
+        'only_matching': True,
+    }, {
+        'url': 'http://www.nick.de/shows/342-icarly',
+        'only_matching': True,
+    }]
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+
+        webpage = self._download_webpage(url, video_id)
+
+        mrss_url = update_url_query(self._search_regex(
+            r'data-mrss=(["\'])(?P<url>http.+?)\1', webpage, 'mrss url', group='url'),
+            {'siteKey': 'nick.de'})
+
+        return self._get_videos_info_from_url(mrss_url, video_id)
@@ -163,7 +163,7 @@ class NRKTVIE(NRKBaseIE):
             'ext': 'mp4',
             'title': '20 spørsmål 23.05.2014',
             'description': 'md5:bdea103bc35494c143c6a9acdd84887a',
-            'duration': 1741.52,
+            'duration': 1741,
         },
     }, {
         'url': 'https://tv.nrk.no/program/mdfp15000514',
@@ -173,7 +173,7 @@ class NRKTVIE(NRKBaseIE):
             'ext': 'mp4',
             'title': 'Grunnlovsjubiléet - Stor ståhei for ingenting 24.05.2014',
             'description': 'md5:89290c5ccde1b3a24bb8050ab67fe1db',
-            'duration': 4605.08,
+            'duration': 4605,
         },
     }, {
         # single playlist video
@@ -260,30 +260,34 @@ class NRKPlaylistIE(InfoExtractor):
 
 class NRKSkoleIE(InfoExtractor):
     IE_DESC = 'NRK Skole'
-    _VALID_URL = r'https?://(?:www\.)?nrk\.no/skole/klippdetalj?.*\btopic=(?P<id>[^/?#&]+)'
+    _VALID_URL = r'https?://(?:www\.)?nrk\.no/skole/?\?.*\bmediaId=(?P<id>\d+)'
 
     _TESTS = [{
-        'url': 'http://nrk.no/skole/klippdetalj?topic=nrk:klipp/616532',
-        'md5': '04cd85877cc1913bce73c5d28a47e00f',
+        'url': 'https://www.nrk.no/skole/?page=search&q=&mediaId=14099',
+        'md5': '6bc936b01f9dd8ed45bc58b252b2d9b6',
         'info_dict': {
             'id': '6021',
-            'ext': 'flv',
+            'ext': 'mp4',
             'title': 'Genetikk og eneggede tvillinger',
             'description': 'md5:3aca25dcf38ec30f0363428d2b265f8d',
             'duration': 399,
         },
     }, {
-        'url': 'http://www.nrk.no/skole/klippdetalj?topic=nrk%3Aklipp%2F616532#embed',
-        'only_matching': True,
-    }, {
-        'url': 'http://www.nrk.no/skole/klippdetalj?topic=urn:x-mediadb:21379',
+        'url': 'https://www.nrk.no/skole/?page=objectives&subject=naturfag&objective=K15114&mediaId=19355',
         'only_matching': True,
     }]
 
     def _real_extract(self, url):
-        video_id = compat_urllib_parse_unquote(self._match_id(url))
+        video_id = self._match_id(url)
 
-        webpage = self._download_webpage(url, video_id)
+        webpage = self._download_webpage(
+            'https://mimir.nrk.no/plugin/1.0/static?mediaId=%s' % video_id,
+            video_id)
 
-        nrk_id = self._search_regex(r'data-nrk-id=["\'](\d+)', webpage, 'nrk id')
+        nrk_id = self._parse_json(
+            self._search_regex(
+                r'<script[^>]+type=["\']application/json["\'][^>]*>({.+?})</script>',
+                webpage, 'application json'),
+            video_id)['activeMedia']['psId']
+
         return self.url_result('nrk:%s' % nrk_id)
95  youtube_dl/extractor/polskieradio.py  Normal file
@@ -0,0 +1,95 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..compat import (
+    compat_str,
+    compat_urllib_parse_unquote,
+)
+from ..utils import (
+    int_or_none,
+    strip_or_none,
+    unified_timestamp,
+)
+
+
+class PolskieRadioIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?polskieradio\.pl/\d+/\d+/Artykul/(?P<id>[0-9]+)'
+    _TESTS = [{
+        'url': 'http://www.polskieradio.pl/7/5102/Artykul/1587943,Prof-Andrzej-Nowak-o-historii-nie-da-sie-myslec-beznamietnie',
+        'info_dict': {
+            'id': '1587943',
+            'title': 'Prof. Andrzej Nowak: o historii nie da się myśleć beznamiętnie',
+            'description': 'md5:12f954edbf3120c5e7075e17bf9fc5c5',
+        },
+        'playlist': [{
+            'md5': '2984ee6ce9046d91fc233bc1a864a09a',
+            'info_dict': {
+                'id': '1540576',
+                'ext': 'mp3',
+                'title': 'md5:d4623290d4ac983bf924061c75c23a0d',
+                'timestamp': 1456594200,
+                'upload_date': '20160227',
+                'duration': 2364,
+            },
+        }],
+    }, {
+        'url': 'http://www.polskieradio.pl/265/5217/Artykul/1635803,Euro-2016-nie-ma-miejsca-na-blad-Polacy-graja-ze-Szwajcaria-o-cwiercfinal',
+        'info_dict': {
+            'id': '1635803',
+            'title': 'Euro 2016: nie ma miejsca na błąd. Polacy grają ze Szwajcarią o ćwierćfinał',
+            'description': 'md5:01cb7d0cad58664095d72b51a1ebada2',
+        },
+        'playlist_mincount': 12,
+    }, {
+        'url': 'http://polskieradio.pl/9/305/Artykul/1632955,Bardzo-popularne-slowo-remis',
+        'only_matching': True,
+    }, {
+        'url': 'http://www.polskieradio.pl/7/5102/Artykul/1587943',
+        'only_matching': True,
+    }, {
+        # with mp4 video
+        'url': 'http://www.polskieradio.pl/9/299/Artykul/1634903,Brexit-Leszek-Miller-swiat-sie-nie-zawali-Europa-bedzie-trwac-dalej',
+        'only_matching': True,
+    }]
+
+    def _real_extract(self, url):
+        playlist_id = self._match_id(url)
+
+        webpage = self._download_webpage(url, playlist_id)
+
+        content = self._search_regex(
+            r'(?s)<div[^>]+class="audio atarticle"[^>]*>(.+?)<script>',
+            webpage, 'content')
+
+        timestamp = unified_timestamp(self._html_search_regex(
+            r'(?s)<span[^>]+id="datetime2"[^>]*>(.+?)</span>',
+            webpage, 'timestamp', fatal=False))
+
+        entries = []
+
+        media_urls = set()
+
+        for data_media in re.findall(r'<[^>]+data-media=({[^>]+})', content):
+            media = self._parse_json(data_media, playlist_id, fatal=False)
+            if not media.get('file') or not media.get('desc'):
+                continue
+            media_url = self._proto_relative_url(media['file'], 'http:')
+            if media_url in media_urls:
+                continue
+            media_urls.add(media_url)
+            entries.append({
+                'id': compat_str(media['id']),
+                'url': media_url,
+                'title': compat_urllib_parse_unquote(media['desc']),
+                'duration': int_or_none(media.get('length')),
+                'vcodec': 'none' if media.get('provider') == 'audio' else None,
+                'timestamp': timestamp,
+            })
+
+        title = self._og_search_title(webpage).strip()
+        description = strip_or_none(self._og_search_description(webpage))
+
+        return self.playlist_result(entries, playlist_id, title, description)
@@ -1,19 +1,32 @@
 from __future__ import unicode_literals
 
 import re
-import json
 
 from .common import InfoExtractor
 from ..utils import (
+    ExtractorError,
     int_or_none,
     js_to_json,
-    qualities,
 )
 
 
 class PornHdIE(InfoExtractor):
     _VALID_URL = r'https?://(?:www\.)?pornhd\.com/(?:[a-z]{2,4}/)?videos/(?P<id>\d+)(?:/(?P<display_id>.+))?'
-    _TEST = {
+    _TESTS = [{
+        'url': 'http://www.pornhd.com/videos/9864/selfie-restroom-masturbation-fun-with-chubby-cutie-hd-porn-video',
+        'md5': 'c8b964b1f0a4b5f7f28ae3a5c9f86ad5',
+        'info_dict': {
+            'id': '9864',
+            'display_id': 'selfie-restroom-masturbation-fun-with-chubby-cutie-hd-porn-video',
+            'ext': 'mp4',
+            'title': 'Restroom selfie masturbation',
+            'description': 'md5:3748420395e03e31ac96857a8f125b2b',
+            'thumbnail': 're:^https?://.*\.jpg',
+            'view_count': int,
+            'age_limit': 18,
+        }
+    }, {
+        # removed video
         'url': 'http://www.pornhd.com/videos/1962/sierra-day-gets-his-cum-all-over-herself-hd-porn-video',
         'md5': '956b8ca569f7f4d8ec563e2c41598441',
         'info_dict': {
@@ -25,8 +38,9 @@ class PornHdIE(InfoExtractor):
             'thumbnail': 're:^https?://.*\.jpg',
             'view_count': int,
             'age_limit': 18,
-        }
-    }
+        },
+        'skip': 'Not available anymore',
+    }]
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
@@ -38,28 +52,38 @@ class PornHdIE(InfoExtractor):
         title = self._html_search_regex(
             [r'<span[^>]+class=["\']video-name["\'][^>]*>([^<]+)',
              r'<title>(.+?) - .*?[Pp]ornHD.*?</title>'], webpage, 'title')
-        description = self._html_search_regex(
-            r'<div class="description">([^<]+)</div>', webpage, 'description', fatal=False)
-        view_count = int_or_none(self._html_search_regex(
-            r'(\d+) views\s*</span>', webpage, 'view count', fatal=False))
-        thumbnail = self._search_regex(
-            r"'poster'\s*:\s*'([^']+)'", webpage, 'thumbnail', fatal=False)
 
-        quality = qualities(['sd', 'hd'])
-        sources = json.loads(js_to_json(self._search_regex(
+        sources = self._parse_json(js_to_json(self._search_regex(
             r"(?s)'sources'\s*:\s*(\{.+?\})\s*\}[;,)]",
-            webpage, 'sources')))
+            webpage, 'sources', default='{}')), video_id)
+
+        if not sources:
+            message = self._html_search_regex(
+                r'(?s)<(div|p)[^>]+class="no-video"[^>]*>(?P<value>.+?)</\1',
+                webpage, 'error message', group='value')
+            raise ExtractorError('%s said: %s' % (self.IE_NAME, message), expected=True)
+
         formats = []
-        for qname, video_url in sources.items():
+        for format_id, video_url in sources.items():
             if not video_url:
                 continue
+            height = int_or_none(self._search_regex(
+                r'^(\d+)[pP]', format_id, 'height', default=None))
             formats.append({
                 'url': video_url,
-                'format_id': qname,
-                'quality': quality(qname),
+                'format_id': format_id,
+                'height': height,
             })
         self._sort_formats(formats)
 
+        description = self._html_search_regex(
+            r'<(div|p)[^>]+class="description"[^>]*>(?P<value>[^<]+)</\1',
+            webpage, 'description', fatal=False, group='value')
+        view_count = int_or_none(self._html_search_regex(
+            r'(\d+) views\s*<', webpage, 'view count', fatal=False))
+        thumbnail = self._search_regex(
+            r"'poster'\s*:\s*'([^']+)'", webpage, 'thumbnail', fatal=False)
+
         return {
             'id': video_id,
             'display_id': display_id,
@@ -1,3 +1,4 @@
+# coding: utf-8
 from __future__ import unicode_literals
 
 import itertools
@@ -39,7 +40,25 @@ class PornHubIE(InfoExtractor):
             'dislike_count': int,
             'comment_count': int,
             'age_limit': 18,
-        }
+        },
+    }, {
+        # non-ASCII title
+        'url': 'http://www.pornhub.com/view_video.php?viewkey=1331683002',
+        'info_dict': {
+            'id': '1331683002',
+            'ext': 'mp4',
+            'title': '重庆婷婷女王足交',
+            'uploader': 'cj397186295',
+            'duration': 1753,
+            'view_count': int,
+            'like_count': int,
+            'dislike_count': int,
+            'comment_count': int,
+            'age_limit': 18,
+        },
+        'params': {
+            'skip_download': True,
+        },
     }, {
         'url': 'http://www.pornhub.com/view_video.php?viewkey=ph557bbb6676d2d',
         'only_matching': True,
@@ -76,19 +95,25 @@ class PornHubIE(InfoExtractor):
                 'PornHub said: %s' % error_msg,
                 expected=True, video_id=video_id)
 
+        # video_title from flashvars contains whitespace instead of non-ASCII (see
+        # http://www.pornhub.com/view_video.php?viewkey=1331683002), not relying
+        # on that anymore.
+        title = self._html_search_meta(
+            'twitter:title', webpage, default=None) or self._search_regex(
+            (r'<h1[^>]+class=["\']title["\'][^>]*>(?P<title>[^<]+)',
+             r'<div[^>]+data-video-title=(["\'])(?P<title>.+?)\1',
+             r'shareTitle\s*=\s*(["\'])(?P<title>.+?)\1'),
+            webpage, 'title', group='title')
+
         flashvars = self._parse_json(
             self._search_regex(
                 r'var\s+flashvars_\d+\s*=\s*({.+?});', webpage, 'flashvars', default='{}'),
             video_id)
         if flashvars:
-            video_title = flashvars.get('video_title')
             thumbnail = flashvars.get('image_url')
             duration = int_or_none(flashvars.get('video_duration'))
         else:
-            video_title, thumbnail, duration = [None] * 3
-
-        if not video_title:
-            video_title = self._html_search_regex(r'<h1 [^>]+>([^<]+)', webpage, 'title')
+            title, thumbnail, duration = [None] * 3
 
         video_uploader = self._html_search_regex(
             r'(?s)From: .+?<(?:a href="/users/|a href="/channels/|span class="username)[^>]+>(.+?)<',
@@ -137,7 +162,7 @@ class PornHubIE(InfoExtractor):
         return {
             'id': video_id,
             'uploader': video_uploader,
-            'title': video_title,
+            'title': title,
             'thumbnail': thumbnail,
             'duration': duration,
             'view_count': view_count,
@@ -2,15 +2,12 @@
 from __future__ import unicode_literals
 
 from .common import InfoExtractor
-from ..utils import (
-    js_to_json,
-    unescapeHTML,
-    int_or_none,
-)
+from ..utils import int_or_none
 
 
 class R7IE(InfoExtractor):
-    _VALID_URL = r'''(?x)https?://
+    _VALID_URL = r'''(?x)
+                    https?://
                     (?:
                         (?:[a-zA-Z]+)\.r7\.com(?:/[^/]+)+/idmedia/|
                         noticias\.r7\.com(?:/[^/]+)+/[^/]+-|
@@ -25,6 +22,7 @@ class R7IE(InfoExtractor):
             'id': '54e7050b0cf2ff57e0279389',
             'ext': 'mp4',
             'title': 'Policiais humilham suspeito à beira da morte: "Morre com dignidade"',
+            'description': 'md5:01812008664be76a6479aa58ec865b72',
             'thumbnail': 're:^https?://.*\.jpg$',
             'duration': 98,
             'like_count': int,
@@ -44,45 +42,72 @@ class R7IE(InfoExtractor):
     def _real_extract(self, url):
         video_id = self._match_id(url)
 
-        webpage = self._download_webpage(
-            'http://player.r7.com/video/i/%s' % video_id, video_id)
+        video = self._download_json(
+            'http://player-api.r7.com/video/i/%s' % video_id, video_id)
 
-        item = self._parse_json(js_to_json(self._search_regex(
-            r'(?s)var\s+item\s*=\s*({.+?});', webpage, 'player')), video_id)
-
-        title = unescapeHTML(item['title'])
-        thumbnail = item.get('init', {}).get('thumbUri')
-        duration = None
-
-        statistics = item.get('statistics', {})
-        like_count = int_or_none(statistics.get('likes'))
-        view_count = int_or_none(statistics.get('views'))
+        title = video['title']
 
         formats = []
-        for format_key, format_dict in item['playlist'][0].items():
-            src = format_dict.get('src')
-            if not src:
-                continue
-            format_id = format_dict.get('format') or format_key
-            if duration is None:
-                duration = format_dict.get('duration')
-            if '.f4m' in src:
-                formats.extend(self._extract_f4m_formats(src, video_id, preference=-1))
-            elif src.endswith('.m3u8'):
-                formats.extend(self._extract_m3u8_formats(src, video_id, 'mp4', preference=-2))
-            else:
-                formats.append({
-                    'url': src,
-                    'format_id': format_id,
-                })
+        media_url_hls = video.get('media_url_hls')
+        if media_url_hls:
+            formats.extend(self._extract_m3u8_formats(
+                media_url_hls, video_id, 'mp4', entry_protocol='m3u8_native',
+                m3u8_id='hls', fatal=False))
+        media_url = video.get('media_url')
+        if media_url:
+            f = {
+                'url': media_url,
+                'format_id': 'http',
+            }
+            # m3u8 format always matches the http format, let's copy metadata from
+            # one to another
+            m3u8_formats = list(filter(
+                lambda f: f.get('vcodec') != 'none' and f.get('resolution') != 'multiple',
+                formats))
+            if len(m3u8_formats) == 1:
+                f_copy = m3u8_formats[0].copy()
+                f_copy.update(f)
+                f_copy['protocol'] = 'http'
+                f = f_copy
+            formats.append(f)
         self._sort_formats(formats)
 
+        description = video.get('description')
+        thumbnail = video.get('thumb')
+        duration = int_or_none(video.get('media_duration'))
+        like_count = int_or_none(video.get('likes'))
+        view_count = int_or_none(video.get('views'))
+
         return {
             'id': video_id,
             'title': title,
+            'description': description,
             'thumbnail': thumbnail,
             'duration': duration,
             'like_count': like_count,
             'view_count': view_count,
             'formats': formats,
         }
+
+
+class R7ArticleIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:[a-zA-Z]+)\.r7\.com/(?:[^/]+/)+[^/?#&]+-(?P<id>\d+)'
+    _TEST = {
+        'url': 'http://tv.r7.com/record-play/balanco-geral/videos/policiais-humilham-suspeito-a-beira-da-morte-morre-com-dignidade-16102015',
+        'only_matching': True,
+    }
+
+    @classmethod
+    def suitable(cls, url):
+        return False if R7IE.suitable(url) else super(R7ArticleIE, cls).suitable(url)
+
+    def _real_extract(self, url):
+        display_id = self._match_id(url)
+
+        webpage = self._download_webpage(url, display_id)
+
+        video_id = self._search_regex(
+            r'<div[^>]+(?:id=["\']player-|class=["\']embed["\'][^>]+id=["\'])([\da-f]{24})',
+            webpage, 'video id')
+
+        return self.url_result('http://player.r7.com/video/i/%s' % video_id, R7IE.ie_key())
69  youtube_dl/extractor/rockstargames.py  Normal file
@@ -0,0 +1,69 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..utils import (
+    int_or_none,
+    parse_iso8601,
+)
+
+
+class RockstarGamesIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?rockstargames\.com/videos(?:/video/|#?/?\?.*\bvideo=)(?P<id>\d+)'
+    _TESTS = [{
+        'url': 'https://www.rockstargames.com/videos/video/11544/',
+        'md5': '03b5caa6e357a4bd50e3143fc03e5733',
+        'info_dict': {
+            'id': '11544',
+            'ext': 'mp4',
+            'title': 'Further Adventures in Finance and Felony Trailer',
+            'description': 'md5:6d31f55f30cb101b5476c4a379e324a3',
+            'thumbnail': 're:^https?://.*\.jpg$',
+            'timestamp': 1464876000,
+            'upload_date': '20160602',
+        }
+    }, {
+        'url': 'http://www.rockstargames.com/videos#/?video=48',
+        'only_matching': True,
+    }]
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+
+        video = self._download_json(
+            'https://www.rockstargames.com/videoplayer/videos/get-video.json',
+            video_id, query={
+                'id': video_id,
+                'locale': 'en_us',
+            })['video']
+
+        title = video['title']
+
+        formats = []
+        for video in video['files_processed']['video/mp4']:
+            if not video.get('src'):
+                continue
+            resolution = video.get('resolution')
+            height = int_or_none(self._search_regex(
+                r'^(\d+)[pP]$', resolution or '', 'height', default=None))
+            formats.append({
+                'url': self._proto_relative_url(video['src']),
+                'format_id': resolution,
+                'height': height,
+            })
+
+        if not formats:
+            youtube_id = video.get('youtube_id')
+            if youtube_id:
+                return self.url_result(youtube_id, 'Youtube')
+
+        self._sort_formats(formats)
+
+        return {
+            'id': video_id,
+            'title': title,
+            'description': video.get('description'),
+            'thumbnail': self._proto_relative_url(video.get('screencap')),
+            'timestamp': parse_iso8601(video.get('created')),
+            'formats': formats,
+        }
38  youtube_dl/extractor/sportschau.py  Normal file
@@ -0,0 +1,38 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .wdr import WDRBaseIE
+from ..utils import get_element_by_attribute
+
+
+class SportschauIE(WDRBaseIE):
+    IE_NAME = 'Sportschau'
+    _VALID_URL = r'https?://(?:www\.)?sportschau\.de/(?:[^/]+/)+video-?(?P<id>[^/#?]+)\.html'
+    _TEST = {
+        'url': 'http://www.sportschau.de/uefaeuro2016/videos/video-dfb-team-geht-gut-gelaunt-ins-spiel-gegen-polen-100.html',
+        'info_dict': {
+            'id': 'mdb-1140188',
+            'display_id': 'dfb-team-geht-gut-gelaunt-ins-spiel-gegen-polen-100',
+            'ext': 'mp4',
+            'title': 'DFB-Team geht gut gelaunt ins Spiel gegen Polen',
+            'description': 'Vor dem zweiten Gruppenspiel gegen Polen herrscht gute Stimmung im deutschen Team. Insbesondere Bastian Schweinsteiger strotzt vor Optimismus nach seinem Tor gegen die Ukraine.',
+            'upload_date': '20160615',
+        },
+        'skip': 'Geo-restricted to Germany',
+    }
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+
+        webpage = self._download_webpage(url, video_id)
+        title = get_element_by_attribute('class', 'headline', webpage)
+        description = self._html_search_meta('description', webpage, 'description')
+
+        info = self._extract_wdr_video(webpage, video_id)
+
+        info.update({
+            'title': title,
+            'description': description,
+        })
+
+        return info
@@ -5,7 +5,7 @@ import re
 
 from .common import InfoExtractor
 from ..utils import (
-    sanitized_Request,
+    ExtractorError,
     urlencode_postdata,
 )
 
@@ -14,7 +14,7 @@ class StreamcloudIE(InfoExtractor):
     IE_NAME = 'streamcloud.eu'
     _VALID_URL = r'https?://streamcloud\.eu/(?P<id>[a-zA-Z0-9_-]+)(?:/(?P<fname>[^#?]*)\.html)?'
 
-    _TEST = {
+    _TESTS = [{
         'url': 'http://streamcloud.eu/skp9j99s4bpz/youtube-dl_test_video_____________-BaW_jenozKc.mp4.html',
         'md5': '6bea4c7fa5daaacc2a946b7146286686',
         'info_dict': {
@@ -23,7 +23,10 @@ class StreamcloudIE(InfoExtractor):
             'title': 'youtube-dl test video \'/\\ ä ↭',
         },
         'skip': 'Only available from the EU'
-    }
+    }, {
+        'url': 'http://streamcloud.eu/ua8cmfh1nbe6/NSHIP-148--KUC-NG--H264-.mp4.html',
+        'only_matching': True,
+    }]
 
     def _real_extract(self, url):
         video_id = self._match_id(url)
@@ -31,26 +34,36 @@ class StreamcloudIE(InfoExtractor):
 
         orig_webpage = self._download_webpage(url, video_id)
 
+        if '>File Not Found<' in orig_webpage:
+            raise ExtractorError(
+                'Video %s does not exist' % video_id, expected=True)
+
         fields = re.findall(r'''(?x)<input\s+
             type="(?:hidden|submit)"\s+
             name="([^"]+)"\s+
             (?:id="[^"]+"\s+)?
             value="([^"]*)"
             ''', orig_webpage)
-        post = urlencode_postdata(fields)
 
         self._sleep(12, video_id)
-        headers = {
-            b'Content-Type': b'application/x-www-form-urlencoded',
-        }
-        req = sanitized_Request(url, post, headers)
 
         webpage = self._download_webpage(
-            req, video_id, note='Downloading video page ...')
+            url, video_id, data=urlencode_postdata(fields), headers={
+                b'Content-Type': b'application/x-www-form-urlencoded',
+            })
 
+        try:
             title = self._html_search_regex(
                 r'<h1[^>]*>([^<]+)<', webpage, 'title')
             video_url = self._search_regex(
                 r'file:\s*"([^"]+)"', webpage, 'video URL')
+        except ExtractorError:
+            message = self._html_search_regex(
+                r'(?s)<div[^>]+class=(["\']).*?msgboxinfo.*?\1[^>]*>(?P<message>.+?)</div>',
+                webpage, 'message', default=None, group='message')
+            if message:
+                raise ExtractorError('%s said: %s' % (self.IE_NAME, message), expected=True)
+            raise
         thumbnail = self._search_regex(
             r'image:\s*"([^"]+)"', webpage, 'thumbnail URL', fatal=False)
 
@@ -6,17 +6,14 @@ import re
 from .common import InfoExtractor
 from ..utils import (
     determine_ext,
+    dict_get,
+    int_or_none,
+    try_get,
 )
 
 
 class SVTBaseIE(InfoExtractor):
-    def _extract_video(self, url, video_id):
-        info = self._download_json(url, video_id)
-
-        title = info['context']['title']
-        thumbnail = info['context'].get('thumbnailImage')
-
-        video_info = info['video']
+    def _extract_video(self, video_info, video_id):
         formats = []
         for vr in video_info['videoReferences']:
             player_type = vr.get('playerType')
@@ -40,27 +37,49 @@ class SVTBaseIE(InfoExtractor):
                     'format_id': player_type,
                     'url': vurl,
                 })
+        if not formats and video_info.get('rights', {}).get('geoBlockedSweden'):
+            self.raise_geo_restricted('This video is only available in Sweden')
         self._sort_formats(formats)
 
         subtitles = {}
-        subtitle_references = video_info.get('subtitleReferences')
+        subtitle_references = dict_get(video_info, ('subtitles', 'subtitleReferences'))
         if isinstance(subtitle_references, list):
             for sr in subtitle_references:
                 subtitle_url = sr.get('url')
+                subtitle_lang = sr.get('language', 'sv')
                 if subtitle_url:
-                    subtitles.setdefault('sv', []).append({'url': subtitle_url})
+                    if determine_ext(subtitle_url) == 'm3u8':
+                        # TODO(yan12125): handle WebVTT in m3u8 manifests
+                        continue
 
-        duration = video_info.get('materialLength')
-        age_limit = 18 if video_info.get('inappropriateForChildren') else 0
+                    subtitles.setdefault(subtitle_lang, []).append({'url': subtitle_url})
+
+        title = video_info.get('title')
+
+        series = video_info.get('programTitle')
+        season_number = int_or_none(video_info.get('season'))
+        episode = video_info.get('episodeTitle')
+        episode_number = int_or_none(video_info.get('episodeNumber'))
+
+        duration = int_or_none(dict_get(video_info, ('materialLength', 'contentDuration')))
+        age_limit = None
+        adult = dict_get(
+            video_info, ('inappropriateForChildren', 'blockedForChildren'),
+            skip_false_values=False)
+        if adult is not None:
+            age_limit = 18 if adult else 0
 
         return {
             'id': video_id,
             'title': title,
             'formats': formats,
             'subtitles': subtitles,
-            'thumbnail': thumbnail,
             'duration': duration,
             'age_limit': age_limit,
+            'series': series,
+            'season_number': season_number,
+            'episode': episode,
+            'episode_number': episode_number,
         }
 
 
@@ -68,11 +87,11 @@ class SVTIE(SVTBaseIE):
     _VALID_URL = r'https?://(?:www\.)?svt\.se/wd\?(?:.*?&)?widgetId=(?P<widget_id>\d+)&.*?\barticleId=(?P<id>\d+)'
     _TEST = {
         'url': 'http://www.svt.se/wd?widgetId=23991&sectionId=541&articleId=2900353&type=embed&contextSectionId=123&autostart=false',
-        'md5': '9648197555fc1b49e3dc22db4af51d46',
+        'md5': '33e9a5d8f646523ce0868ecfb0eed77d',
         'info_dict': {
             'id': '2900353',
-            'ext': 'flv',
-            'title': 'Här trycker Jagr till Giroux (under SVT-intervjun)',
+            'ext': 'mp4',
+            'title': 'Stjärnorna skojar till det - under SVT-intervjun',
             'duration': 27,
             'age_limit': 0,
         },
@@ -89,15 +108,20 @@ class SVTIE(SVTBaseIE):
         mobj = re.match(self._VALID_URL, url)
         widget_id = mobj.group('widget_id')
         article_id = mobj.group('id')
-        return self._extract_video(
+
+        info = self._download_json(
             'http://www.svt.se/wd?widgetId=%s&articleId=%s&format=json&type=embed&output=json' % (widget_id, article_id),
             article_id)
+
+        info_dict = self._extract_video(info['video'], article_id)
+        info_dict['title'] = info['context']['title']
+        return info_dict
 
 
 class SVTPlayIE(SVTBaseIE):
     IE_DESC = 'SVT Play and Öppet arkiv'
-    _VALID_URL = r'https?://(?:www\.)?(?P<host>svtplay|oppetarkiv)\.se/video/(?P<id>[0-9]+)'
-    _TEST = {
+    _VALID_URL = r'https?://(?:www\.)?(?:svtplay|oppetarkiv)\.se/(?:video|klipp)/(?P<id>[0-9]+)'
+    _TESTS = [{
         'url': 'http://www.svtplay.se/video/5996901/flygplan-till-haile-selassie/flygplan-till-haile-selassie-2',
         'md5': '2b6704fe4a28801e1a098bbf3c5ac611',
         'info_dict': {
@@ -113,12 +137,50 @@ class SVTPlayIE(SVTBaseIE):
                 }]
             },
         },
-    }
+    }, {
+        # geo restricted to Sweden
+        'url': 'http://www.oppetarkiv.se/video/5219710/trollflojten',
+        'only_matching': True,
+    }, {
+        'url': 'http://www.svtplay.se/klipp/9023742/stopptid-om-bjorn-borg',
+        'only_matching': True,
+    }]
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
-        host = mobj.group('host')
-        return self._extract_video(
-            'http://www.%s.se/video/%s?output=json' % (host, video_id),
-            video_id)
+        video_id = self._match_id(url)
+
+        webpage = self._download_webpage(url, video_id)
+
+        data = self._parse_json(
+            self._search_regex(
+                r'root\["__svtplay"\]\s*=\s*([^;]+);',
+                webpage, 'embedded data', default='{}'),
+            video_id, fatal=False)
+
+        thumbnail = self._og_search_thumbnail(webpage)
+
+        if data:
+            video_info = try_get(
+                data, lambda x: x['context']['dispatcher']['stores']['VideoTitlePageStore']['data']['video'],
+                dict)
+            if video_info:
+                info_dict = self._extract_video(video_info, video_id)
+                info_dict.update({
+                    'title': data['context']['dispatcher']['stores']['MetaStore']['title'],
+                    'thumbnail': thumbnail,
+                })
+                return info_dict
+
+        video_id = self._search_regex(
+            r'<video[^>]+data-video-id=["\']([\da-zA-Z-]+)',
+            webpage, 'video id', default=None)
+
+        if video_id:
+            data = self._download_json(
+                'http://www.svt.se/videoplayer-api/video/%s' % video_id, video_id)
+            info_dict = self._extract_video(data, video_id)
+            if not info_dict.get('title'):
+                info_dict['title'] = re.sub(
+                    r'\s*\|\s*.+?$', '',
+                    info_dict.get('episode') or self._og_search_title(webpage))
+            return info_dict
55  youtube_dl/extractor/telewebion.py  Normal file
@@ -0,0 +1,55 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+
+
+class TelewebionIE(InfoExtractor):
+    _VALID_URL = r'https?://www\.telewebion\.com/#!/episode/(?P<id>\d+)'
+
+    _TEST = {
+        'url': 'http://www.telewebion.com/#!/episode/1263668/',
+        'info_dict': {
+            'id': '1263668',
+            'ext': 'mp4',
+            'title': 'قرعه\u200cکشی لیگ قهرمانان اروپا',
+            'thumbnail': 're:^https?://.*\.jpg',
+            'view_count': int,
+        },
+        'params': {
+            # m3u8 download
+            'skip_download': True,
+        },
+    }
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+
+        secure_token = self._download_webpage(
+            'http://m.s2.telewebion.com/op/op?action=getSecurityToken', video_id)
+        episode_details = self._download_json(
+            'http://m.s2.telewebion.com/op/op', video_id,
+            query={'action': 'getEpisodeDetails', 'episode_id': video_id})
+
+        m3u8_url = 'http://m.s1.telewebion.com/smil/%s.m3u8?filepath=%s&m3u8=1&secure_token=%s' % (
+            video_id, episode_details['file_path'], secure_token)
+        formats = self._extract_m3u8_formats(
+            m3u8_url, video_id, ext='mp4', m3u8_id='hls')
+
+        picture_paths = [
+            episode_details.get('picture_path'),
+            episode_details.get('large_picture_path'),
+        ]
+
+        thumbnails = [{
+            'url': picture_path,
+            'preference': idx,
+        } for idx, picture_path in enumerate(picture_paths) if picture_path is not None]
+
+        return {
+            'id': video_id,
+            'title': episode_details['title'],
+            'formats': formats,
+            'thumbnails': thumbnails,
+            'view_count': episode_details.get('view_count'),
+        }
@@ -48,6 +48,6 @@ class TF1IE(InfoExtractor):
         video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
         wat_id = self._html_search_regex(
-            r'(["\'])(?:https?:)?//www\.wat\.tv/embedframe/.*?(?P<id>\d{8}).*?\1',
+            r'(["\'])(?:https?:)?//www\.wat\.tv/embedframe/.*?(?P<id>\d{8})\1',
             webpage, 'wat id', group='id')
         return self.url_result('wat:%s' % wat_id, 'Wat')
|
@@ -277,9 +277,9 @@ class ThePlatformIE(ThePlatformBaseIE):


 class ThePlatformFeedIE(ThePlatformBaseIE):
-    _URL_TEMPLATE = '%s//feed.theplatform.com/f/%s/%s?form=json&byGuid=%s'
+    _URL_TEMPLATE = '%s//feed.theplatform.com/f/%s/%s?form=json&%s'
-    _VALID_URL = r'https?://feed\.theplatform\.com/f/(?P<provider_id>[^/]+)/(?P<feed_id>[^?/]+)\?(?:[^&]+&)*byGuid=(?P<id>[a-zA-Z0-9_]+)'
+    _VALID_URL = r'https?://feed\.theplatform\.com/f/(?P<provider_id>[^/]+)/(?P<feed_id>[^?/]+)\?(?:[^&]+&)*(?P<filter>by(?:Gui|I)d=(?P<id>[\w-]+))'
-    _TEST = {
+    _TESTS = [{
         # From http://player.theplatform.com/p/7wvmTC/MSNBCEmbeddedOffSite?guid=n_hardball_5biden_140207
         'url': 'http://feed.theplatform.com/f/7wvmTC/msnbc_video-p-test?form=json&pretty=true&range=-40&byGuid=n_hardball_5biden_140207',
         'md5': '6e32495b5073ab414471b615c5ded394',
@@ -295,30 +295,36 @@ class ThePlatformFeedIE(ThePlatformBaseIE):
             'categories': ['MSNBC/Issues/Democrats', 'MSNBC/Issues/Elections/Election 2016'],
             'uploader': 'NBCU-NEWS',
         },
-    }
+    }]

-    def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-
-        video_id = mobj.group('id')
-        provider_id = mobj.group('provider_id')
-        feed_id = mobj.group('feed_id')
-
-        real_url = self._URL_TEMPLATE % (self.http_scheme(), provider_id, feed_id, video_id)
-        feed = self._download_json(real_url, video_id)
-        entry = feed['entries'][0]
+    def _extract_feed_info(self, provider_id, feed_id, filter_query, video_id, custom_fields=None, asset_types_query={}):
+        real_url = self._URL_TEMPLATE % (self.http_scheme(), provider_id, feed_id, filter_query)
+        entry = self._download_json(real_url, video_id)['entries'][0]

         formats = []
         subtitles = {}
         first_video_id = None
         duration = None
+        asset_types = []
         for item in entry['media$content']:
-            smil_url = item['plfile$url'] + '&mbr=true'
+            smil_url = item['plfile$url']
             cur_video_id = ThePlatformIE._match_id(smil_url)
             if first_video_id is None:
                 first_video_id = cur_video_id
                 duration = float_or_none(item.get('plfile$duration'))
-            cur_formats, cur_subtitles = self._extract_theplatform_smil(smil_url, video_id, 'Downloading SMIL data for %s' % cur_video_id)
+            for asset_type in item['plfile$assetTypes']:
+                if asset_type in asset_types:
+                    continue
+                asset_types.append(asset_type)
+                query = {
+                    'mbr': 'true',
+                    'formats': item['plfile$format'],
+                    'assetTypes': asset_type,
+                }
+                if asset_type in asset_types_query:
+                    query.update(asset_types_query[asset_type])
+                cur_formats, cur_subtitles = self._extract_theplatform_smil(update_url_query(
+                    smil_url, query), video_id, 'Downloading SMIL data for %s' % asset_type)
                 formats.extend(cur_formats)
                 subtitles = self._merge_subtitles(subtitles, cur_subtitles)

@@ -344,5 +350,17 @@ class ThePlatformFeedIE(ThePlatformBaseIE):
             'timestamp': timestamp,
             'categories': categories,
         })
+        if custom_fields:
+            ret.update(custom_fields(entry))

         return ret

+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+
+        video_id = mobj.group('id')
+        provider_id = mobj.group('provider_id')
+        feed_id = mobj.group('feed_id')
+        filter_query = mobj.group('filter')
+
+        return self._extract_feed_info(provider_id, feed_id, filter_query, video_id)
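The hunks above turn ThePlatformFeedIE into a reusable base: `_extract_feed_info()` now takes the filter query plus optional per-asset-type query parameters and a `custom_fields` callback. For illustration only, a minimal sketch of a site-specific subclass; the URL pattern, provider/feed IDs and the `pl1$show` field are made-up placeholders, not taken from this diff:

from youtube_dl.extractor.theplatform import ThePlatformFeedIE


class SomeSiteFeedIE(ThePlatformFeedIE):
    # hypothetical URL pattern, for illustration only
    _VALID_URL = r'https?://video\.example\.com/(?P<id>[\w-]+)'

    def _real_extract(self, url):
        video_id = self._match_id(url)
        return self._extract_feed_info(
            'providerXYZ', 'some-feed', 'byGuid=%s' % video_id, video_id,
            # extra metadata pulled out of the raw feed entry
            custom_fields=lambda entry: {'series': entry.get('pl1$show')},
            # extra query parameters applied to a single asset type
            asset_types_query={'HLS': {'manifest': 'm3u'}})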
@@ -4,6 +4,12 @@ from __future__ import unicode_literals
 import re

 from .common import InfoExtractor
+from ..utils import (
+    determine_ext,
+    clean_html,
+    get_element_by_attribute,
+    ExtractorError,
+)


 class TVPIE(InfoExtractor):
@@ -21,7 +27,7 @@ class TVPIE(InfoExtractor):
         },
     }, {
         'url': 'http://www.tvp.pl/there-can-be-anything-so-i-shortened-it/17916176',
-        'md5': 'c3b15ed1af288131115ff17a17c19dda',
+        'md5': 'b0005b542e5b4de643a9690326ab1257',
         'info_dict': {
             'id': '17916176',
             'ext': 'mp4',
@@ -53,6 +59,11 @@ class TVPIE(InfoExtractor):
         webpage = self._download_webpage(
             'http://www.tvp.pl/sess/tvplayer.php?object_id=%s' % video_id, video_id)

+        error_massage = get_element_by_attribute('class', 'msg error', webpage)
+        if error_massage:
+            raise ExtractorError('%s said: %s' % (
+                self.IE_NAME, clean_html(error_massage)), expected=True)
+
         title = self._search_regex(
             r'name\s*:\s*([\'"])Title\1\s*,\s*value\s*:\s*\1(?P<title>.+?)\1',
             webpage, 'title', group='title')
@@ -66,24 +77,50 @@ class TVPIE(InfoExtractor):
             r"poster\s*:\s*'([^']+)'", webpage, 'thumbnail', default=None)

         video_url = self._search_regex(
-            r'0:{src:([\'"])(?P<url>.*?)\1', webpage, 'formats', group='url', default=None)
-        if not video_url:
+            r'0:{src:([\'"])(?P<url>.*?)\1', webpage,
+            'formats', group='url', default=None)
+        if not video_url or 'material_niedostepny.mp4' in video_url:
             video_url = self._download_json(
                 'http://www.tvp.pl/pub/stat/videofileinfo?video_id=%s' % video_id,
                 video_id)['video_url']

-        ext = video_url.rsplit('.', 1)[-1]
-        if ext != 'ism/manifest':
-            if '/' in ext:
-                ext = 'mp4'
+        formats = []
+        video_url_base = self._search_regex(
+            r'(https?://.+?/video)(?:\.(?:ism|f4m|m3u8)|-\d+\.mp4)',
+            video_url, 'video base url', default=None)
+        if video_url_base:
+            # TODO: Current DASH formats are broken - $Time$ pattern in
+            # <SegmentTemplate> not implemented yet
+            # formats.extend(self._extract_mpd_formats(
+            #     video_url_base + '.ism/video.mpd',
+            #     video_id, mpd_id='dash', fatal=False))
+            formats.extend(self._extract_f4m_formats(
+                video_url_base + '.ism/video.f4m',
+                video_id, f4m_id='hds', fatal=False))
+            m3u8_formats = self._extract_m3u8_formats(
+                video_url_base + '.ism/video.m3u8', video_id,
+                'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)
+            self._sort_formats(m3u8_formats)
+            m3u8_formats = list(filter(
+                lambda f: f.get('vcodec') != 'none' and f.get('resolution') != 'multiple',
+                m3u8_formats))
+            formats.extend(m3u8_formats)
+            for i, m3u8_format in enumerate(m3u8_formats, 2):
+                http_url = '%s-%d.mp4' % (video_url_base, i)
+                if self._is_valid_url(http_url, video_id):
+                    f = m3u8_format.copy()
+                    f.update({
+                        'url': http_url,
+                        'format_id': f['format_id'].replace('hls', 'http'),
+                        'protocol': 'http',
+                    })
+                    formats.append(f)
+        else:
             formats = [{
                 'format_id': 'direct',
                 'url': video_url,
-                'ext': ext,
+                'ext': determine_ext(video_url, 'mp4'),
             }]
-        else:
-            m3u8_url = re.sub('([^/]*)\.ism/manifest', r'\1.ism/\1.m3u8', video_url)
-            formats = self._extract_m3u8_formats(m3u8_url, video_id, 'mp4')

         self._sort_formats(formats)

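The rewritten TVP code no longer guesses the extension by splitting the URL; it calls `determine_ext()` with an explicit fallback. Roughly how that helper behaves (the example URLs are made up):

from youtube_dl.utils import determine_ext

determine_ext('http://example.com/video.f4m')                 # 'f4m'
determine_ext('http://example.com/video?format=hls', 'mp4')   # no usable suffix, falls back to 'mp4'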
@@ -16,6 +16,7 @@ from ..compat import (
 from ..utils import (
     ExtractorError,
     int_or_none,
+    js_to_json,
     orderedSet,
     parse_duration,
     parse_iso8601,
@@ -454,3 +455,45 @@ class TwitchStreamIE(TwitchBaseIE):
             'formats': formats,
             'is_live': True,
         }
+
+
+class TwitchClipsIE(InfoExtractor):
+    IE_NAME = 'twitch:clips'
+    _VALID_URL = r'https?://clips\.twitch\.tv/(?:[^/]+/)*(?P<id>[^/?#&]+)'
+
+    _TEST = {
+        'url': 'https://clips.twitch.tv/ea/AggressiveCobraPoooound',
+        'md5': '761769e1eafce0ffebfb4089cb3847cd',
+        'info_dict': {
+            'id': 'AggressiveCobraPoooound',
+            'ext': 'mp4',
+            'title': 'EA Play 2016 Live from the Novo Theatre',
+            'thumbnail': 're:^https?://.*\.jpg',
+            'creator': 'EA',
+            'uploader': 'stereotype_',
+            'uploader_id': 'stereotype_',
+        },
+    }
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+
+        webpage = self._download_webpage(url, video_id)
+
+        clip = self._parse_json(
+            self._search_regex(
+                r'(?s)clipInfo\s*=\s*({.+?});', webpage, 'clip info'),
+            video_id, transform_source=js_to_json)
+
+        video_url = clip['clip_video_url']
+        title = clip['channel_title']
+
+        return {
+            'id': video_id,
+            'url': video_url,
+            'title': title,
+            'thumbnail': self._og_search_thumbnail(webpage),
+            'creator': clip.get('broadcaster_display_name') or clip.get('broadcaster_login'),
+            'uploader': clip.get('curator_login'),
+            'uploader_id': clip.get('curator_display_name'),
+        }
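The new clips extractor reads a JavaScript object literal out of the page and runs it through `js_to_json` before parsing. A standalone sketch of that step (the page snippet and URL are made up):

import json
import re

from youtube_dl.utils import js_to_json

webpage = "var clipInfo = {clip_video_url: 'https://example.com/clip.mp4', channel_title: 'Example clip'};"

raw = re.search(r'(?s)clipInfo\s*=\s*({.+?});', webpage).group(1)
clip = json.loads(js_to_json(raw))  # unquoted keys and single quotes become valid JSON
print(clip['clip_video_url'], clip['channel_title'])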
84
youtube_dl/extractor/vidbit.py
Normal file
@@ -0,0 +1,84 @@
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..compat import compat_urlparse
+from ..utils import (
+    int_or_none,
+    js_to_json,
+    remove_end,
+    unified_strdate,
+)
+
+
+class VidbitIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?vidbit\.co/(?:watch|embed)\?.*?\bv=(?P<id>[\da-zA-Z]+)'
+    _TESTS = [{
+        'url': 'http://www.vidbit.co/watch?v=jkL2yDOEq2',
+        'md5': '1a34b7f14defe3b8fafca9796892924d',
+        'info_dict': {
+            'id': 'jkL2yDOEq2',
+            'ext': 'mp4',
+            'title': 'Intro to VidBit',
+            'description': 'md5:5e0d6142eec00b766cbf114bfd3d16b7',
+            'thumbnail': 're:https?://.*\.jpg$',
+            'upload_date': '20160618',
+            'view_count': int,
+            'comment_count': int,
+        }
+    }, {
+        'url': 'http://www.vidbit.co/embed?v=jkL2yDOEq2&auto=0&water=0',
+        'only_matching': True,
+    }]
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+
+        webpage = self._download_webpage(
+            compat_urlparse.urljoin(url, '/watch?v=%s' % video_id), video_id)
+
+        video_url, title = [None] * 2
+
+        config = self._parse_json(self._search_regex(
+            r'(?s)\.setup\(({.+?})\);', webpage, 'setup', default='{}'),
+            video_id, transform_source=js_to_json)
+        if config:
+            if config.get('file'):
+                video_url = compat_urlparse.urljoin(url, config['file'])
+            title = config.get('title')
+
+        if not video_url:
+            video_url = compat_urlparse.urljoin(url, self._search_regex(
+                r'file\s*:\s*(["\'])(?P<url>(?:(?!\1).)+)\1',
+                webpage, 'video URL', group='url'))
+
+        if not title:
+            title = remove_end(
+                self._html_search_regex(
+                    (r'<h1>(.+?)</h1>', r'<title>(.+?)</title>'),
+                    webpage, 'title', default=None) or self._og_search_title(webpage),
+                ' - VidBit')
+
+        description = self._html_search_meta(
+            ('description', 'og:description', 'twitter:description'),
+            webpage, 'description')
+
+        upload_date = unified_strdate(self._html_search_meta(
+            'datePublished', webpage, 'upload date'))
+
+        view_count = int_or_none(self._search_regex(
+            r'<strong>(\d+)</strong> views',
+            webpage, 'view count', fatal=False))
+        comment_count = int_or_none(self._search_regex(
+            r'id=["\']cmt_num["\'][^>]*>\((\d+)\)',
+            webpage, 'comment count', fatal=False))
+
+        return {
+            'id': video_id,
+            'url': video_url,
+            'title': title,
+            'description': description,
+            'thumbnail': self._og_search_thumbnail(webpage),
+            'upload_date': upload_date,
+            'view_count': view_count,
+            'comment_count': comment_count,
+        }
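The new extractor leans on a few helpers from `youtube_dl.utils`. A quick illustration of what they return (the inputs are made up):

from youtube_dl.utils import int_or_none, remove_end, unified_strdate

remove_end('Intro to VidBit - VidBit', ' - VidBit')  # 'Intro to VidBit'
unified_strdate('2016-06-18')                        # '20160618'
int_or_none('42')                                    # 42
int_or_none(None)                                    # None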
@@ -101,10 +101,13 @@ class VikiBaseIE(InfoExtractor):
             self.report_warning('Unable to get session token, login has probably failed')

     @staticmethod
-    def dict_selection(dict_obj, preferred_key):
+    def dict_selection(dict_obj, preferred_key, allow_fallback=True):
         if preferred_key in dict_obj:
             return dict_obj.get(preferred_key)

+        if not allow_fallback:
+            return
+
         filtered_dict = list(filter(None, [dict_obj.get(k) for k in dict_obj.keys()]))
         return filtered_dict[0] if filtered_dict else None

@@ -127,7 +130,7 @@ class VikiIE(VikiBaseIE):
     }, {
         # clip
         'url': 'http://www.viki.com/videos/1067139v-the-avengers-age-of-ultron-press-conference',
-        'md5': '86c0b5dbd4d83a6611a79987cc7a1989',
+        'md5': 'feea2b1d7b3957f70886e6dfd8b8be84',
         'info_dict': {
             'id': '1067139v',
             'ext': 'mp4',
@@ -156,17 +159,18 @@ class VikiIE(VikiBaseIE):
         'params': {
             # m3u8 download
             'skip_download': True,
-        }
+        },
+        'skip': 'Blocked in the US',
     }, {
         # episode
         'url': 'http://www.viki.com/videos/44699v-boys-over-flowers-episode-1',
-        'md5': '190f3ef426005ba3a080a63325955bc3',
+        'md5': '1f54697dabc8f13f31bf06bb2e4de6db',
         'info_dict': {
             'id': '44699v',
             'ext': 'mp4',
             'title': 'Boys Over Flowers - Episode 1',
-            'description': 'md5:52617e4f729c7d03bfd4bcbbb6e946f2',
+            'description': 'md5:b89cf50038b480b88b5b3c93589a9076',
-            'duration': 4155,
+            'duration': 4204,
             'timestamp': 1270496524,
             'upload_date': '20100405',
             'uploader': 'group8',
@@ -196,7 +200,7 @@ class VikiIE(VikiBaseIE):
     }, {
         # non-English description
         'url': 'http://www.viki.com/videos/158036v-love-in-magic',
-        'md5': '1713ae35df5a521b31f6dc40730e7c9c',
+        'md5': '013dc282714e22acf9447cad14ff1208',
         'info_dict': {
             'id': '158036v',
             'ext': 'mp4',
@@ -217,7 +221,7 @@ class VikiIE(VikiBaseIE):

         self._check_errors(video)

-        title = self.dict_selection(video.get('titles', {}), 'en')
+        title = self.dict_selection(video.get('titles', {}), 'en', allow_fallback=False)
         if not title:
             title = 'Episode %d' % video.get('number') if video.get('type') == 'episode' else video.get('id') or video_id
             container_titles = video.get('container', {}).get('titles', {})
@@ -302,7 +306,7 @@ class VikiChannelIE(VikiBaseIE):
             'title': 'Boys Over Flowers',
             'description': 'md5:ecd3cff47967fe193cff37c0bec52790',
         },
-        'playlist_count': 70,
+        'playlist_mincount': 71,
     }, {
         'url': 'http://www.viki.com/tv/1354c-poor-nastya-complete',
         'info_dict': {
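The new `allow_fallback` switch lets callers of `dict_selection()` distinguish "no English title" from "no title at all". A quick illustration (the sample dict is made up):

from youtube_dl.extractor.viki import VikiBaseIE

titles = {'ko': 'Korean title', 'ja': 'Japanese title'}

VikiBaseIE.dict_selection(titles, 'en')                        # falls back to one of the non-empty values
VikiBaseIE.dict_selection(titles, 'en', allow_fallback=False)  # None, so the caller can build its own default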
@@ -8,6 +8,7 @@ import itertools
 from .common import InfoExtractor
 from ..compat import (
     compat_HTTPError,
+    compat_str,
     compat_urlparse,
 )
 from ..utils import (
@@ -15,6 +16,7 @@ from ..utils import (
     ExtractorError,
     InAdvancePagedList,
     int_or_none,
+    NO_DEFAULT,
     RegexNotFoundError,
     sanitized_Request,
     smuggle_url,
@@ -24,6 +26,7 @@ from ..utils import (
     urlencode_postdata,
     unescapeHTML,
     parse_filesize,
+    try_get,
 )


@@ -54,6 +57,26 @@ class VimeoBaseInfoExtractor(InfoExtractor):
         self._set_vimeo_cookie('vuid', vuid)
         self._download_webpage(login_request, None, False, 'Wrong login info')

+    def _verify_video_password(self, url, video_id, webpage):
+        password = self._downloader.params.get('videopassword')
+        if password is None:
+            raise ExtractorError('This video is protected by a password, use the --video-password option', expected=True)
+        token, vuid = self._extract_xsrft_and_vuid(webpage)
+        data = urlencode_postdata({
+            'password': password,
+            'token': token,
+        })
+        if url.startswith('http://'):
+            # vimeo only supports https now, but the user can give an http url
+            url = url.replace('http://', 'https://')
+        password_request = sanitized_Request(url + '/password', data)
+        password_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
+        password_request.add_header('Referer', url)
+        self._set_vimeo_cookie('vuid', vuid)
+        return self._download_webpage(
+            password_request, video_id,
+            'Verifying the password', 'Wrong password')
+
     def _extract_xsrft_and_vuid(self, webpage):
         xsrft = self._search_regex(
             r'(?:(?P<q1>["\'])xsrft(?P=q1)\s*:|xsrft\s*[=:])\s*(?P<q>["\'])(?P<xsrft>.+?)(?P=q)',
@@ -66,6 +89,69 @@ class VimeoBaseInfoExtractor(InfoExtractor):
     def _set_vimeo_cookie(self, name, value):
         self._set_cookie('vimeo.com', name, value)

+    def _vimeo_sort_formats(self, formats):
+        # Bitrates are completely broken. Single m3u8 may contain entries in kbps and bps
+        # at the same time without actual units specified. This lead to wrong sorting.
+        self._sort_formats(formats, field_preference=('preference', 'height', 'width', 'fps', 'format_id'))
+
+    def _parse_config(self, config, video_id):
+        # Extract title
+        video_title = config['video']['title']
+
+        # Extract uploader, uploader_url and uploader_id
+        video_uploader = config['video'].get('owner', {}).get('name')
+        video_uploader_url = config['video'].get('owner', {}).get('url')
+        video_uploader_id = video_uploader_url.split('/')[-1] if video_uploader_url else None
+
+        # Extract video thumbnail
+        video_thumbnail = config['video'].get('thumbnail')
+        if video_thumbnail is None:
+            video_thumbs = config['video'].get('thumbs')
+            if video_thumbs and isinstance(video_thumbs, dict):
+                _, video_thumbnail = sorted((int(width if width.isdigit() else 0), t_url) for (width, t_url) in video_thumbs.items())[-1]
+
+        # Extract video duration
+        video_duration = int_or_none(config['video'].get('duration'))
+
+        formats = []
+        config_files = config['video'].get('files') or config['request'].get('files', {})
+        for f in config_files.get('progressive', []):
+            video_url = f.get('url')
+            if not video_url:
+                continue
+            formats.append({
+                'url': video_url,
+                'format_id': 'http-%s' % f.get('quality'),
+                'width': int_or_none(f.get('width')),
+                'height': int_or_none(f.get('height')),
+                'fps': int_or_none(f.get('fps')),
+                'tbr': int_or_none(f.get('bitrate')),
+            })
+        m3u8_url = config_files.get('hls', {}).get('url')
+        if m3u8_url:
+            formats.extend(self._extract_m3u8_formats(
+                m3u8_url, video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False))
+
+        subtitles = {}
+        text_tracks = config['request'].get('text_tracks')
+        if text_tracks:
+            for tt in text_tracks:
+                subtitles[tt['lang']] = [{
+                    'ext': 'vtt',
+                    'url': 'https://vimeo.com' + tt['url'],
+                }]
+
+        return {
+            'title': video_title,
+            'uploader': video_uploader,
+            'uploader_id': video_uploader_id,
+            'uploader_url': video_uploader_url,
+            'thumbnail': video_thumbnail,
+            'duration': video_duration,
+            'formats': formats,
+            'subtitles': subtitles,
+        }


 class VimeoIE(VimeoBaseInfoExtractor):
     """Information extractor for vimeo.com."""
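The new `_parse_config()` centralizes the player-config parsing that was previously done inline in VimeoIE. For reference, a made-up and heavily trimmed config in the shape the method expects (real configs come from the player endpoint and carry many more fields):

config = {
    'video': {
        'title': 'Sample clip',
        'owner': {'name': 'Some User', 'url': 'https://vimeo.com/someuser'},
        'duration': 10,
        'thumbs': {'640': 'https://i.vimeocdn.com/video/sample_640.jpg'},
        'files': {
            'progressive': [{
                'url': 'https://example.com/sample_360.mp4',
                'quality': '360p', 'width': 640, 'height': 360,
                'fps': 25, 'bitrate': 700,
            }],
            'hls': {'url': 'https://example.com/sample.m3u8'},
        },
    },
    'request': {
        'text_tracks': [{'lang': 'en', 'url': '/texttrack/123.vtt'}],
    },
}
# ie._parse_config(config, '12345') would then yield title, uploader,
# thumbnail, duration, the HTTP and HLS formats and the subtitle track.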
@@ -81,7 +167,7 @@ class VimeoIE(VimeoBaseInfoExtractor):
                        \.
                    )?
                    vimeo(?P<pro>pro)?\.com/
-                   (?!channels/[^/?#]+/?(?:$|[?#])|[^/]+/review/|(?:album|ondemand)/)
+                   (?!(?:channels|album)/[^/?#]+/?(?:$|[?#])|[^/]+/review/|ondemand/)
                    (?:.*?/)?
                    (?:
                        (?:
@@ -153,7 +239,7 @@ class VimeoIE(VimeoBaseInfoExtractor):
                'uploader_id': 'user18948128',
                'uploader': 'Jaime Marquínez Ferrándiz',
                'duration': 10,
-               'description': 'This is "youtube-dl password protected test video" by Jaime Marquínez Ferrándiz on Vimeo, the home for high quality videos and the people\u2026',
+               'description': 'This is "youtube-dl password protected test video" by on Vimeo, the home for high quality videos and the people who love them.',
            },
            'params': {
                'videopassword': 'youtube-dl',
@@ -162,8 +248,6 @@ class VimeoIE(VimeoBaseInfoExtractor):
        {
            'url': 'http://vimeo.com/channels/keypeele/75629013',
            'md5': '2f86a05afe9d7abc0b9126d229bbe15d',
-           'note': 'Video is freely available via original URL '
-                   'and protected with password when accessed via http://vimeo.com/75629013',
            'info_dict': {
                'id': '75629013',
                'ext': 'mp4',
@@ -207,7 +291,7 @@ class VimeoIE(VimeoBaseInfoExtractor):
        {
            # contains original format
            'url': 'https://vimeo.com/33951933',
-           'md5': '53c688fa95a55bf4b7293d37a89c5c53',
+           'md5': '2d9f5475e0537f013d0073e812ab89e6',
            'info_dict': {
                'id': '33951933',
                'ext': 'mp4',
@@ -219,6 +303,29 @@ class VimeoIE(VimeoBaseInfoExtractor):
                'description': 'md5:ae23671e82d05415868f7ad1aec21147',
            },
        },
+       {
+           # only available via https://vimeo.com/channels/tributes/6213729 and
+           # not via https://vimeo.com/6213729
+           'url': 'https://vimeo.com/channels/tributes/6213729',
+           'info_dict': {
+               'id': '6213729',
+               'ext': 'mp4',
+               'title': 'Vimeo Tribute: The Shining',
+               'uploader': 'Casey Donahue',
+               'uploader_url': 're:https?://(?:www\.)?vimeo\.com/caseydonahue',
+               'uploader_id': 'caseydonahue',
+               'upload_date': '20090821',
+               'description': 'md5:bdbf314014e58713e6e5b66eb252f4a6',
+           },
+           'params': {
+               'skip_download': True,
+           },
+           'expected_warnings': ['Unable to download JSON metadata'],
+       },
+       {
+           'url': 'http://vimeo.com/moogaloop.swf?clip_id=2539741',
+           'only_matching': True,
+       },
        {
            'url': 'https://vimeo.com/109815029',
            'note': 'Video not completely processed, "failed" seed status',
@@ -228,6 +335,10 @@ class VimeoIE(VimeoBaseInfoExtractor):
            'url': 'https://vimeo.com/groups/travelhd/videos/22439234',
            'only_matching': True,
        },
+       {
+           'url': 'https://vimeo.com/album/2632481/video/79010983',
+           'only_matching': True,
+       },
        {
            # source file returns 403: Forbidden
            'url': 'https://vimeo.com/7809605',
@@ -254,26 +365,6 @@ class VimeoIE(VimeoBaseInfoExtractor):
         if mobj:
             return mobj.group(1)

-    def _verify_video_password(self, url, video_id, webpage):
-        password = self._downloader.params.get('videopassword')
-        if password is None:
-            raise ExtractorError('This video is protected by a password, use the --video-password option', expected=True)
-        token, vuid = self._extract_xsrft_and_vuid(webpage)
-        data = urlencode_postdata({
-            'password': password,
-            'token': token,
-        })
-        if url.startswith('http://'):
-            # vimeo only supports https now, but the user can give an http url
-            url = url.replace('http://', 'https://')
-        password_request = sanitized_Request(url + '/password', data)
-        password_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
-        password_request.add_header('Referer', url)
-        self._set_vimeo_cookie('vuid', vuid)
-        return self._download_webpage(
-            password_request, video_id,
-            'Verifying the password', 'Wrong password')
-
     def _verify_player_video_password(self, url, video_id):
         password = self._downloader.params.get('videopassword')
         if password is None:
@@ -304,7 +395,7 @@ class VimeoIE(VimeoBaseInfoExtractor):
         orig_url = url
         if mobj.group('pro') or mobj.group('player'):
             url = 'https://player.vimeo.com/video/' + video_id
-        else:
+        elif any(p in url for p in ('play_redirect_hls', 'moogaloop.swf')):
             url = 'https://vimeo.com/' + video_id

         # Retrieve video webpage to extract further information
@@ -382,28 +473,24 @@ class VimeoIE(VimeoBaseInfoExtractor):
             if config.get('view') == 4:
                 config = self._verify_player_video_password(url, video_id)

+        def is_rented():
             if '>You rented this title.<' in webpage:
+                return True
+            if config.get('user', {}).get('purchased'):
+                return True
+            label = try_get(
+                config, lambda x: x['video']['vod']['purchase_options'][0]['label_string'], compat_str)
+            if label and label.startswith('You rented this'):
+                return True
+            return False
+
+        if is_rented():
             feature_id = config.get('video', {}).get('vod', {}).get('feature_id')
             if feature_id and not data.get('force_feature_id', False):
                 return self.url_result(smuggle_url(
                     'https://player.vimeo.com/player/%s' % feature_id,
                     {'force_feature_id': True}), 'Vimeo')

-        # Extract title
-        video_title = config['video']['title']
-
-        # Extract uploader, uploader_url and uploader_id
-        video_uploader = config['video'].get('owner', {}).get('name')
-        video_uploader_url = config['video'].get('owner', {}).get('url')
-        video_uploader_id = video_uploader_url.split('/')[-1] if video_uploader_url else None
-
-        # Extract video thumbnail
-        video_thumbnail = config['video'].get('thumbnail')
-        if video_thumbnail is None:
-            video_thumbs = config['video'].get('thumbs')
-            if video_thumbs and isinstance(video_thumbs, dict):
-                _, video_thumbnail = sorted((int(width if width.isdigit() else 0), t_url) for (width, t_url) in video_thumbs.items())[-1]

         # Extract video description

         video_description = self._html_search_regex(
@@ -423,9 +510,6 @@ class VimeoIE(VimeoBaseInfoExtractor):
         if not video_description and not mobj.group('player'):
             self._downloader.report_warning('Cannot find video description')

-        # Extract video duration
-        video_duration = int_or_none(config['video'].get('duration'))
-
         # Extract upload date
         video_upload_date = None
         mobj = re.search(r'<time[^>]+datetime="([^"]+)"', webpage)
@@ -463,53 +547,22 @@ class VimeoIE(VimeoBaseInfoExtractor):
                            'format_id': source_name,
                            'preference': 1,
                        })
-        config_files = config['video'].get('files') or config['request'].get('files', {})
-        for f in config_files.get('progressive', []):
-            video_url = f.get('url')
-            if not video_url:
-                continue
-            formats.append({
-                'url': video_url,
-                'format_id': 'http-%s' % f.get('quality'),
-                'width': int_or_none(f.get('width')),
-                'height': int_or_none(f.get('height')),
-                'fps': int_or_none(f.get('fps')),
-                'tbr': int_or_none(f.get('bitrate')),
-            })
-        m3u8_url = config_files.get('hls', {}).get('url')
-        if m3u8_url:
-            formats.extend(self._extract_m3u8_formats(
-                m3u8_url, video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False))
-        # Bitrates are completely broken. Single m3u8 may contain entries in kbps and bps
-        # at the same time without actual units specified. This lead to wrong sorting.
-        self._sort_formats(formats, field_preference=('preference', 'height', 'width', 'fps', 'format_id'))

-        subtitles = {}
-        text_tracks = config['request'].get('text_tracks')
-        if text_tracks:
-            for tt in text_tracks:
-                subtitles[tt['lang']] = [{
-                    'ext': 'vtt',
-                    'url': 'https://vimeo.com' + tt['url'],
-                }]
-
-        return {
+        info_dict = self._parse_config(config, video_id)
+        formats.extend(info_dict['formats'])
+        self._vimeo_sort_formats(formats)
+        info_dict.update({
             'id': video_id,
-            'uploader': video_uploader,
-            'uploader_url': video_uploader_url,
-            'uploader_id': video_uploader_id,
-            'upload_date': video_upload_date,
-            'title': video_title,
-            'thumbnail': video_thumbnail,
-            'description': video_description,
-            'duration': video_duration,
             'formats': formats,
+            'upload_date': video_upload_date,
+            'description': video_description,
             'webpage_url': url,
             'view_count': view_count,
             'like_count': like_count,
             'comment_count': comment_count,
-            'subtitles': subtitles,
-        }
+        })
+
+        return info_dict


 class VimeoOndemandIE(VimeoBaseInfoExtractor):
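The new `is_rented()` helper uses the freshly imported `try_get()` to walk a deeply nested, possibly missing structure without a chain of `.get()` calls. Roughly how it behaves (the sample dict is made up):

from youtube_dl.compat import compat_str
from youtube_dl.utils import try_get

config = {'video': {'vod': {'purchase_options': [{'label_string': 'You rented this title.'}]}}}

try_get(config, lambda x: x['video']['vod']['purchase_options'][0]['label_string'], compat_str)
# 'You rented this title.'; returns None instead of raising if any key or index is missing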
@@ -603,8 +656,21 @@ class VimeoChannelIE(VimeoBaseInfoExtractor):
                 webpage = self._login_list_password(page_url, list_id, webpage)
                 yield self._extract_list_title(webpage)

-            for video_id in re.findall(r'id="clip_(\d+?)"', webpage):
-                yield self.url_result('https://vimeo.com/%s' % video_id, 'Vimeo')
+            # Try extracting href first since not all videos are available via
+            # short https://vimeo.com/id URL (e.g. https://vimeo.com/channels/tributes/6213729)
+            clips = re.findall(
+                r'id="clip_(\d+)"[^>]*>\s*<a[^>]+href="(/(?:[^/]+/)*\1)', webpage)
+            if clips:
+                for video_id, video_url in clips:
+                    yield self.url_result(
+                        compat_urlparse.urljoin(base_url, video_url),
+                        VimeoIE.ie_key(), video_id=video_id)
+            # More relaxed fallback
+            else:
+                for video_id in re.findall(r'id=["\']clip_(\d+)', webpage):
+                    yield self.url_result(
+                        'https://vimeo.com/%s' % video_id,
+                        VimeoIE.ie_key(), video_id=video_id)

             if re.search(self._MORE_PAGES_INDICATOR, webpage, re.DOTALL) is None:
                 break
@@ -641,7 +707,7 @@ class VimeoUserIE(VimeoChannelIE):

 class VimeoAlbumIE(VimeoChannelIE):
     IE_NAME = 'vimeo:album'
-    _VALID_URL = r'https://vimeo\.com/album/(?P<id>\d+)'
+    _VALID_URL = r'https://vimeo\.com/album/(?P<id>\d+)(?:$|[?#]|/(?!video))'
     _TITLE_RE = r'<header id="page_header">\n\s*<h1>(.*?)</h1>'
     _TESTS = [{
         'url': 'https://vimeo.com/album/2632481',
@@ -661,6 +727,13 @@ class VimeoAlbumIE(VimeoChannelIE):
         'params': {
             'videopassword': 'youtube-dl',
         }
+    }, {
+        'url': 'https://vimeo.com/album/2632481/sort:plays/format:thumbnail',
+        'only_matching': True,
+    }, {
+        # TODO: respect page number
+        'url': 'https://vimeo.com/album/2632481/page:2/sort:plays/format:thumbnail',
+        'only_matching': True,
     }]

     def _page_url(self, base_url, pagenum):
@@ -692,7 +765,7 @@ class VimeoGroupsIE(VimeoAlbumIE):
         return self._extract_videos(name, 'https://vimeo.com/groups/%s' % name)


-class VimeoReviewIE(InfoExtractor):
+class VimeoReviewIE(VimeoBaseInfoExtractor):
     IE_NAME = 'vimeo:review'
     IE_DESC = 'Review pages on vimeo'
     _VALID_URL = r'https://vimeo\.com/[^/]+/review/(?P<id>[^/]+)'
@@ -704,6 +777,7 @@ class VimeoReviewIE(InfoExtractor):
             'ext': 'mp4',
             'title': "DICK HARDWICK 'Comedian'",
             'uploader': 'Richard Hardwick',
+            'uploader_id': 'user21297594',
         }
     }, {
         'note': 'video player needs Referer',
@@ -716,14 +790,45 @@ class VimeoReviewIE(InfoExtractor):
             'uploader': 'DevWeek Events',
             'duration': 2773,
             'thumbnail': 're:^https?://.*\.jpg$',
+            'uploader_id': 'user22258446',
         }
+    }, {
+        'note': 'Password protected',
+        'url': 'https://vimeo.com/user37284429/review/138823582/c4d865efde',
+        'info_dict': {
+            'id': '138823582',
+            'ext': 'mp4',
+            'title': 'EFFICIENT PICKUP MASTERCLASS MODULE 1',
+            'uploader': 'TMB',
+            'uploader_id': 'user37284429',
+        },
+        'params': {
+            'videopassword': 'holygrail',
+        },
     }]

+    def _real_initialize(self):
+        self._login()
+
+    def _get_config_url(self, webpage_url, video_id, video_password_verified=False):
+        webpage = self._download_webpage(webpage_url, video_id)
+        config_url = self._html_search_regex(
+            r'data-config-url="([^"]+)"', webpage, 'config URL',
+            default=NO_DEFAULT if video_password_verified else None)
+        if config_url is None:
+            self._verify_video_password(webpage_url, video_id, webpage)
+            config_url = self._get_config_url(
+                webpage_url, video_id, video_password_verified=True)
+        return config_url
+
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
-        player_url = 'https://player.vimeo.com/player/' + video_id
-        return self.url_result(player_url, 'Vimeo', video_id)
+        video_id = self._match_id(url)
+        config_url = self._get_config_url(url, video_id)
+        config = self._download_json(config_url, video_id)
+        info_dict = self._parse_config(config, video_id)
+        self._vimeo_sort_formats(info_dict['formats'])
+        info_dict['id'] = video_id
+        return info_dict


 class VimeoWatchLaterIE(VimeoChannelIE):
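The channel listing now prefers the full href next to each clip id, so videos that are only reachable through their channel URL still resolve. The new regex applied to a made-up page fragment:

import re

webpage = '<li id="clip_6213729"> <a href="/channels/tributes/6213729">Vimeo Tribute</a></li>'
re.findall(r'id="clip_(\d+)"[^>]*>\s*<a[^>]+href="(/(?:[^/]+/)*\1)', webpage)
# [('6213729', '/channels/tributes/6213729')]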
@@ -24,6 +24,7 @@ class VineIE(InfoExtractor):
             'upload_date': '20130519',
             'uploader': 'Jack Dorsey',
             'uploader_id': '76',
+            'view_count': int,
             'like_count': int,
             'comment_count': int,
             'repost_count': int,
@@ -39,6 +40,7 @@ class VineIE(InfoExtractor):
             'upload_date': '20140815',
             'uploader': 'Mars Ruiz',
             'uploader_id': '1102363502380728320',
+            'view_count': int,
             'like_count': int,
             'comment_count': int,
             'repost_count': int,
@@ -54,6 +56,7 @@ class VineIE(InfoExtractor):
             'upload_date': '20130430',
             'uploader': 'Z3k3',
             'uploader_id': '936470460173008896',
+            'view_count': int,
             'like_count': int,
             'comment_count': int,
             'repost_count': int,
@@ -71,6 +74,7 @@ class VineIE(InfoExtractor):
             'upload_date': '20150705',
             'uploader': 'Pimry_zaa',
             'uploader_id': '1135760698325307392',
+            'view_count': int,
             'like_count': int,
             'comment_count': int,
             'repost_count': int,
@@ -109,6 +113,7 @@ class VineIE(InfoExtractor):
             'upload_date': unified_strdate(data.get('created')),
             'uploader': username,
             'uploader_id': data.get('userIdStr'),
+            'view_count': int_or_none(data.get('loops', {}).get('count')),
             'like_count': int_or_none(data.get('likes', {}).get('count')),
             'comment_count': int_or_none(data.get('comments', {}).get('count')),
             'repost_count': int_or_none(data.get('reposts', {}).get('count')),
@@ -3,6 +3,7 @@ from __future__ import unicode_literals

 import re
 import json
+import sys

 from .common import InfoExtractor
 from ..compat import compat_str
@@ -10,7 +11,6 @@ from ..utils import (
     ExtractorError,
     int_or_none,
     orderedSet,
-    sanitized_Request,
     str_to_int,
     unescapeHTML,
     unified_strdate,
@@ -190,7 +190,7 @@ class VKIE(InfoExtractor):
         if username is None:
             return

-        login_page = self._download_webpage(
+        login_page, url_handle = self._download_webpage_handle(
             'https://vk.com', None, 'Downloading login page')

         login_form = self._hidden_inputs(login_page)
@@ -200,11 +200,26 @@ class VKIE(InfoExtractor):
             'pass': password.encode('cp1251'),
         })

-        request = sanitized_Request(
-            'https://login.vk.com/?act=login',
-            urlencode_postdata(login_form))
+        # https://new.vk.com/ serves two same remixlhk cookies in Set-Cookie header
+        # and expects the first one to be set rather than second (see
+        # https://github.com/rg3/youtube-dl/issues/9841#issuecomment-227871201).
+        # As of RFC6265 the newer one cookie should be set into cookie store
+        # what actually happens.
+        # We will workaround this VK issue by resetting the remixlhk cookie to
+        # the first one manually.
+        cookies = url_handle.headers.get('Set-Cookie')
+        if sys.version_info[0] >= 3:
+            cookies = cookies.encode('iso-8859-1')
+        cookies = cookies.decode('utf-8')
+        remixlhk = re.search(r'remixlhk=(.+?);.*?\bdomain=(.+?)(?:[,;]|$)', cookies)
+        if remixlhk:
+            value, domain = remixlhk.groups()
+            self._set_cookie(domain, 'remixlhk', value)
+
         login_page = self._download_webpage(
-            request, None, note='Logging in as %s' % username)
+            'https://login.vk.com/?act=login', None,
+            note='Logging in as %s' % username,
+            data=urlencode_postdata(login_form))

         if re.search(r'onLoginFailed', login_page):
             raise ExtractorError(
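The VK login workaround picks the first of the duplicated remixlhk cookies straight out of the raw Set-Cookie header. The same regex applied to a made-up header value:

import re

set_cookie = ('remixlhk=first_value; path=/; domain=.vk.com, '
              'remixlhk=second_value; path=/; domain=.login.vk.com')

m = re.search(r'remixlhk=(.+?);.*?\bdomain=(.+?)(?:[,;]|$)', set_cookie)
if m:
    value, domain = m.groups()
    print(value, domain)  # first_value .vk.com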
@@ -15,7 +15,87 @@ from ..utils import (
 )


-class WDRIE(InfoExtractor):
+class WDRBaseIE(InfoExtractor):
+    def _extract_wdr_video(self, webpage, display_id):
+        # for wdr.de the data-extension is in a tag with the class "mediaLink"
+        # for wdr.de radio players, in a tag with the class "wdrrPlayerPlayBtn"
+        # for wdrmaus its in a link to the page in a multiline "videoLink"-tag
+        json_metadata = self._html_search_regex(
+            r'class=(?:"(?:mediaLink|wdrrPlayerPlayBtn)\b[^"]*"[^>]+|"videoLink\b[^"]*"[\s]*>\n[^\n]*)data-extension="([^"]+)"',
+            webpage, 'media link', default=None, flags=re.MULTILINE)
+
+        if not json_metadata:
+            return
+
+        media_link_obj = self._parse_json(json_metadata, display_id,
+                                          transform_source=js_to_json)
+        jsonp_url = media_link_obj['mediaObj']['url']
+
+        metadata = self._download_json(
+            jsonp_url, 'metadata', transform_source=strip_jsonp)
+
+        metadata_tracker_data = metadata['trackerData']
+        metadata_media_resource = metadata['mediaResource']
+
+        formats = []
+
+        # check if the metadata contains a direct URL to a file
+        for kind, media_resource in metadata_media_resource.items():
+            if kind not in ('dflt', 'alt'):
+                continue
+
+            for tag_name, medium_url in media_resource.items():
+                if tag_name not in ('videoURL', 'audioURL'):
+                    continue
+
+                ext = determine_ext(medium_url)
+                if ext == 'm3u8':
+                    formats.extend(self._extract_m3u8_formats(
+                        medium_url, display_id, 'mp4', 'm3u8_native',
+                        m3u8_id='hls'))
+                elif ext == 'f4m':
+                    manifest_url = update_url_query(
+                        medium_url, {'hdcore': '3.2.0', 'plugin': 'aasp-3.2.0.77.18'})
+                    formats.extend(self._extract_f4m_formats(
+                        manifest_url, display_id, f4m_id='hds', fatal=False))
+                elif ext == 'smil':
+                    formats.extend(self._extract_smil_formats(
+                        medium_url, 'stream', fatal=False))
+                else:
+                    a_format = {
+                        'url': medium_url
+                    }
+                    if ext == 'unknown_video':
+                        urlh = self._request_webpage(
+                            medium_url, display_id, note='Determining extension')
+                        ext = urlhandle_detect_ext(urlh)
+                        a_format['ext'] = ext
+                    formats.append(a_format)
+
+        self._sort_formats(formats)
+
+        subtitles = {}
+        caption_url = metadata_media_resource.get('captionURL')
+        if caption_url:
+            subtitles['de'] = [{
+                'url': caption_url,
+                'ext': 'ttml',
+            }]
+
+        title = metadata_tracker_data['trackerClipTitle']
+
+        return {
+            'id': metadata_tracker_data.get('trackerClipId', display_id),
+            'display_id': display_id,
+            'title': title,
+            'alt_title': metadata_tracker_data.get('trackerClipSubcategory'),
+            'formats': formats,
+            'subtitles': subtitles,
+            'upload_date': unified_strdate(metadata_tracker_data.get('trackerClipAirTime')),
+        }
+
+
+class WDRIE(WDRBaseIE):
     _CURRENT_MAUS_URL = r'https?://(?:www\.)wdrmaus.de/(?:[^/]+/){1,2}[^/?#]+\.php5'
     _PAGE_REGEX = r'/(?:mediathek/)?[^/]+/(?P<type>[^/]+)/(?P<display_id>.+)\.html'
     _VALID_URL = r'(?P<page_url>https?://(?:www\d\.)?wdr\d?\.de)' + _PAGE_REGEX + '|' + _CURRENT_MAUS_URL
@@ -91,10 +171,10 @@ class WDRIE(InfoExtractor):
         },
         {
             'url': 'http://www.wdrmaus.de/sachgeschichten/sachgeschichten/achterbahn.php5',
-            # HDS download, MD5 is unstable
+            'md5': '803138901f6368ee497b4d195bb164f2',
             'info_dict': {
                 'id': 'mdb-186083',
-                'ext': 'flv',
+                'ext': 'mp4',
                 'upload_date': '20130919',
                 'title': 'Sachgeschichte - Achterbahn ',
                 'description': '- Die Sendung mit der Maus -',
@@ -120,14 +200,9 @@ class WDRIE(InfoExtractor):
         display_id = mobj.group('display_id')
         webpage = self._download_webpage(url, display_id)

-        # for wdr.de the data-extension is in a tag with the class "mediaLink"
-        # for wdr.de radio players, in a tag with the class "wdrrPlayerPlayBtn"
-        # for wdrmaus its in a link to the page in a multiline "videoLink"-tag
-        json_metadata = self._html_search_regex(
-            r'class=(?:"(?:mediaLink|wdrrPlayerPlayBtn)\b[^"]*"[^>]+|"videoLink\b[^"]*"[\s]*>\n[^\n]*)data-extension="([^"]+)"',
-            webpage, 'media link', default=None, flags=re.MULTILINE)
+        info_dict = self._extract_wdr_video(webpage, display_id)

-        if not json_metadata:
+        if not info_dict:
             entries = [
                 self.url_result(page_url + href[0], 'WDR')
                 for href in re.findall(
@@ -140,86 +215,22 @@ class WDRIE(InfoExtractor):

             raise ExtractorError('No downloadable streams found', expected=True)

-        media_link_obj = self._parse_json(json_metadata, display_id,
-                                          transform_source=js_to_json)
-        jsonp_url = media_link_obj['mediaObj']['url']
-
-        metadata = self._download_json(
-            jsonp_url, 'metadata', transform_source=strip_jsonp)
-
-        metadata_tracker_data = metadata['trackerData']
-        metadata_media_resource = metadata['mediaResource']
-
-        formats = []
-
-        # check if the metadata contains a direct URL to a file
-        for kind, media_resource in metadata_media_resource.items():
-            if kind not in ('dflt', 'alt'):
-                continue
-
-            for tag_name, medium_url in media_resource.items():
-                if tag_name not in ('videoURL', 'audioURL'):
-                    continue
-
-                ext = determine_ext(medium_url)
-                if ext == 'm3u8':
-                    formats.extend(self._extract_m3u8_formats(
-                        medium_url, display_id, 'mp4', 'm3u8_native',
-                        m3u8_id='hls'))
-                elif ext == 'f4m':
-                    manifest_url = update_url_query(
-                        medium_url, {'hdcore': '3.2.0', 'plugin': 'aasp-3.2.0.77.18'})
-                    formats.extend(self._extract_f4m_formats(
-                        manifest_url, display_id, f4m_id='hds', fatal=False))
-                elif ext == 'smil':
-                    formats.extend(self._extract_smil_formats(
-                        medium_url, 'stream', fatal=False))
-                else:
-                    a_format = {
-                        'url': medium_url
-                    }
-                    if ext == 'unknown_video':
-                        urlh = self._request_webpage(
-                            medium_url, display_id, note='Determining extension')
-                        ext = urlhandle_detect_ext(urlh)
-                        a_format['ext'] = ext
-                    formats.append(a_format)
-
-        self._sort_formats(formats)
-
-        subtitles = {}
-        caption_url = metadata_media_resource.get('captionURL')
-        if caption_url:
-            subtitles['de'] = [{
-                'url': caption_url,
-                'ext': 'ttml',
-            }]
-
-        title = metadata_tracker_data.get('trackerClipTitle')
-
         is_live = url_type == 'live'

         if is_live:
-            title = self._live_title(title)
-            upload_date = None
-        elif 'trackerClipAirTime' in metadata_tracker_data:
-            upload_date = metadata_tracker_data['trackerClipAirTime']
-        else:
-            upload_date = self._html_search_meta('DC.Date', webpage, 'upload date')
+            info_dict.update({
+                'title': self._live_title(info_dict['title']),
+                'upload_date': None,
+            })
+        elif 'upload_date' not in info_dict:
+            info_dict['upload_date'] = unified_strdate(self._html_search_meta('DC.Date', webpage, 'upload date'))

-        if upload_date:
-            upload_date = unified_strdate(upload_date)
-
-        return {
-            'id': metadata_tracker_data.get('trackerClipId', display_id),
-            'display_id': display_id,
-            'title': title,
-            'alt_title': metadata_tracker_data.get('trackerClipSubcategory'),
-            'formats': formats,
-            'upload_date': upload_date,
+        info_dict.update({
             'description': self._html_search_meta('Description', webpage),
             'is_live': is_live,
-            'subtitles': subtitles,
-        }
+        })
+
+        return info_dict


 class WDRMobileIE(InfoExtractor):
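The metadata URL found in the media object points to a JSONP document, which is why `_download_json()` is called with `transform_source=strip_jsonp`. A standalone sketch of that step (the callback name and payload are made up):

import json

from youtube_dl.utils import strip_jsonp

jsonp = 'mediaCallback({"trackerData": {"trackerClipTitle": "Sample clip"}});'
metadata = json.loads(strip_jsonp(jsonp))  # strips the callback wrapper, leaves plain JSON
print(metadata['trackerData']['trackerClipTitle'])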
@@ -1,29 +1,33 @@
 from __future__ import unicode_literals

-from .common import InfoExtractor
 from .youtube import YoutubeIE
+from .jwplatform import JWPlatformBaseIE


-class WimpIE(InfoExtractor):
+class WimpIE(JWPlatformBaseIE):
     _VALID_URL = r'https?://(?:www\.)?wimp\.com/(?P<id>[^/]+)'
     _TESTS = [{
-        'url': 'http://www.wimp.com/maruexhausted/',
+        'url': 'http://www.wimp.com/maru-is-exhausted/',
         'md5': 'ee21217ffd66d058e8b16be340b74883',
         'info_dict': {
-            'id': 'maruexhausted',
+            'id': 'maru-is-exhausted',
             'ext': 'mp4',
             'title': 'Maru is exhausted.',
             'description': 'md5:57e099e857c0a4ea312542b684a869b8',
         }
     }, {
         'url': 'http://www.wimp.com/clowncar/',
-        'md5': '4e2986c793694b55b37cf92521d12bb4',
+        'md5': '5c31ad862a90dc5b1f023956faec13fe',
         'info_dict': {
-            'id': 'clowncar',
+            'id': 'cG4CEr2aiSg',
             'ext': 'webm',
-            'title': 'It\'s like a clown car.',
-            'description': 'md5:0e56db1370a6e49c5c1d19124c0d2fb2',
+            'title': 'Basset hound clown car...incredible!',
+            'description': '5 of my Bassets crawled in this dog loo! www.bellinghambassets.com\n\nFor licensing/usage please contact: licensing(at)jukinmediadotcom',
+            'upload_date': '20140303',
+            'uploader': 'Gretchen Hoey',
+            'uploader_id': 'gretchenandjeff1',
         },
+        'add_ie': ['Youtube'],
     }]

     def _real_extract(self, url):
@@ -41,14 +45,13 @@ class WimpIE(InfoExtractor):
             'ie_key': YoutubeIE.ie_key(),
         }

-        video_url = self._search_regex(
-            r'<video[^>]+>\s*<source[^>]+src=(["\'])(?P<url>.+?)\1',
-            webpage, 'video URL', group='url')
+        info_dict = self._extract_jwplayer_data(
+            webpage, video_id, require_title=False)

-        return {
+        info_dict.update({
             'id': video_id,
-            'url': video_url,
             'title': self._og_search_title(webpage),
-            'thumbnail': self._og_search_thumbnail(webpage),
             'description': self._og_search_description(webpage),
-        }
+        })
+
+        return info_dict
@@ -5,8 +5,10 @@ import re

 from .common import InfoExtractor
 from ..utils import (
+    ExtractorError,
     int_or_none,
     qualities,
+    remove_start,
 )


@@ -26,16 +28,17 @@ class WrzutaIE(InfoExtractor):
             'uploader_id': 'laboratoriumdextera',
             'description': 'md5:7fb5ef3c21c5893375fda51d9b15d9cd',
         },
+        'skip': 'Redirected to wrzuta.pl',
     }, {
-        'url': 'http://jolka85.wrzuta.pl/audio/063jOPX5ue2/liber_natalia_szroeder_-_teraz_ty',
-        'md5': 'bc78077859bea7bcfe4295d7d7fc9025',
+        'url': 'http://vexling.wrzuta.pl/audio/01xBFabGXu6/james_horner_-_into_the_na_39_vi_world_bonus',
+        'md5': 'f80564fb5a2ec6ec59705ae2bf2ba56d',
         'info_dict': {
-            'id': '063jOPX5ue2',
-            'ext': 'ogg',
-            'title': 'Liber & Natalia Szroeder - Teraz Ty',
-            'duration': 203,
-            'uploader_id': 'jolka85',
-            'description': 'md5:2d2b6340f9188c8c4cd891580e481096',
+            'id': '01xBFabGXu6',
+            'ext': 'mp3',
+            'title': 'James Horner - Into The Na\'vi World [Bonus]',
+            'description': 'md5:30a70718b2cd9df3120fce4445b0263b',
+            'duration': 95,
+            'uploader_id': 'vexling',
         },
     }]

@@ -45,7 +48,10 @@ class WrzutaIE(InfoExtractor):
         typ = mobj.group('typ')
         uploader = mobj.group('uploader')

-        webpage = self._download_webpage(url, video_id)
+        webpage, urlh = self._download_webpage_handle(url, video_id)
+
+        if urlh.geturl() == 'http://www.wrzuta.pl/':
+            raise ExtractorError('Video removed', expected=True)

         quality = qualities(['SD', 'MQ', 'HQ', 'HD'])

@@ -80,3 +86,73 @@ class WrzutaIE(InfoExtractor):
             'description': self._og_search_description(webpage),
             'age_limit': embedpage.get('minimalAge', 0),
         }
+
+
+class WrzutaPlaylistIE(InfoExtractor):
+    """
+        this class covers extraction of wrzuta playlist entries
+        the extraction process bases on following steps:
+        * collect information of playlist size
+        * download all entries provided on
+            the playlist webpage (the playlist is split
+            on two pages: first directly reached from webpage
+            second: downloaded on demand by ajax call and rendered
+            using the ajax call response)
+        * in case size of extracted entries not reached total number of entries
+            use the ajax call to collect the remaining entries
+    """
+
+    IE_NAME = 'wrzuta.pl:playlist'
+    _VALID_URL = r'https?://(?P<uploader>[0-9a-zA-Z]+)\.wrzuta\.pl/playlista/(?P<id>[0-9a-zA-Z]+)'
+    _TESTS = [{
+        'url': 'http://miromak71.wrzuta.pl/playlista/7XfO4vE84iR/moja_muza',
+        'playlist_mincount': 14,
+        'info_dict': {
+            'id': '7XfO4vE84iR',
+            'title': 'Moja muza',
+        },
+    }, {
+        'url': 'http://heroesf70.wrzuta.pl/playlista/6Nj3wQHx756/lipiec_-_lato_2015_muzyka_swiata',
+        'playlist_mincount': 144,
+        'info_dict': {
+            'id': '6Nj3wQHx756',
+            'title': 'Lipiec - Lato 2015 Muzyka Świata',
+        },
+    }, {
+        'url': 'http://miromak71.wrzuta.pl/playlista/7XfO4vE84iR',
+        'only_matching': True,
+    }]
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        playlist_id = mobj.group('id')
+        uploader = mobj.group('uploader')
+
+        webpage = self._download_webpage(url, playlist_id)
+
+        playlist_size = int_or_none(self._html_search_regex(
+            (r'<div[^>]+class=["\']playlist-counter["\'][^>]*>\d+/(\d+)',
+             r'<div[^>]+class=["\']all-counter["\'][^>]*>(.+?)</div>'),
+            webpage, 'playlist size', default=None))
+
+        playlist_title = remove_start(
+            self._og_search_title(webpage), 'Playlista: ')
+
+        entries = []
+        if playlist_size:
+            entries = [
+                self.url_result(entry_url)
+                for _, entry_url in re.findall(
+                    r'<a[^>]+href=(["\'])(http.+?)\1[^>]+class=["\']playlist-file-page',
+                    webpage)]
+            if playlist_size > len(entries):
+                playlist_content = self._download_json(
+                    'http://%s.wrzuta.pl/xhr/get_playlist_offset/%s' % (uploader, playlist_id),
+                    playlist_id,
+                    'Downloading playlist JSON',
+                    'Unable to download playlist JSON')
+                entries.extend([
+                    self.url_result(entry['filelink'])
+                    for entry in playlist_content.get('files', []) if entry.get('filelink')])
+
+        return self.playlist_result(entries, playlist_id, playlist_title)
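A note on the WrzutaPlaylistIE addition above: the docstring describes a two-step collection, entries scraped from the playlist page itself, then topped up through the site's get_playlist_offset AJAX endpoint when the on-page counter reports more items. The sketch below replays that control flow on made-up sample markup and a made-up JSON payload (the real extractor goes through _html_search_regex and _download_json); it is only an illustration, not extractor code.

    import re

    # Made-up page fragment and AJAX payload, shaped like the data the extractor reads.
    webpage = '''
    <div class="playlist-counter">2/4</div>
    <a href="http://example.wrzuta.pl/audio/aaa" class="playlist-file-page">One</a>
    <a href="http://example.wrzuta.pl/audio/bbb" class="playlist-file-page">Two</a>
    '''
    ajax_response = {'files': [{'filelink': 'http://example.wrzuta.pl/audio/ccc'},
                               {'filelink': 'http://example.wrzuta.pl/audio/ddd'}]}

    # Step 1: read the declared playlist size and the entries already on the page.
    playlist_size = int(re.search(r'playlist-counter[^>]*>\d+/(\d+)', webpage).group(1))
    entries = re.findall(r'<a[^>]+href="(http.+?)"[^>]+class="playlist-file-page', webpage)

    # Step 2: if the counter says there is more, take the rest from the AJAX response.
    if playlist_size > len(entries):
        entries.extend(f['filelink'] for f in ajax_response.get('files', []) if f.get('filelink'))

    print(entries)  # all four entry URLs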
@@ -5,8 +5,10 @@ import re

 from .common import InfoExtractor
 from ..utils import (
+    decode_packed_codes,
     ExtractorError,
     int_or_none,
+    NO_DEFAULT,
     sanitized_Request,
     urlencode_postdata,
 )
@@ -23,20 +25,24 @@ class XFileShareIE(InfoExtractor):
         ('thevideobee.to', 'TheVideoBee'),
         ('vidto.me', 'Vidto'),
         ('streamin.to', 'Streamin.To'),
+        ('xvidstage.com', 'XVIDSTAGE'),
     )

     IE_DESC = 'XFileShare based sites: %s' % ', '.join(list(zip(*_SITES))[1])
     _VALID_URL = (r'https?://(?P<host>(?:www\.)?(?:%s))/(?:embed-)?(?P<id>[0-9a-zA-Z]+)'
                   % '|'.join(re.escape(site) for site in list(zip(*_SITES))[0]))

-    _FILE_NOT_FOUND_REGEX = r'>(?:404 - )?File Not Found<'
+    _FILE_NOT_FOUND_REGEXES = (
+        r'>(?:404 - )?File Not Found<',
+        r'>The file was removed by administrator<',
+    )

     _TESTS = [{
         'url': 'http://gorillavid.in/06y9juieqpmi',
         'md5': '5ae4a3580620380619678ee4875893ba',
         'info_dict': {
             'id': '06y9juieqpmi',
-            'ext': 'flv',
+            'ext': 'mp4',
             'title': 'Rebecca Black My Moment Official Music Video Reaction-6GK87Rc8bzQ',
             'thumbnail': 're:http://.*\.jpg',
         },
@@ -78,6 +84,17 @@ class XFileShareIE(InfoExtractor):
             'ext': 'mp4',
             'title': 'Big Buck Bunny trailer',
         },
+    }, {
+        'url': 'http://xvidstage.com/e0qcnl03co6z',
+        'info_dict': {
+            'id': 'e0qcnl03co6z',
+            'ext': 'mp4',
+            'title': 'Chucky Prank 2015.mp4',
+        },
+    }, {
+        # removed by administrator
+        'url': 'http://xvidstage.com/amfy7atlkx25',
+        'only_matching': True,
     }]

     def _real_extract(self, url):
@@ -87,7 +104,7 @@ class XFileShareIE(InfoExtractor):
         url = 'http://%s/%s' % (mobj.group('host'), video_id)
         webpage = self._download_webpage(url, video_id)

-        if re.search(self._FILE_NOT_FOUND_REGEX, webpage) is not None:
+        if any(re.search(p, webpage) for p in self._FILE_NOT_FOUND_REGEXES):
             raise ExtractorError('Video %s does not exist' % video_id, expected=True)

         fields = self._hidden_inputs(webpage)
@@ -113,10 +130,23 @@ class XFileShareIE(InfoExtractor):
             r'>Watch (.+) ',
             r'<h2 class="video-page-head">([^<]+)</h2>'],
            webpage, 'title', default=None) or self._og_search_title(webpage)).strip()
-        video_url = self._search_regex(
-            [r'file\s*:\s*["\'](http[^"\']+)["\'],',
-             r'file_link\s*=\s*\'(https?:\/\/[0-9a-zA-z.\/\-_]+)'],
-            webpage, 'file url')
+
+        def extract_video_url(default=NO_DEFAULT):
+            return self._search_regex(
+                (r'file\s*:\s*(["\'])(?P<url>http.+?)\1,',
+                 r'file_link\s*=\s*(["\'])(?P<url>http.+?)\1',
+                 r'addVariable\((\\?["\'])file\1\s*,\s*(\\?["\'])(?P<url>http.+?)\2\)',
+                 r'<embed[^>]+src=(["\'])(?P<url>http.+?)\1'),
+                webpage, 'file url', default=default, group='url')
+
+        video_url = extract_video_url(default=None)
+
+        if not video_url:
+            webpage = decode_packed_codes(self._search_regex(
+                r"(}\('(.+)',(\d+),(\d+),'[^']*\b(?:file|embed)\b[^']*'\.split\('\|'\))",
+                webpage, 'packed code'))
+            video_url = extract_video_url()

         thumbnail = self._search_regex(
             r'image\s*:\s*["\'](http[^"\']+)["\'],', webpage, 'thumbnail', default=None)
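The XFileShare change above adds a second pass: if none of the plain file/file_link/addVariable/embed patterns match, the page is assumed to carry a packer-obfuscated script, which is unpacked with decode_packed_codes() from utils and searched again. A toy illustration of that retry pattern, with made-up page strings standing in for the real download and unpacking step:

    import re

    def find_file_url(page):
        # Same idea as extract_video_url() above, reduced to a single pattern.
        m = re.search(r'file\s*:\s*["\'](?P<url>http.+?)["\']', page)
        return m.group('url') if m else None

    packed_page = "<script>eval(function(p,a,c,k,e,d){/* obfuscated */})</script>"
    url = find_file_url(packed_page)
    if not url:
        # In the extractor this string would come from decode_packed_codes(packed_page).
        unpacked = "jwplayer('flvplayer').setup({file:'http://example.com/video.mp4'});"
        url = find_file_url(unpacked)

    print(url)  # http://example.com/video.mp4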
@@ -6,17 +6,23 @@ from ..compat import compat_urllib_parse_unquote


 class XNXXIE(InfoExtractor):
-    _VALID_URL = r'^https?://(?:video|www)\.xnxx\.com/video(?P<id>[0-9]+)/(.*)'
-    _TEST = {
-        'url': 'http://video.xnxx.com/video1135332/lida_naked_funny_actress_5_',
-        'md5': '0831677e2b4761795f68d417e0b7b445',
+    _VALID_URL = r'https?://(?:video|www)\.xnxx\.com/video-?(?P<id>[0-9a-z]+)/'
+    _TESTS = [{
+        'url': 'http://www.xnxx.com/video-55awb78/skyrim_test_video',
+        'md5': 'ef7ecee5af78f8b03dca2cf31341d3a0',
         'info_dict': {
-            'id': '1135332',
+            'id': '55awb78',
             'ext': 'flv',
-            'title': 'lida » Naked Funny Actress (5)',
+            'title': 'Skyrim Test Video',
             'age_limit': 18,
-        }
-    }
+        },
+    }, {
+        'url': 'http://video.xnxx.com/video1135332/lida_naked_funny_actress_5_',
+        'only_matching': True,
+    }, {
+        'url': 'http://www.xnxx.com/video-55awb78/',
+        'only_matching': True,
+    }]

     def _real_extract(self, url):
         video_id = self._match_id(url)
@@ -17,7 +17,7 @@ class YouPornIE(InfoExtractor):
     _VALID_URL = r'https?://(?:www\.)?youporn\.com/watch/(?P<id>\d+)/(?P<display_id>[^/?#&]+)'
     _TESTS = [{
         'url': 'http://www.youporn.com/watch/505835/sex-ed-is-it-safe-to-masturbate-daily/',
-        'md5': '71ec5fcfddacf80f495efa8b6a8d9a89',
+        'md5': '3744d24c50438cf5b6f6d59feb5055c2',
         'info_dict': {
             'id': '505835',
             'display_id': 'sex-ed-is-it-safe-to-masturbate-daily',
@@ -121,21 +121,21 @@ class YouPornIE(InfoExtractor):
             webpage, 'thumbnail', fatal=False, group='thumbnail')

         uploader = self._html_search_regex(
-            r'(?s)<div[^>]+class=["\']videoInfoBy(?:\s+[^"\']+)?["\'][^>]*>\s*By:\s*</div>(.+?)</(?:a|div)>',
+            r'(?s)<div[^>]+class=["\']submitByLink["\'][^>]*>(.+?)</div>',
             webpage, 'uploader', fatal=False)
         upload_date = unified_strdate(self._html_search_regex(
-            r'(?s)<div[^>]+class=["\']videoInfoTime["\'][^>]*>(.+?)</div>',
+            r'(?s)<div[^>]+class=["\']videoInfo(?:Date|Time)["\'][^>]*>(.+?)</div>',
             webpage, 'upload date', fatal=False))

         age_limit = self._rta_search(webpage)

         average_rating = int_or_none(self._search_regex(
-            r'<div[^>]+class=["\']videoInfoRating["\'][^>]*>\s*<div[^>]+class=["\']videoRatingPercentage["\'][^>]*>(\d+)%</div>',
+            r'<div[^>]+class=["\']videoRatingPercentage["\'][^>]*>(\d+)%</div>',
             webpage, 'average rating', fatal=False))

         view_count = str_to_int(self._search_regex(
-            r'(?s)<div[^>]+class=["\']videoInfoViews["\'][^>]*>.*?([\d,.]+)\s*</div>',
-            webpage, 'view count', fatal=False))
+            r'(?s)<div[^>]+class=(["\']).*?\bvideoInfoViews\b.*?\1[^>]*>.*?(?P<count>[\d,.]+)<',
+            webpage, 'view count', fatal=False, group='count'))
         comment_count = str_to_int(self._search_regex(
             r'>All [Cc]omments? \(([\d,.]+)\)',
             webpage, 'comment count', fatal=False))
@@ -501,6 +501,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
                 'youtube_include_dash_manifest': True,
                 'format': '141',
             },
+            'skip': 'format 141 not served anymore',
         },
         # DASH manifest with encrypted signature
         {
@@ -517,7 +518,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
             },
             'params': {
                 'youtube_include_dash_manifest': True,
-                'format': '141',
+                'format': '141/bestaudio[ext=m4a]',
             },
         },
         # JS player signature function name containing $
@@ -537,7 +538,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
             },
             'params': {
                 'youtube_include_dash_manifest': True,
-                'format': '141',
+                'format': '141/bestaudio[ext=m4a]',
             },
         },
         # Controversy video
@@ -618,7 +619,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
                 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/olympic',
                 'license': 'Standard YouTube License',
                 'description': 'HO09 - Women - GER-AUS - Hockey - 31 July 2012 - London 2012 Olympic Games',
-                'uploader': 'Olympics',
+                'uploader': 'Olympic',
                 'title': 'Hockey - Women - GER-AUS - London 2012 Olympic Games',
             },
             'params': {
@@ -671,7 +672,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
                 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/dorappi2000',
                 'uploader': 'dorappi2000',
                 'license': 'Standard YouTube License',
-                'formats': 'mincount:33',
+                'formats': 'mincount:32',
             },
         },
         # DASH manifest with segment_list
@@ -691,7 +692,8 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
             'params': {
                 'youtube_include_dash_manifest': True,
                 'format': '135',  # bestvideo
-            }
+            },
+            'skip': 'This live event has ended.',
         },
         {
             # Multifeed videos (multiple cameras), URL is for Main Camera
@@ -762,6 +764,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
                 'title': 'DevConf.cz 2016 Day 2 Workshops 1 14:00 - 15:30',
             },
             'playlist_count': 2,
+            'skip': 'Not multifeed anymore',
         },
         {
             'url': 'http://vid.plus/FlRa-iH7PGw',
@@ -814,6 +817,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
             'params': {
                 'skip_download': True,
             },
+            'skip': 'This video does not exist.',
         },
         {
             # Video licensed under Creative Commons
@@ -1331,7 +1335,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
                     (?:[a-zA-Z-]+="[^"]*"\s+)*?
                     (?:title|href)="([^"]+)"\s+
                     (?:[a-zA-Z-]+="[^"]*"\s+)*?
-                    class="(?:yt-uix-redirect-link|yt-uix-sessionlink[^"]*)"[^>]*>
+                    class="[^"]*"[^>]*>
                 [^<]+\.{3}\s*
                 </a>
             ''', r'\1', video_description)
@@ -232,7 +232,7 @@ class JSInterpreter(object):
     def extract_function(self, funcname):
         func_m = re.search(
             r'''(?x)
-                (?:function\s+%s|[{;,]%s\s*=\s*function|var\s+%s\s*=\s*function)\s*
+                (?:function\s+%s|[{;,]\s*%s\s*=\s*function|var\s+%s\s*=\s*function)\s*
                 \((?P<args>[^)]*)\)\s*
                 \{(?P<code>[^}]+)\}''' % (
                 re.escape(funcname), re.escape(funcname), re.escape(funcname)),
@@ -76,7 +76,7 @@ class Socks4Error(ProxyError):

     CODES = {
         91: 'request rejected or failed',
-        92: 'request rejected becasue SOCKS server cannot connect to identd on the client',
+        92: 'request rejected because SOCKS server cannot connect to identd on the client',
         93: 'request rejected because the client program and identd report different user-ids'
     }
@@ -76,7 +76,7 @@ def register_socks_protocols():
 compiled_regex_type = type(re.compile(''))

 std_headers = {
-    'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20150101 Firefox/44.0 (Chrome)',
+    'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20150101 Firefox/47.0 (Chrome)',
     'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
     'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
     'Accept-Encoding': 'gzip, deflate',
@@ -110,6 +110,49 @@ ACCENT_CHARS = dict(zip('ÂÃÄÀÁÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖŐØŒÙ
                         itertools.chain('AAAAAA', ['AE'], 'CEEEEIIIIDNOOOOOOO', ['OE'], 'UUUUUYP', ['ss'],
                                         'aaaaaa', ['ae'], 'ceeeeiiiionooooooo', ['oe'], 'uuuuuypy')))

+DATE_FORMATS = (
+    '%d %B %Y',
+    '%d %b %Y',
+    '%B %d %Y',
+    '%b %d %Y',
+    '%b %dst %Y %I:%M',
+    '%b %dnd %Y %I:%M',
+    '%b %dth %Y %I:%M',
+    '%Y %m %d',
+    '%Y-%m-%d',
+    '%Y/%m/%d',
+    '%Y/%m/%d %H:%M:%S',
+    '%Y-%m-%d %H:%M:%S',
+    '%Y-%m-%d %H:%M:%S.%f',
+    '%d.%m.%Y %H:%M',
+    '%d.%m.%Y %H.%M',
+    '%Y-%m-%dT%H:%M:%SZ',
+    '%Y-%m-%dT%H:%M:%S.%fZ',
+    '%Y-%m-%dT%H:%M:%S.%f0Z',
+    '%Y-%m-%dT%H:%M:%S',
+    '%Y-%m-%dT%H:%M:%S.%f',
+    '%Y-%m-%dT%H:%M',
+)
+
+DATE_FORMATS_DAY_FIRST = list(DATE_FORMATS)
+DATE_FORMATS_DAY_FIRST.extend([
+    '%d-%m-%Y',
+    '%d.%m.%Y',
+    '%d.%m.%y',
+    '%d/%m/%Y',
+    '%d/%m/%y',
+    '%d/%m/%Y %H:%M:%S',
+])
+
+DATE_FORMATS_MONTH_FIRST = list(DATE_FORMATS)
+DATE_FORMATS_MONTH_FIRST.extend([
+    '%m-%d-%Y',
+    '%m.%d.%Y',
+    '%m/%d/%Y',
+    '%m/%d/%y',
+    '%m/%d/%Y %H:%M:%S',
+])
+

 def preferredencoding():
     """Get preferred encoding.
|
|||||||
https_response = http_response
|
https_response = http_response
|
||||||
|
|
||||||
|
|
||||||
|
def extract_timezone(date_str):
|
||||||
|
m = re.search(
|
||||||
|
r'^.{8,}?(?P<tz>Z$| ?(?P<sign>\+|-)(?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})$)',
|
||||||
|
date_str)
|
||||||
|
if not m:
|
||||||
|
timezone = datetime.timedelta()
|
||||||
|
else:
|
||||||
|
date_str = date_str[:-len(m.group('tz'))]
|
||||||
|
if not m.group('sign'):
|
||||||
|
timezone = datetime.timedelta()
|
||||||
|
else:
|
||||||
|
sign = 1 if m.group('sign') == '+' else -1
|
||||||
|
timezone = datetime.timedelta(
|
||||||
|
hours=sign * int(m.group('hours')),
|
||||||
|
minutes=sign * int(m.group('minutes')))
|
||||||
|
return timezone, date_str
|
||||||
|
|
||||||
|
|
||||||
def parse_iso8601(date_str, delimiter='T', timezone=None):
|
def parse_iso8601(date_str, delimiter='T', timezone=None):
|
||||||
""" Return a UNIX timestamp from the given date """
|
""" Return a UNIX timestamp from the given date """
|
||||||
|
|
||||||
@ -984,20 +1045,8 @@ def parse_iso8601(date_str, delimiter='T', timezone=None):
|
|||||||
date_str = re.sub(r'\.[0-9]+', '', date_str)
|
date_str = re.sub(r'\.[0-9]+', '', date_str)
|
||||||
|
|
||||||
if timezone is None:
|
if timezone is None:
|
||||||
m = re.search(
|
timezone, date_str = extract_timezone(date_str)
|
||||||
r'(?:Z$| ?(?P<sign>\+|-)(?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})$)',
|
|
||||||
date_str)
|
|
||||||
if not m:
|
|
||||||
timezone = datetime.timedelta()
|
|
||||||
else:
|
|
||||||
date_str = date_str[:-len(m.group(0))]
|
|
||||||
if not m.group('sign'):
|
|
||||||
timezone = datetime.timedelta()
|
|
||||||
else:
|
|
||||||
sign = 1 if m.group('sign') == '+' else -1
|
|
||||||
timezone = datetime.timedelta(
|
|
||||||
hours=sign * int(m.group('hours')),
|
|
||||||
minutes=sign * int(m.group('minutes')))
|
|
||||||
try:
|
try:
|
||||||
date_format = '%Y-%m-%d{0}%H:%M:%S'.format(delimiter)
|
date_format = '%Y-%m-%d{0}%H:%M:%S'.format(delimiter)
|
||||||
dt = datetime.datetime.strptime(date_str, date_format) - timezone
|
dt = datetime.datetime.strptime(date_str, date_format) - timezone
|
||||||
@ -1006,6 +1055,10 @@ def parse_iso8601(date_str, delimiter='T', timezone=None):
|
|||||||
pass
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
def date_formats(day_first=True):
|
||||||
|
return DATE_FORMATS_DAY_FIRST if day_first else DATE_FORMATS_MONTH_FIRST
|
||||||
|
|
||||||
|
|
||||||
def unified_strdate(date_str, day_first=True):
|
def unified_strdate(date_str, day_first=True):
|
||||||
"""Return a string with the date in the format YYYYMMDD"""
|
"""Return a string with the date in the format YYYYMMDD"""
|
||||||
|
|
||||||
@ -1014,53 +1067,11 @@ def unified_strdate(date_str, day_first=True):
|
|||||||
upload_date = None
|
upload_date = None
|
||||||
# Replace commas
|
# Replace commas
|
||||||
date_str = date_str.replace(',', ' ')
|
date_str = date_str.replace(',', ' ')
|
||||||
# %z (UTC offset) is only supported in python>=3.2
|
|
||||||
if not re.match(r'^[0-9]{1,2}-[0-9]{1,2}-[0-9]{4}$', date_str):
|
|
||||||
date_str = re.sub(r' ?(\+|-)[0-9]{2}:?[0-9]{2}$', '', date_str)
|
|
||||||
# Remove AM/PM + timezone
|
# Remove AM/PM + timezone
|
||||||
date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str)
|
date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str)
|
||||||
|
_, date_str = extract_timezone(date_str)
|
||||||
|
|
||||||
format_expressions = [
|
for expression in date_formats(day_first):
|
||||||
'%d %B %Y',
|
|
||||||
'%d %b %Y',
|
|
||||||
'%B %d %Y',
|
|
||||||
'%b %d %Y',
|
|
||||||
'%b %dst %Y %I:%M',
|
|
||||||
'%b %dnd %Y %I:%M',
|
|
||||||
'%b %dth %Y %I:%M',
|
|
||||||
'%Y %m %d',
|
|
||||||
'%Y-%m-%d',
|
|
||||||
'%Y/%m/%d',
|
|
||||||
'%Y/%m/%d %H:%M:%S',
|
|
||||||
'%Y-%m-%d %H:%M:%S',
|
|
||||||
'%Y-%m-%d %H:%M:%S.%f',
|
|
||||||
'%d.%m.%Y %H:%M',
|
|
||||||
'%d.%m.%Y %H.%M',
|
|
||||||
'%Y-%m-%dT%H:%M:%SZ',
|
|
||||||
'%Y-%m-%dT%H:%M:%S.%fZ',
|
|
||||||
'%Y-%m-%dT%H:%M:%S.%f0Z',
|
|
||||||
'%Y-%m-%dT%H:%M:%S',
|
|
||||||
'%Y-%m-%dT%H:%M:%S.%f',
|
|
||||||
'%Y-%m-%dT%H:%M',
|
|
||||||
]
|
|
||||||
if day_first:
|
|
||||||
format_expressions.extend([
|
|
||||||
'%d-%m-%Y',
|
|
||||||
'%d.%m.%Y',
|
|
||||||
'%d.%m.%y',
|
|
||||||
'%d/%m/%Y',
|
|
||||||
'%d/%m/%y',
|
|
||||||
'%d/%m/%Y %H:%M:%S',
|
|
||||||
])
|
|
||||||
else:
|
|
||||||
format_expressions.extend([
|
|
||||||
'%m-%d-%Y',
|
|
||||||
'%m.%d.%Y',
|
|
||||||
'%m/%d/%Y',
|
|
||||||
'%m/%d/%y',
|
|
||||||
'%m/%d/%Y %H:%M:%S',
|
|
||||||
])
|
|
||||||
for expression in format_expressions:
|
|
||||||
try:
|
try:
|
||||||
upload_date = datetime.datetime.strptime(date_str, expression).strftime('%Y%m%d')
|
upload_date = datetime.datetime.strptime(date_str, expression).strftime('%Y%m%d')
|
||||||
except ValueError:
|
except ValueError:
|
||||||
@ -1076,6 +1087,29 @@ def unified_strdate(date_str, day_first=True):
|
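The rewrite above moves the per-function format lists to the module-level tables introduced earlier and keeps the day_first switch, so ambiguous slash dates still resolve the same way. A quick check, assuming this checkout is importable:

    from youtube_dl.utils import unified_strdate

    print(unified_strdate('02/03/2016'))                   # '20160302' (day-first by default)
    print(unified_strdate('02/03/2016', day_first=False))  # '20160203'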
|||||||
return compat_str(upload_date)
|
return compat_str(upload_date)
|
||||||
|
|
||||||
|
|
||||||
|
def unified_timestamp(date_str, day_first=True):
|
||||||
|
if date_str is None:
|
||||||
|
return None
|
||||||
|
|
||||||
|
date_str = date_str.replace(',', ' ')
|
||||||
|
|
||||||
|
pm_delta = datetime.timedelta(hours=12 if re.search(r'(?i)PM', date_str) else 0)
|
||||||
|
timezone, date_str = extract_timezone(date_str)
|
||||||
|
|
||||||
|
# Remove AM/PM + timezone
|
||||||
|
date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str)
|
||||||
|
|
||||||
|
for expression in date_formats(day_first):
|
||||||
|
try:
|
||||||
|
dt = datetime.datetime.strptime(date_str, expression) - timezone + pm_delta
|
||||||
|
return calendar.timegm(dt.timetuple())
|
||||||
|
except ValueError:
|
||||||
|
pass
|
||||||
|
timetuple = email.utils.parsedate_tz(date_str)
|
||||||
|
if timetuple:
|
||||||
|
return calendar.timegm(timetuple.timetuple())
|
||||||
|
|
||||||
|
|
||||||
def determine_ext(url, default_ext='unknown_video'):
|
def determine_ext(url, default_ext='unknown_video'):
|
||||||
if url is None:
|
if url is None:
|
||||||
return default_ext
|
return default_ext
|
||||||
@ -1626,6 +1660,10 @@ def float_or_none(v, scale=1, invscale=1, default=None):
|
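unified_timestamp() is the new sibling of unified_strdate(): it reuses the same format tables but returns a UNIX timestamp and honours both the timezone offset and a trailing PM marker. A quick check (values worked out by hand, assuming this checkout is importable):

    import datetime
    from youtube_dl.utils import unified_timestamp

    ts = unified_timestamp('2016-06-26 12:00:00 +0200')
    print(ts)                                      # 1466935200
    print(datetime.datetime.utcfromtimestamp(ts))  # 2016-06-26 10:00:00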
|||||||
return default
|
return default
|
||||||
|
|
||||||
|
|
||||||
|
def strip_or_none(v):
|
||||||
|
return None if v is None else v.strip()
|
||||||
|
|
||||||
|
|
||||||
def parse_duration(s):
|
def parse_duration(s):
|
||||||
if not isinstance(s, compat_basestring):
|
if not isinstance(s, compat_basestring):
|
||||||
return None
|
return None
|
||||||
@ -1901,6 +1939,16 @@ def dict_get(d, key_or_keys, default=None, skip_false_values=True):
|
|||||||
return d.get(key_or_keys, default)
|
return d.get(key_or_keys, default)
|
||||||
|
|
||||||
|
|
||||||
|
def try_get(src, getter, expected_type=None):
|
||||||
|
try:
|
||||||
|
v = getter(src)
|
||||||
|
except (AttributeError, KeyError, TypeError, IndexError):
|
||||||
|
pass
|
||||||
|
else:
|
||||||
|
if expected_type is None or isinstance(v, expected_type):
|
||||||
|
return v
|
||||||
|
|
||||||
|
|
||||||
def encode_compat_str(string, encoding=preferredencoding(), errors='strict'):
|
def encode_compat_str(string, encoding=preferredencoding(), errors='strict'):
|
||||||
return string if isinstance(string, compat_str) else compat_str(string, encoding, errors)
|
return string if isinstance(string, compat_str) else compat_str(string, encoding, errors)
|
||||||
|
|
||||||
@ -1960,7 +2008,7 @@ def js_to_json(code):
|
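try_get() gives extractors a safe way to reach into nested JSON without chains of .get() calls: the getter runs inside a try/except and the result is only returned when it matches expected_type. For instance (the data dict here is made up):

    from youtube_dl.utils import try_get

    data = {'items': [{'snippet': {'title': 'foo'}}]}
    print(try_get(data, lambda x: x['items'][0]['snippet']['title'], str))  # 'foo'
    print(try_get(data, lambda x: x['items'][5]['snippet']['title']))       # None (IndexError swallowed)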
|||||||
'(?:[^'\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^'\\]*'|
|
'(?:[^'\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^'\\]*'|
|
||||||
/\*.*?\*/|,(?=\s*[\]}])|
|
/\*.*?\*/|,(?=\s*[\]}])|
|
||||||
[a-zA-Z_][.a-zA-Z_0-9]*|
|
[a-zA-Z_][.a-zA-Z_0-9]*|
|
||||||
(?:0[xX][0-9a-fA-F]+|0+[0-7]+)(?:\s*:)?|
|
\b(?:0[xX][0-9a-fA-F]+|0+[0-7]+)(?:\s*:)?|
|
||||||
[0-9]+(?=\s*:)
|
[0-9]+(?=\s*:)
|
||||||
''', fix_kv, code)
|
''', fix_kv, code)
|
||||||
|
|
||||||
@ -2842,3 +2890,16 @@ def decode_packed_codes(code):
|
|||||||
return re.sub(
|
return re.sub(
|
||||||
r'\b(\w+)\b', lambda mobj: symbol_table[mobj.group(0)],
|
r'\b(\w+)\b', lambda mobj: symbol_table[mobj.group(0)],
|
||||||
obfucasted_code)
|
obfucasted_code)
|
||||||
|
|
||||||
|
|
||||||
|
def parse_m3u8_attributes(attrib):
|
||||||
|
info = {}
|
||||||
|
for (key, val) in re.findall(r'(?P<key>[A-Z0-9-]+)=(?P<val>"[^"]+"|[^",]+)(?:,|$)', attrib):
|
||||||
|
if val.startswith('"'):
|
||||||
|
val = val[1:-1]
|
||||||
|
info[key] = val
|
||||||
|
return info
|
||||||
|
|
||||||
|
|
||||||
|
def urshift(val, n):
|
||||||
|
return val >> n if val >= 0 else (val + 0x100000000) >> n
|
||||||
|
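parse_m3u8_attributes() turns one attribute list from an M3U8 playlist into a dict while keeping commas inside quoted values intact, and urshift() emulates JavaScript's unsigned right shift on 32-bit values. For example (assuming this checkout is importable):

    from youtube_dl.utils import parse_m3u8_attributes, urshift

    attrs = parse_m3u8_attributes('BANDWIDTH=1280000,RESOLUTION=1280x720,CODECS="avc1.64001f,mp4a.40.2"')
    print(attrs['CODECS'])   # avc1.64001f,mp4a.40.2 (quotes stripped, inner comma preserved)
    print(urshift(-16, 2))   # 1073741820, i.e. JavaScript's (-16 >>> 2)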
@@ -1,3 +1,3 @@
 from __future__ import unicode_literals

-__version__ = '2016.06.11.1'
+__version__ = '2016.06.26'