Compare commits
171 Commits
2016.10.02
...
2016.11.02
Author | SHA1 | Date | |
---|---|---|---|
8956d6608a | |||
3365ea8929 | |||
a18aeee803 | |||
1616f9b452 | |||
02dc0a36b7 | |||
639e3b5c99 | |||
b2758123c5 | |||
f449c061d0 | |||
9c82bba05d | |||
e3577722b0 | |||
b82c33dd67 | |||
e5a088dc4b | |||
2c6da7df4a | |||
7e7a028aa4 | |||
e70a5e6566 | |||
3bf55be466 | |||
a901fc5fc2 | |||
cae6bc0118 | |||
d9ee2e5cf6 | |||
e1a0b3b81c | |||
2a048f9878 | |||
ea331f40e6 | |||
f02700a1fa | |||
f3517569f6 | |||
c725333d41 | |||
a5a8877f9c | |||
43c53a1700 | |||
ec8705117a | |||
3d8d44c7b1 | |||
88839f4380 | |||
83e9374464 | |||
773017c648 | |||
777d90dc28 | |||
3791d84acc | |||
9305a0dc60 | |||
94e08950e3 | |||
ee824a8d06 | |||
d3b6b3b95b | |||
b17422753f | |||
b0b28b8241 | |||
81cb7a5978 | |||
d2e96a8ed4 | |||
2e7c8cab55 | |||
d7d4481c6a | |||
5ace137bf4 | |||
9dde0e04e6 | |||
f16f8505b1 | |||
9dc13a6780 | |||
9aa929d337 | |||
425f3fdfcb | |||
e034cbc581 | |||
5378f8ce0d | |||
b64d04c119 | |||
00ca755231 | |||
69c2d42bd7 | |||
062e2769a3 | |||
859447a28d | |||
f8ae2c7f30 | |||
9ce0077485 | |||
0ebb86bd18 | |||
9df6b03caf | |||
8e2915d70b | |||
19e447150d | |||
ad9fd84004 | |||
60633ae9a0 | |||
a81dc82151 | |||
9218a6b4f5 | |||
02af6ec707 | |||
05b7996cab | |||
46f6052950 | |||
c8802041dd | |||
c7911009a0 | |||
2b96b06bf0 | |||
06b3fe2926 | |||
2c6743bf0f | |||
efb6242916 | |||
0384932e3d | |||
edd6074cea | |||
791d29dbf8 | |||
481cc7335c | |||
853a71b628 | |||
e2628fb6a0 | |||
df4939b1cd | |||
0b94dbb115 | |||
8d76bdf12b | |||
8204bacf1d | |||
47da782337 | |||
74324a7ac2 | |||
b0dfcab60a | |||
bbd7706898 | |||
112740e79f | |||
c0b1e88895 | |||
7cdfbbf9b8 | |||
ac943d48d3 | |||
73498a8921 | |||
9187ee4f19 | |||
2273e2c530 | |||
4b492e3579 | |||
9c4258bcec | |||
ea8aefd1d7 | |||
6edfc40a0e | |||
68d9561ca1 | |||
cfc0e7c82b | |||
4102e64051 | |||
f605242bfc | |||
d32fa0f12c | |||
a347a0d088 | |||
77c5b98dcd | |||
88ebefc054 | |||
2e638d7bca | |||
a26b174c61 | |||
73c801d660 | |||
dff5107b68 | |||
8c3e448e80 | |||
2ecbd2ad6f | |||
62a0b86e4f | |||
146969e05b | |||
e2004ccaf7 | |||
a5f8473145 | |||
b7f59a3bf6 | |||
580d411931 | |||
5c4bfd4da5 | |||
7104ae799c | |||
bcd6276520 | |||
591e384552 | |||
9feb1c9731 | |||
a093cfc78b | |||
6f20b65e72 | |||
cea364f70c | |||
55642487f0 | |||
3d643f4cec | |||
c452e69d3d | |||
555787d717 | |||
f165ca70eb | |||
27b8d2ee95 | |||
71cdcb2331 | |||
176006a120 | |||
65f4c1de3d | |||
b0082629a9 | |||
8204c73352 | |||
2b51dac1f9 | |||
f68901e50a | |||
3adb9d119e | |||
1dd58e14d8 | |||
dd4291f729 | |||
888f8d6ba4 | |||
f475e88121 | |||
3c6b3bf221 | |||
38588ab977 | |||
85bcdd081c | |||
9dcd6fd3aa | |||
98763ee354 | |||
3d83a1ae92 | |||
c0a7b9b348 | |||
831a34caa2 | |||
09b9c45e24 | |||
33898fb19c | |||
017eb82934 | |||
b1d798887e | |||
0a33bb2cb2 | |||
185744f92f | |||
7232e54813 | |||
6eb5503b12 | |||
539c881bfc | |||
c1b2a0858c | |||
215ff6e0f3 | |||
dcdb292fdd | |||
c1084ddb0c | |||
ee5de4e38e | |||
25291b979a | |||
567a5996ca |
6
.github/ISSUE_TEMPLATE.md
vendored
6
.github/ISSUE_TEMPLATE.md
vendored
@ -6,8 +6,8 @@
|
|||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2016.10.02*. If it's not read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected.
|
### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2016.11.02*. If it's not read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected.
|
||||||
- [ ] I've **verified** and **I assure** that I'm running youtube-dl **2016.10.02**
|
- [ ] I've **verified** and **I assure** that I'm running youtube-dl **2016.11.02**
|
||||||
|
|
||||||
### Before submitting an *issue* make sure you have:
|
### Before submitting an *issue* make sure you have:
|
||||||
- [ ] At least skimmed through [README](https://github.com/rg3/youtube-dl/blob/master/README.md) and **most notably** [FAQ](https://github.com/rg3/youtube-dl#faq) and [BUGS](https://github.com/rg3/youtube-dl#bugs) sections
|
- [ ] At least skimmed through [README](https://github.com/rg3/youtube-dl/blob/master/README.md) and **most notably** [FAQ](https://github.com/rg3/youtube-dl#faq) and [BUGS](https://github.com/rg3/youtube-dl#bugs) sections
|
||||||
@ -35,7 +35,7 @@ $ youtube-dl -v <your command line>
|
|||||||
[debug] User config: []
|
[debug] User config: []
|
||||||
[debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
|
[debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
|
||||||
[debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
|
[debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
|
||||||
[debug] youtube-dl version 2016.10.02
|
[debug] youtube-dl version 2016.11.02
|
||||||
[debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
|
[debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
|
||||||
[debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
|
[debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
|
||||||
[debug] Proxy map: {}
|
[debug] Proxy map: {}
|
||||||
|
7
AUTHORS
7
AUTHORS
@ -26,7 +26,7 @@ Albert Kim
|
|||||||
Pierre Rudloff
|
Pierre Rudloff
|
||||||
Huarong Huo
|
Huarong Huo
|
||||||
Ismael Mejía
|
Ismael Mejía
|
||||||
Steffan 'Ruirize' James
|
Steffan Donal
|
||||||
Andras Elso
|
Andras Elso
|
||||||
Jelle van der Waa
|
Jelle van der Waa
|
||||||
Marcin Cieślak
|
Marcin Cieślak
|
||||||
@ -185,3 +185,8 @@ Aleksander Nitecki
|
|||||||
Sebastian Blunt
|
Sebastian Blunt
|
||||||
Matěj Cepl
|
Matěj Cepl
|
||||||
Xie Yanbo
|
Xie Yanbo
|
||||||
|
Philip Xu
|
||||||
|
John Hawkinson
|
||||||
|
Rich Leeper
|
||||||
|
Zhong Jianxin
|
||||||
|
Thor77
|
||||||
|
@ -12,7 +12,7 @@ $ youtube-dl -v <your command line>
|
|||||||
[debug] Proxy map: {}
|
[debug] Proxy map: {}
|
||||||
...
|
...
|
||||||
```
|
```
|
||||||
**Do not post screenshots of verbose log only plain text is acceptable.**
|
**Do not post screenshots of verbose logs; only plain text is acceptable.**
|
||||||
|
|
||||||
The output (including the first lines) contains important debugging information. Issues without the full output are often not reproducible and therefore do not get solved in short order, if ever.
|
The output (including the first lines) contains important debugging information. Issues without the full output are often not reproducible and therefore do not get solved in short order, if ever.
|
||||||
|
|
||||||
@ -66,7 +66,7 @@ Only post features that you (or an incapacitated friend you can personally talk
|
|||||||
|
|
||||||
### Is your question about youtube-dl?
|
### Is your question about youtube-dl?
|
||||||
|
|
||||||
It may sound strange, but some bug reports we receive are completely unrelated to youtube-dl and relate to a different or even the reporter's own application. Please make sure that you are actually using youtube-dl. If you are using a UI for youtube-dl, report the bug to the maintainer of the actual application providing the UI. On the other hand, if your UI for youtube-dl fails in some way you believe is related to youtube-dl, by all means, go ahead and report the bug.
|
It may sound strange, but some bug reports we receive are completely unrelated to youtube-dl and relate to a different, or even the reporter's own, application. Please make sure that you are actually using youtube-dl. If you are using a UI for youtube-dl, report the bug to the maintainer of the actual application providing the UI. On the other hand, if your UI for youtube-dl fails in some way you believe is related to youtube-dl, by all means, go ahead and report the bug.
|
||||||
|
|
||||||
# DEVELOPER INSTRUCTIONS
|
# DEVELOPER INSTRUCTIONS
|
||||||
|
|
||||||
@ -85,7 +85,7 @@ To run the test, simply invoke your favorite test runner, or execute a test file
|
|||||||
If you want to create a build of youtube-dl yourself, you'll need
|
If you want to create a build of youtube-dl yourself, you'll need
|
||||||
|
|
||||||
* python
|
* python
|
||||||
* make (both GNU make and BSD make are supported)
|
* make (only GNU make is supported)
|
||||||
* pandoc
|
* pandoc
|
||||||
* zip
|
* zip
|
||||||
* nosetests
|
* nosetests
|
||||||
@ -167,19 +167,19 @@ In any case, thank you very much for your contributions!
|
|||||||
|
|
||||||
This section introduces guidelines for writing idiomatic, robust and future-proof extractor code.
|
This section introduces guidelines for writing idiomatic, robust and future-proof extractor code.
|
||||||
|
|
||||||
Extractors are very fragile by nature since they depend on the layout of the source data provided by 3rd party media hoster out of your control and this layout tend to change. As an extractor implementer your task is not only to write code that will extract media links and metadata correctly but also to minimize code dependency on source's layout changes and even to make the code foresee potential future changes and be ready for that. This is important because it will allow extractor not to break on minor layout changes thus keeping old youtube-dl versions working. Even though this breakage issue is easily fixed by emitting a new version of youtube-dl with fix incorporated all the previous version become broken in all repositories and distros' packages that may not be so prompt in fetching the update from us. Needless to say some may never receive an update at all that is possible for non rolling release distros.
|
Extractors are very fragile by nature since they depend on the layout of the source data provided by 3rd party media hosters out of your control and this layout tends to change. As an extractor implementer your task is not only to write code that will extract media links and metadata correctly but also to minimize dependency on the source's layout and even to make the code foresee potential future changes and be ready for that. This is important because it will allow the extractor not to break on minor layout changes thus keeping old youtube-dl versions working. Even though this breakage issue is easily fixed by emitting a new version of youtube-dl with a fix incorporated, all the previous versions become broken in all repositories and distros' packages that may not be so prompt in fetching the update from us. Needless to say, some non rolling release distros may never receive an update at all.
|
||||||
|
|
||||||
### Mandatory and optional metafields
|
### Mandatory and optional metafields
|
||||||
|
|
||||||
For extraction to work youtube-dl relies on metadata your extractor extracts and provides to youtube-dl expressed by [information dictionary](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py#L75-L257) or simply *info dict*. Only the following meta fields in *info dict* are considered mandatory for successful extraction process by youtube-dl:
|
For extraction to work youtube-dl relies on metadata your extractor extracts and provides to youtube-dl expressed by an [information dictionary](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py#L75-L257) or simply *info dict*. Only the following meta fields in the *info dict* are considered mandatory for a successful extraction process by youtube-dl:
|
||||||
|
|
||||||
- `id` (media identifier)
|
- `id` (media identifier)
|
||||||
- `title` (media title)
|
- `title` (media title)
|
||||||
- `url` (media download URL) or `formats`
|
- `url` (media download URL) or `formats`
|
||||||
|
|
||||||
In fact only the last option is technically mandatory (i.e. if you can't figure out the download location of the media the extraction does not make any sense). But by convention youtube-dl also treats `id` and `title` to be mandatory. Thus aforementioned metafields are the critical data the extraction does not make any sense without and if any of them fail to be extracted then extractor is considered completely broken.
|
In fact only the last option is technically mandatory (i.e. if you can't figure out the download location of the media the extraction does not make any sense). But by convention youtube-dl also treats `id` and `title` as mandatory. Thus the aforementioned metafields are the critical data that the extraction does not make any sense without and if any of them fail to be extracted then the extractor is considered completely broken.
|
||||||
|
|
||||||
[Any field](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py#L149-L257) apart from the aforementioned ones are considered **optional**. That means that extraction should be **tolerate** to situations when sources for these fields can potentially be unavailable (even if they are always available at the moment) and **future-proof** in order not to break the extraction of general purpose mandatory fields.
|
[Any field](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py#L149-L257) apart from the aforementioned ones is considered **optional**. That means that extraction should be **tolerant** to situations when sources for these fields can potentially be unavailable (even if they are always available at the moment) and **future-proof** in order not to break the extraction of general purpose mandatory fields.
|
||||||
|
|
||||||
#### Example
|
#### Example
|
||||||
|
|
||||||
@ -199,7 +199,7 @@ Assume at this point `meta`'s layout is:
|
|||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
Assume you want to extract `summary` and put into resulting info dict as `description`. Since `description` is optional metafield you should be ready that this key may be missing from the `meta` dict, so that you should extract it like:
|
Assume you want to extract `summary` and put it into the resulting info dict as `description`. Since `description` is an optional metafield you should be ready that this key may be missing from the `meta` dict, so that you should extract it like:
|
||||||
|
|
||||||
```python
|
```python
|
||||||
description = meta.get('summary') # correct
|
description = meta.get('summary') # correct
|
||||||
@ -211,7 +211,7 @@ and not like:
|
|||||||
description = meta['summary'] # incorrect
|
description = meta['summary'] # incorrect
|
||||||
```
|
```
|
||||||
|
|
||||||
The latter will break extraction process with `KeyError` if `summary` disappears from `meta` at some time later but with former approach extraction will just go ahead with `description` set to `None` that is perfectly fine (remember `None` is equivalent for absence of data).
|
The latter will break extraction process with `KeyError` if `summary` disappears from `meta` at some later time but with the former approach extraction will just go ahead with `description` set to `None` which is perfectly fine (remember `None` is equivalent to the absence of data).
|
||||||
|
|
||||||
Similarly, you should pass `fatal=False` when extracting optional data from a webpage with `_search_regex`, `_html_search_regex` or similar methods, for instance:
|
Similarly, you should pass `fatal=False` when extracting optional data from a webpage with `_search_regex`, `_html_search_regex` or similar methods, for instance:
|
||||||
|
|
||||||
@ -231,21 +231,21 @@ description = self._search_regex(
|
|||||||
webpage, 'description', default=None)
|
webpage, 'description', default=None)
|
||||||
```
|
```
|
||||||
|
|
||||||
On failure this code will silently continue the extraction with `description` set to `None`. That is useful for metafields that are known to may or may not be present.
|
On failure this code will silently continue the extraction with `description` set to `None`. That is useful for metafields that may or may not be present.
|
||||||
|
|
||||||
### Provide fallbacks
|
### Provide fallbacks
|
||||||
|
|
||||||
When extracting metadata try to provide several scenarios for that. For example if `title` is present in several places/sources try extracting from at least some of them. This would make it more future-proof in case some of the sources became unavailable.
|
When extracting metadata try to do so from multiple sources. For example if `title` is present in several places, try extracting from at least some of them. This makes it more future-proof in case some of the sources become unavailable.
|
||||||
|
|
||||||
#### Example
|
#### Example
|
||||||
|
|
||||||
Say `meta` from previous example has a `title` and you are about to extract it. Since `title` is mandatory meta field you should end up with something like:
|
Say `meta` from the previous example has a `title` and you are about to extract it. Since `title` is a mandatory meta field you should end up with something like:
|
||||||
|
|
||||||
```python
|
```python
|
||||||
title = meta['title']
|
title = meta['title']
|
||||||
```
|
```
|
||||||
|
|
||||||
If `title` disappeares from `meta` in future due to some changes on hoster's side the extraction would fail since `title` is mandatory. That's expected.
|
If `title` disappears from `meta` in future due to some changes on the hoster's side the extraction would fail since `title` is mandatory. That's expected.
|
||||||
|
|
||||||
Assume that you have another source from which you can extract `title`, for example the `og:title` HTML meta of a `webpage`. In this case you can provide a fallback scenario:
|
Assume that you have another source from which you can extract `title`, for example the `og:title` HTML meta of a `webpage`. In this case you can provide a fallback scenario:
|
||||||
|
|
||||||
@ -282,7 +282,7 @@ title = self._search_regex(
|
|||||||
webpage, 'title', group='title')
|
webpage, 'title', group='title')
|
||||||
```
|
```
|
||||||
|
|
||||||
Note how you tolerate potential changes in `style` attribute's value or switch from using double quotes to single for `class` attribute:
|
Note how you tolerate potential changes in the `style` attribute's value or switch from using double quotes to single for `class` attribute:
|
||||||
|
|
||||||
The code definitely should not look like:
|
The code definitely should not look like:
|
||||||
|
|
||||||
|
150
ChangeLog
150
ChangeLog
@ -1,3 +1,153 @@
|
|||||||
|
version 2016.11.02
|
||||||
|
|
||||||
|
Core
|
||||||
|
+ Add basic support for Smooth Streaming protocol (#8118, #10969)
|
||||||
|
* Improve MPD manifest base URL extraction (#10909, #11079)
|
||||||
|
* Fix --match-filter for int-like strings (#11082)
|
||||||
|
|
||||||
|
Extractors
|
||||||
|
+ [mva] Add support for ISM formats
|
||||||
|
+ [msn] Add support for ISM formats
|
||||||
|
+ [onet] Add support for ISM formats
|
||||||
|
+ [tvp] Add support for ISM formats
|
||||||
|
+ [nicknight] Add support for nicknight sites (#10769)
|
||||||
|
|
||||||
|
|
||||||
|
version 2016.10.30
|
||||||
|
|
||||||
|
Extractors
|
||||||
|
* [facebook] Improve 1080P video detection (#11073)
|
||||||
|
* [imgur] Recognize /r/ URLs (#11071)
|
||||||
|
* [beeg] Fix extraction (#11069)
|
||||||
|
* [openload] Fix extraction (#10408)
|
||||||
|
* [gvsearch] Modernize and fix search request (#11051)
|
||||||
|
* [adultswim] Fix extraction (#10979)
|
||||||
|
+ [nobelprize] Add support for nobelprize.org (#9999)
|
||||||
|
* [hornbunny] Fix extraction (#10981)
|
||||||
|
* [tvp] Improve video id extraction (#10585)
|
||||||
|
|
||||||
|
|
||||||
|
version 2016.10.26
|
||||||
|
|
||||||
|
Extractors
|
||||||
|
+ [rentv] Add support for ren.tv (#10620)
|
||||||
|
+ [ard] Detect unavailable videos (#11018)
|
||||||
|
* [vk] Fix extraction (#11022)
|
||||||
|
|
||||||
|
|
||||||
|
version 2016.10.25
|
||||||
|
|
||||||
|
Core
|
||||||
|
* Running youtube-dl in the background is fixed (#10996, #10706, #955)
|
||||||
|
|
||||||
|
Extractors
|
||||||
|
+ [jamendo] Add support for jamendo.com (#10132, #10736)
|
||||||
|
+ [pandatv] Add support for panda.tv (#10736)
|
||||||
|
+ [dotsub] Support Vimeo embed (#10964)
|
||||||
|
* [litv] Fix extraction
|
||||||
|
+ [vimeo] Delegate ondemand redirects to ondemand extractor (#10994)
|
||||||
|
* [vivo] Fix extraction (#11003)
|
||||||
|
+ [twitch:stream] Add support for rebroadcasts (#10995)
|
||||||
|
* [pluralsight] Fix subtitles conversion (#10990)
|
||||||
|
|
||||||
|
|
||||||
|
version 2016.10.21.1
|
||||||
|
|
||||||
|
Extractors
|
||||||
|
+ [pluralsight] Process all clip URLs (#10984)
|
||||||
|
|
||||||
|
|
||||||
|
version 2016.10.21
|
||||||
|
|
||||||
|
Core
|
||||||
|
- Disable thumbnails embedding in mkv
|
||||||
|
+ Add support for Comcast multiple-system operator (#10819)
|
||||||
|
|
||||||
|
Extractors
|
||||||
|
* [pluralsight] Adapt to new API (#10972)
|
||||||
|
* [openload] Fix extraction (#10408, #10971)
|
||||||
|
+ [natgeo] Extract m3u8 formats (#10959)
|
||||||
|
|
||||||
|
|
||||||
|
version 2016.10.19
|
||||||
|
|
||||||
|
Core
|
||||||
|
+ [utils] Expose PACKED_CODES_RE
|
||||||
|
+ [extractor/common] Extract non smil wowza mpd manifests
|
||||||
|
+ [extractor/common] Detect f4m audio-only formats
|
||||||
|
|
||||||
|
Extractors
|
||||||
|
* [vidzi] Fix extraction (#10908, #10952)
|
||||||
|
* [urplay] Fix subtitles extraction
|
||||||
|
+ [urplay] Add support for urskola.se (#10915)
|
||||||
|
+ [orf] Add subtitles support (#10939)
|
||||||
|
* [youtube] Fix --no-playlist behavior for youtu.be/id URLs (#10896)
|
||||||
|
* [nrk] Relax URL regular expression (#10928)
|
||||||
|
+ [nytimes] Add support for podcasts (#10926)
|
||||||
|
* [pluralsight] Relax URL regular expression (#10941)
|
||||||
|
|
||||||
|
|
||||||
|
version 2016.10.16
|
||||||
|
|
||||||
|
Core
|
||||||
|
* [postprocessor/ffmpeg] Return correct filepath and ext in updated information
|
||||||
|
in FFmpegExtractAudioPP (#10879)
|
||||||
|
|
||||||
|
Extractors
|
||||||
|
+ [ruutu] Add support for supla.fi (#10849)
|
||||||
|
+ [theoperaplatform] Add support for theoperaplatform.eu (#10914)
|
||||||
|
* [lynda] Fix height for prioritized streams
|
||||||
|
+ [lynda] Add fallback extraction scenario
|
||||||
|
* [lynda] Switch to https (#10916)
|
||||||
|
+ [huajiao] New extractor (#10917)
|
||||||
|
* [cmt] Fix mgid extraction (#10813)
|
||||||
|
+ [safari:course] Add support for techbus.safaribooksonline.com
|
||||||
|
* [orf:tvthek] Fix extraction and modernize (#10898)
|
||||||
|
* [chirbit] Fix extraction of user profile pages
|
||||||
|
* [carambatv] Fix extraction
|
||||||
|
* [canalplus] Fix extraction for some videos
|
||||||
|
* [cbsinteractive] Fix extraction for cnet.com
|
||||||
|
* [parliamentliveuk] Lower case URLs are now recognized (#10912)
|
||||||
|
|
||||||
|
|
||||||
|
version 2016.10.12
|
||||||
|
|
||||||
|
Core
|
||||||
|
+ Support HTML media elements without child nodes
|
||||||
|
* [Makefile] Support for GNU make < 4 is fixed; BSD make dropped (#9387)
|
||||||
|
|
||||||
|
Extractors
|
||||||
|
* [dailymotion] Fix extraction (#10901)
|
||||||
|
* [vimeo:review] Fix extraction (#10900)
|
||||||
|
* [nhl] Correctly handle invalid formats (#10713)
|
||||||
|
* [footyroom] Fix extraction (#10810)
|
||||||
|
* [abc.net.au:iview] Fix for standalone (non series) videos (#10895)
|
||||||
|
+ [hbo] Add support for episode pages (#10892)
|
||||||
|
* [allocine] Fix extraction (#10860)
|
||||||
|
+ [nextmedia] Recognize action news on AppleDaily
|
||||||
|
* [lego] Improve info extraction and bypass geo restriction (#10872)
|
||||||
|
|
||||||
|
|
||||||
|
version 2016.10.07
|
||||||
|
|
||||||
|
Extractors
|
||||||
|
+ [iprima] Detect geo restriction
|
||||||
|
* [facebook] Fix video extraction (#10846)
|
||||||
|
+ [commonprotocols] Support direct MMS links (#10838)
|
||||||
|
+ [generic] Add support for multiple vimeo embeds (#10862)
|
||||||
|
+ [nzz] Add support for nzz.ch (#4407)
|
||||||
|
+ [npo] Detect geo restriction
|
||||||
|
+ [npo] Add support for 2doc.nl (#10842)
|
||||||
|
+ [lego] Add support for lego.com (#10369)
|
||||||
|
+ [tonline] Add support for t-online.de (#10376)
|
||||||
|
* [techtalks] Relax URL regular expression (#10840)
|
||||||
|
* [youtube:live] Extend URL regular expression (#10839)
|
||||||
|
+ [theweatherchannel] Add support for weather.com (#7188)
|
||||||
|
+ [thisoldhouse] Add support for thisoldhouse.com (#10837)
|
||||||
|
+ [nhl] Add support for wch2016.com (#10833)
|
||||||
|
* [pornoxo] Use JWPlatform to improve metadata extraction
|
||||||
|
|
||||||
|
|
||||||
version 2016.10.02
|
version 2016.10.02
|
||||||
|
|
||||||
Core
|
Core
|
||||||
|
4
Makefile
4
Makefile
@ -12,7 +12,7 @@ SHAREDIR ?= $(PREFIX)/share
|
|||||||
PYTHON ?= /usr/bin/env python
|
PYTHON ?= /usr/bin/env python
|
||||||
|
|
||||||
# set SYSCONFDIR to /etc if PREFIX=/usr or PREFIX=/usr/local
|
# set SYSCONFDIR to /etc if PREFIX=/usr or PREFIX=/usr/local
|
||||||
SYSCONFDIR != if [ $(PREFIX) = /usr -o $(PREFIX) = /usr/local ]; then echo /etc; else echo $(PREFIX)/etc; fi
|
SYSCONFDIR = $(shell if [ $(PREFIX) = /usr -o $(PREFIX) = /usr/local ]; then echo /etc; else echo $(PREFIX)/etc; fi)
|
||||||
|
|
||||||
install: youtube-dl youtube-dl.1 youtube-dl.bash-completion youtube-dl.zsh youtube-dl.fish
|
install: youtube-dl youtube-dl.1 youtube-dl.bash-completion youtube-dl.zsh youtube-dl.fish
|
||||||
install -d $(DESTDIR)$(BINDIR)
|
install -d $(DESTDIR)$(BINDIR)
|
||||||
@ -90,7 +90,7 @@ fish-completion: youtube-dl.fish
|
|||||||
|
|
||||||
lazy-extractors: youtube_dl/extractor/lazy_extractors.py
|
lazy-extractors: youtube_dl/extractor/lazy_extractors.py
|
||||||
|
|
||||||
_EXTRACTOR_FILES != find youtube_dl/extractor -iname '*.py' -and -not -iname 'lazy_extractors.py'
|
_EXTRACTOR_FILES = $(shell find youtube_dl/extractor -iname '*.py' -and -not -iname 'lazy_extractors.py')
|
||||||
youtube_dl/extractor/lazy_extractors.py: devscripts/make_lazy_extractors.py devscripts/lazy_load_template.py $(_EXTRACTOR_FILES)
|
youtube_dl/extractor/lazy_extractors.py: devscripts/make_lazy_extractors.py devscripts/lazy_load_template.py $(_EXTRACTOR_FILES)
|
||||||
$(PYTHON) devscripts/make_lazy_extractors.py $@
|
$(PYTHON) devscripts/make_lazy_extractors.py $@
|
||||||
|
|
||||||
|
76
README.md
76
README.md
@ -449,12 +449,12 @@ You can use `--ignore-config` if you want to disable the configuration file for
|
|||||||
|
|
||||||
### Authentication with `.netrc` file
|
### Authentication with `.netrc` file
|
||||||
|
|
||||||
You may also want to configure automatic credentials storage for extractors that support authentication (by providing login and password with `--username` and `--password`) in order not to pass credentials as command line arguments on every youtube-dl execution and prevent tracking plain text passwords in the shell command history. You can achieve this using a [`.netrc` file](http://stackoverflow.com/tags/.netrc/info) on per extractor basis. For that you will need to create a `.netrc` file in your `$HOME` and restrict permissions to read/write by you only:
|
You may also want to configure automatic credentials storage for extractors that support authentication (by providing login and password with `--username` and `--password`) in order not to pass credentials as command line arguments on every youtube-dl execution and prevent tracking plain text passwords in the shell command history. You can achieve this using a [`.netrc` file](http://stackoverflow.com/tags/.netrc/info) on a per extractor basis. For that you will need to create a `.netrc` file in your `$HOME` and restrict permissions to read/write by only you:
|
||||||
```
|
```
|
||||||
touch $HOME/.netrc
|
touch $HOME/.netrc
|
||||||
chmod a-rwx,u+rw $HOME/.netrc
|
chmod a-rwx,u+rw $HOME/.netrc
|
||||||
```
|
```
|
||||||
After that you can add credentials for extractor in the following format, where *extractor* is the name of extractor in lowercase:
|
After that you can add credentials for an extractor in the following format, where *extractor* is the name of the extractor in lowercase:
|
||||||
```
|
```
|
||||||
machine <extractor> login <login> password <password>
|
machine <extractor> login <login> password <password>
|
||||||
```
|
```
|
||||||
@ -550,13 +550,13 @@ Available for the media that is a track or a part of a music album:
|
|||||||
- `disc_number`: Number of the disc or other physical medium the track belongs to
|
- `disc_number`: Number of the disc or other physical medium the track belongs to
|
||||||
- `release_year`: Year (YYYY) when the album was released
|
- `release_year`: Year (YYYY) when the album was released
|
||||||
|
|
||||||
Each aforementioned sequence when referenced in output template will be replaced by the actual value corresponding to the sequence name. Note that some of the sequences are not guaranteed to be present since they depend on the metadata obtained by particular extractor, such sequences will be replaced with `NA`.
|
Each aforementioned sequence when referenced in an output template will be replaced by the actual value corresponding to the sequence name. Note that some of the sequences are not guaranteed to be present since they depend on the metadata obtained by a particular extractor. Such sequences will be replaced with `NA`.
|
||||||
|
|
||||||
For example for `-o %(title)s-%(id)s.%(ext)s` and mp4 video with title `youtube-dl test video` and id `BaW_jenozKcj` this will result in a `youtube-dl test video-BaW_jenozKcj.mp4` file created in the current directory.
|
For example for `-o %(title)s-%(id)s.%(ext)s` and an mp4 video with title `youtube-dl test video` and id `BaW_jenozKcj`, this will result in a `youtube-dl test video-BaW_jenozKcj.mp4` file created in the current directory.
|
||||||
|
|
||||||
Output template can also contain arbitrary hierarchical path, e.g. `-o '%(playlist)s/%(playlist_index)s - %(title)s.%(ext)s'` that will result in downloading each video in a directory corresponding to this path template. Any missing directory will be automatically created for you.
|
Output templates can also contain arbitrary hierarchical paths, e.g. `-o '%(playlist)s/%(playlist_index)s - %(title)s.%(ext)s'` which will result in downloading each video in a directory corresponding to this path template. Any missing directory will be automatically created for you.
|
||||||
|
|
||||||
To specify percent literal in output template use `%%`. To output to stdout use `-o -`.
|
To use percent literals in an output template use `%%`. To output to stdout use `-o -`.
|
||||||
|
|
||||||
The current default template is `%(title)s-%(id)s.%(ext)s`.
|
The current default template is `%(title)s-%(id)s.%(ext)s`.
|
||||||
|
|
||||||
@ -564,7 +564,7 @@ In some cases, you don't want special characters such as 中, spaces, or &, such
|
|||||||
|
|
||||||
#### Output template and Windows batch files
|
#### Output template and Windows batch files
|
||||||
|
|
||||||
If you are using output template inside a Windows batch file then you must escape plain percent characters (`%`) by doubling, so that `-o "%(title)s-%(id)s.%(ext)s"` should become `-o "%%(title)s-%%(id)s.%%(ext)s"`. However you should not touch `%`'s that are not plain characters, e.g. environment variables for expansion should stay intact: `-o "C:\%HOMEPATH%\Desktop\%%(title)s.%%(ext)s"`.
|
If you are using an output template inside a Windows batch file then you must escape plain percent characters (`%`) by doubling, so that `-o "%(title)s-%(id)s.%(ext)s"` should become `-o "%%(title)s-%%(id)s.%%(ext)s"`. However you should not touch `%`'s that are not plain characters, e.g. environment variables for expansion should stay intact: `-o "C:\%HOMEPATH%\Desktop\%%(title)s.%%(ext)s"`.
|
||||||
|
|
||||||
#### Output template examples
|
#### Output template examples
|
||||||
|
|
||||||
@ -597,7 +597,7 @@ $ youtube-dl -o - BaW_jenozKc
|
|||||||
|
|
||||||
By default youtube-dl tries to download the best available quality, i.e. if you want the best quality you **don't need** to pass any special options, youtube-dl will guess it for you by **default**.
|
By default youtube-dl tries to download the best available quality, i.e. if you want the best quality you **don't need** to pass any special options, youtube-dl will guess it for you by **default**.
|
||||||
|
|
||||||
But sometimes you may want to download in a different format, for example when you are on a slow or intermittent connection. The key mechanism for achieving this is so called *format selection* based on which you can explicitly specify desired format, select formats based on some criterion or criteria, setup precedence and much more.
|
But sometimes you may want to download in a different format, for example when you are on a slow or intermittent connection. The key mechanism for achieving this is so-called *format selection* based on which you can explicitly specify desired format, select formats based on some criterion or criteria, setup precedence and much more.
|
||||||
|
|
||||||
The general syntax for format selection is `--format FORMAT` or shorter `-f FORMAT` where `FORMAT` is a *selector expression*, i.e. an expression that describes format or formats you would like to download.
|
The general syntax for format selection is `--format FORMAT` or shorter `-f FORMAT` where `FORMAT` is a *selector expression*, i.e. an expression that describes format or formats you would like to download.
|
||||||
|
|
||||||
@ -605,21 +605,21 @@ The general syntax for format selection is `--format FORMAT` or shorter `-f FORM
|
|||||||
|
|
||||||
The simplest case is requesting a specific format, for example with `-f 22` you can download the format with format code equal to 22. You can get the list of available format codes for particular video using `--list-formats` or `-F`. Note that these format codes are extractor specific.
|
The simplest case is requesting a specific format, for example with `-f 22` you can download the format with format code equal to 22. You can get the list of available format codes for a particular video using `--list-formats` or `-F`. Note that these format codes are extractor specific.
|
||||||
|
|
||||||
You can also use a file extension (currently `3gp`, `aac`, `flv`, `m4a`, `mp3`, `mp4`, `ogg`, `wav`, `webm` are supported) to download best quality format of particular file extension served as a single file, e.g. `-f webm` will download best quality format with `webm` extension served as a single file.
|
You can also use a file extension (currently `3gp`, `aac`, `flv`, `m4a`, `mp3`, `mp4`, `ogg`, `wav`, `webm` are supported) to download the best quality format of a particular file extension served as a single file, e.g. `-f webm` will download the best quality format with the `webm` extension served as a single file.
|
||||||
|
|
||||||
You can also use special names to select particular edge case format:
|
You can also use special names to select particular edge case formats:
|
||||||
- `best`: Select best quality format represented by single file with video and audio
|
- `best`: Select the best quality format represented by a single file with video and audio.
|
||||||
- `worst`: Select worst quality format represented by single file with video and audio
|
- `worst`: Select the worst quality format represented by a single file with video and audio.
|
||||||
- `bestvideo`: Select best quality video only format (e.g. DASH video), may not be available
|
- `bestvideo`: Select the best quality video-only format (e.g. DASH video). May not be available.
|
||||||
- `worstvideo`: Select worst quality video only format, may not be available
|
- `worstvideo`: Select the worst quality video-only format. May not be available.
|
||||||
- `bestaudio`: Select best quality audio only format, may not be available
|
- `bestaudio`: Select the best quality audio-only format. May not be available.
|
||||||
- `worstaudio`: Select worst quality audio only format, may not be available
|
- `worstaudio`: Select the worst quality audio-only format. May not be available.
|
||||||
|
|
||||||
For example, to download worst quality video only format you can use `-f worstvideo`.
|
For example, to download the worst quality video-only format you can use `-f worstvideo`.
|
||||||
|
|
||||||
If you want to download multiple videos and they don't have the same formats available, you can specify the order of preference using slashes. Note that slash is left-associative, i.e. formats on the left hand side are preferred, for example `-f 22/17/18` will download format 22 if it's available, otherwise it will download format 17 if it's available, otherwise it will download format 18 if it's available, otherwise it will complain that no suitable formats are available for download.
|
If you want to download multiple videos and they don't have the same formats available, you can specify the order of preference using slashes. Note that slash is left-associative, i.e. formats on the left hand side are preferred, for example `-f 22/17/18` will download format 22 if it's available, otherwise it will download format 17 if it's available, otherwise it will download format 18 if it's available, otherwise it will complain that no suitable formats are available for download.
|
||||||
|
|
||||||
If you want to download several formats of the same video use comma as a separator, e.g. `-f 22,17,18` will download all these three formats, of course if they are available. Or more sophisticated example combined with precedence feature `-f 136/137/mp4/bestvideo,140/m4a/bestaudio`.
|
If you want to download several formats of the same video use a comma as a separator, e.g. `-f 22,17,18` will download all these three formats, of course if they are available. Or a more sophisticated example combined with the precedence feature: `-f 136/137/mp4/bestvideo,140/m4a/bestaudio`.
|
||||||
|
|
||||||
You can also filter the video formats by putting a condition in brackets, as in `-f "best[height=720]"` (or `-f "[filesize>10M]"`).
|
You can also filter the video formats by putting a condition in brackets, as in `-f "best[height=720]"` (or `-f "[filesize>10M]"`).
|
||||||
|
|
||||||
@ -641,15 +641,15 @@ Also filtering work for comparisons `=` (equals), `!=` (not equals), `^=` (begin
|
|||||||
- `protocol`: The protocol that will be used for the actual download, lower-case. `http`, `https`, `rtsp`, `rtmp`, `rtmpe`, `m3u8`, or `m3u8_native`
|
- `protocol`: The protocol that will be used for the actual download, lower-case. `http`, `https`, `rtsp`, `rtmp`, `rtmpe`, `m3u8`, or `m3u8_native`
|
||||||
- `format_id`: A short description of the format
|
- `format_id`: A short description of the format
|
||||||
|
|
||||||
Note that none of the aforementioned meta fields are guaranteed to be present since this solely depends on the metadata obtained by particular extractor, i.e. the metadata offered by video hoster.
|
Note that none of the aforementioned meta fields are guaranteed to be present since this solely depends on the metadata obtained by a particular extractor, i.e. the metadata offered by the video hoster.
|
||||||
|
|
||||||
Formats for which the value is not known are excluded unless you put a question mark (`?`) after the operator. You can combine format filters, so `-f "[height <=? 720][tbr>500]"` selects up to 720p videos (or videos where the height is not known) with a bitrate of at least 500 KBit/s.
|
Formats for which the value is not known are excluded unless you put a question mark (`?`) after the operator. You can combine format filters, so `-f "[height <=? 720][tbr>500]"` selects up to 720p videos (or videos where the height is not known) with a bitrate of at least 500 KBit/s.
|
||||||
|
|
||||||
You can merge the video and audio of two formats into a single file using `-f <video-format>+<audio-format>` (requires ffmpeg or avconv installed), for example `-f bestvideo+bestaudio` will download best video only format, best audio only format and mux them together with ffmpeg/avconv.
|
You can merge the video and audio of two formats into a single file using `-f <video-format>+<audio-format>` (requires ffmpeg or avconv installed), for example `-f bestvideo+bestaudio` will download the best video-only format, the best audio-only format and mux them together with ffmpeg/avconv.
|
||||||
|
|
||||||
Format selectors can also be grouped using parentheses, for example if you want to download the best mp4 and webm formats with a height lower than 480 you can use `-f '(mp4,webm)[height<480]'`.
|
Format selectors can also be grouped using parentheses, for example if you want to download the best mp4 and webm formats with a height lower than 480 you can use `-f '(mp4,webm)[height<480]'`.
|
||||||
|
|
||||||
Since the end of April 2015 and version 2015.04.26 youtube-dl uses `-f bestvideo+bestaudio/best` as default format selection (see [#5447](https://github.com/rg3/youtube-dl/issues/5447), [#5456](https://github.com/rg3/youtube-dl/issues/5456)). If ffmpeg or avconv are installed this results in downloading `bestvideo` and `bestaudio` separately and muxing them together into a single file giving the best overall quality available. Otherwise it falls back to `best` and results in downloading the best available quality served as a single file. `best` is also needed for videos that don't come from YouTube because they don't provide the audio and video in two different files. If you want to only download some DASH formats (for example if you are not interested in getting videos with a resolution higher than 1080p), you can add `-f bestvideo[height<=?1080]+bestaudio/best` to your configuration file. Note that if you use youtube-dl to stream to `stdout` (and most likely to pipe it to your media player then), i.e. you explicitly specify output template as `-o -`, youtube-dl still uses `-f best` format selection in order to start content delivery immediately to your player and not to wait until `bestvideo` and `bestaudio` are downloaded and muxed.
|
Since the end of April 2015 and version 2015.04.26, youtube-dl uses `-f bestvideo+bestaudio/best` as the default format selection (see [#5447](https://github.com/rg3/youtube-dl/issues/5447), [#5456](https://github.com/rg3/youtube-dl/issues/5456)). If ffmpeg or avconv are installed this results in downloading `bestvideo` and `bestaudio` separately and muxing them together into a single file giving the best overall quality available. Otherwise it falls back to `best` and results in downloading the best available quality served as a single file. `best` is also needed for videos that don't come from YouTube because they don't provide the audio and video in two different files. If you want to only download some DASH formats (for example if you are not interested in getting videos with a resolution higher than 1080p), you can add `-f bestvideo[height<=?1080]+bestaudio/best` to your configuration file. Note that if you use youtube-dl to stream to `stdout` (and most likely to pipe it to your media player then), i.e. you explicitly specify output template as `-o -`, youtube-dl still uses `-f best` format selection in order to start content delivery immediately to your player and not to wait until `bestvideo` and `bestaudio` are downloaded and muxed.
|
||||||
|
|
||||||
If you want to preserve the old format selection behavior (prior to youtube-dl 2015.04.26), i.e. you want to download the best available quality media served as a single file, you should explicitly specify your choice with `-f best`. You may want to add it to the [configuration file](#configuration) in order not to type it every time you run youtube-dl.
|
If you want to preserve the old format selection behavior (prior to youtube-dl 2015.04.26), i.e. you want to download the best available quality media served as a single file, you should explicitly specify your choice with `-f best`. You may want to add it to the [configuration file](#configuration) in order not to type it every time you run youtube-dl.
|
||||||
|
|
||||||
@ -728,7 +728,7 @@ Add a file exclusion for `youtube-dl.exe` in Windows Defender settings.
|
|||||||
|
|
||||||
YouTube changed their playlist format in March 2014 and later on, so you'll need at least youtube-dl 2014.07.25 to download all YouTube videos.
|
YouTube changed their playlist format in March 2014 and later on, so you'll need at least youtube-dl 2014.07.25 to download all YouTube videos.
|
||||||
|
|
||||||
If you have installed youtube-dl with a package manager, pip, setup.py or a tarball, please use that to update. Note that Ubuntu packages do not seem to get updated anymore. Since we are not affiliated with Ubuntu, there is little we can do. Feel free to [report bugs](https://bugs.launchpad.net/ubuntu/+source/youtube-dl/+filebug) to the [Ubuntu packaging guys](mailto:ubuntu-motu@lists.ubuntu.com?subject=outdated%20version%20of%20youtube-dl) - all they have to do is update the package to a somewhat recent version. See above for a way to update.
|
If you have installed youtube-dl with a package manager, pip, setup.py or a tarball, please use that to update. Note that Ubuntu packages do not seem to get updated anymore. Since we are not affiliated with Ubuntu, there is little we can do. Feel free to [report bugs](https://bugs.launchpad.net/ubuntu/+source/youtube-dl/+filebug) to the [Ubuntu packaging people](mailto:ubuntu-motu@lists.ubuntu.com?subject=outdated%20version%20of%20youtube-dl) - all they have to do is update the package to a somewhat recent version. See above for a way to update.
|
||||||
|
|
||||||
### I'm getting an error when trying to use output template: `error: using output template conflicts with using title, video ID or auto number`
|
### I'm getting an error when trying to use output template: `error: using output template conflicts with using title, video ID or auto number`
|
||||||
|
|
||||||
@ -902,7 +902,7 @@ If you want to find out whether a given URL is supported, simply call youtube-dl
|
|||||||
|
|
||||||
# Why do I need to go through that much red tape when filing bugs?
|
# Why do I need to go through that much red tape when filing bugs?
|
||||||
|
|
||||||
Before we had the issue template, despite our extensive [bug reporting instructions](#bugs), about 80% of the issue reports we got were useless, for instance because people used ancient versions hundreds of releases old, because of simple syntactic errors (not in youtube-dl but in general shell usage), because the problem was alrady reported multiple times before, because people did not actually read an error message, even if it said "please install ffmpeg", because people did not mention the URL they were trying to download and many more simple, easy-to-avoid problems, many of whom were totally unrelated to youtube-dl.
|
Before we had the issue template, despite our extensive [bug reporting instructions](#bugs), about 80% of the issue reports we got were useless, for instance because people used ancient versions hundreds of releases old, because of simple syntactic errors (not in youtube-dl but in general shell usage), because the problem was already reported multiple times before, because people did not actually read an error message, even if it said "please install ffmpeg", because people did not mention the URL they were trying to download and many more simple, easy-to-avoid problems, many of which were totally unrelated to youtube-dl.
|
||||||
|
|
||||||
youtube-dl is an open-source project manned by too few volunteers, so we'd rather spend time fixing bugs where we are certain none of those simple problems apply, and where we can be reasonably confident to be able to reproduce the issue without asking the reporter repeatedly. As such, the output of `youtube-dl -v YOUR_URL_HERE` is really all that's required to file an issue. The issue template also guides you through some basic steps you can do, such as checking that your version of youtube-dl is current.
|
youtube-dl is an open-source project manned by too few volunteers, so we'd rather spend time fixing bugs where we are certain none of those simple problems apply, and where we can be reasonably confident to be able to reproduce the issue without asking the reporter repeatedly. As such, the output of `youtube-dl -v YOUR_URL_HERE` is really all that's required to file an issue. The issue template also guides you through some basic steps you can do, such as checking that your version of youtube-dl is current.
|
||||||
|
|
||||||
@ -923,7 +923,7 @@ To run the test, simply invoke your favorite test runner, or execute a test file
|
|||||||
If you want to create a build of youtube-dl yourself, you'll need
|
If you want to create a build of youtube-dl yourself, you'll need
|
||||||
|
|
||||||
* python
|
* python
|
||||||
* make (both GNU make and BSD make are supported)
|
* make (only GNU make is supported)
|
||||||
* pandoc
|
* pandoc
|
||||||
* zip
|
* zip
|
||||||
* nosetests
|
* nosetests
|
||||||
@ -1005,19 +1005,19 @@ In any case, thank you very much for your contributions!
|
|||||||
|
|
||||||
This section introduces a guide lines for writing idiomatic, robust and future-proof extractor code.
|
This section introduces guidelines for writing idiomatic, robust and future-proof extractor code.
|
||||||
|
|
||||||
Extractors are very fragile by nature since they depend on the layout of the source data provided by 3rd party media hoster out of your control and this layout tend to change. As an extractor implementer your task is not only to write code that will extract media links and metadata correctly but also to minimize code dependency on source's layout changes and even to make the code foresee potential future changes and be ready for that. This is important because it will allow extractor not to break on minor layout changes thus keeping old youtube-dl versions working. Even though this breakage issue is easily fixed by emitting a new version of youtube-dl with fix incorporated all the previous version become broken in all repositories and distros' packages that may not be so prompt in fetching the update from us. Needless to say some may never receive an update at all that is possible for non rolling release distros.
|
Extractors are very fragile by nature since they depend on the layout of the source data provided by 3rd party media hosters out of your control and this layout tends to change. As an extractor implementer your task is not only to write code that will extract media links and metadata correctly but also to minimize dependency on the source's layout and even to make the code foresee potential future changes and be ready for that. This is important because it will allow the extractor not to break on minor layout changes thus keeping old youtube-dl versions working. Even though this breakage issue is easily fixed by emitting a new version of youtube-dl with a fix incorporated, all the previous versions become broken in all repositories and distros' packages that may not be so prompt in fetching the update from us. Needless to say, some non rolling release distros may never receive an update at all.
|
||||||
|
|
||||||
### Mandatory and optional metafields
|
### Mandatory and optional metafields
|
||||||
|
|
||||||
For extraction to work youtube-dl relies on metadata your extractor extracts and provides to youtube-dl expressed by [information dictionary](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py#L75-L257) or simply *info dict*. Only the following meta fields in *info dict* are considered mandatory for successful extraction process by youtube-dl:
|
For extraction to work youtube-dl relies on metadata your extractor extracts and provides to youtube-dl expressed by an [information dictionary](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py#L75-L257) or simply *info dict*. Only the following meta fields in the *info dict* are considered mandatory for a successful extraction process by youtube-dl:
|
||||||
|
|
||||||
- `id` (media identifier)
|
- `id` (media identifier)
|
||||||
- `title` (media title)
|
- `title` (media title)
|
||||||
- `url` (media download URL) or `formats`
|
- `url` (media download URL) or `formats`
|
||||||
|
|
||||||
In fact only the last option is technically mandatory (i.e. if you can't figure out the download location of the media the extraction does not make any sense). But by convention youtube-dl also treats `id` and `title` to be mandatory. Thus aforementioned metafields are the critical data the extraction does not make any sense without and if any of them fail to be extracted then extractor is considered completely broken.
|
In fact only the last option is technically mandatory (i.e. if you can't figure out the download location of the media the extraction does not make any sense). But by convention youtube-dl also treats `id` and `title` as mandatory. Thus the aforementioned metafields are the critical data that the extraction does not make any sense without and if any of them fail to be extracted then the extractor is considered completely broken.
|
||||||
|
|
||||||
[Any field](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py#L149-L257) apart from the aforementioned ones are considered **optional**. That means that extraction should be **tolerate** to situations when sources for these fields can potentially be unavailable (even if they are always available at the moment) and **future-proof** in order not to break the extraction of general purpose mandatory fields.
|
[Any field](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py#L149-L257) apart from the aforementioned ones is considered **optional**. That means that extraction should be **tolerant** to situations when sources for these fields can potentially be unavailable (even if they are always available at the moment) and **future-proof** in order not to break the extraction of general purpose mandatory fields.
|
||||||
|
|
||||||
#### Example
|
#### Example
|
||||||
|
|
||||||
@ -1037,7 +1037,7 @@ Assume at this point `meta`'s layout is:
|
|||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
Assume you want to extract `summary` and put into resulting info dict as `description`. Since `description` is optional metafield you should be ready that this key may be missing from the `meta` dict, so that you should extract it like:
|
Assume you want to extract `summary` and put it into the resulting info dict as `description`. Since `description` is an optional metafield you should be prepared for this key to be missing from the `meta` dict, so you should extract it like:
|
||||||
|
|
||||||
```python
|
```python
|
||||||
description = meta.get('summary') # correct
|
description = meta.get('summary') # correct
|
||||||
@ -1049,7 +1049,7 @@ and not like:
|
|||||||
description = meta['summary'] # incorrect
|
description = meta['summary'] # incorrect
|
||||||
```
|
```
|
||||||
|
|
||||||
The latter will break extraction process with `KeyError` if `summary` disappears from `meta` at some time later but with former approach extraction will just go ahead with `description` set to `None` that is perfectly fine (remember `None` is equivalent for absence of data).
|
The latter will break the extraction process with `KeyError` if `summary` disappears from `meta` at some later time but with the former approach extraction will just go ahead with `description` set to `None` which is perfectly fine (remember `None` is equivalent to the absence of data).
|
||||||
|
|
||||||
Similarly, you should pass `fatal=False` when extracting optional data from a webpage with `_search_regex`, `_html_search_regex` or similar methods, for instance:
|
Similarly, you should pass `fatal=False` when extracting optional data from a webpage with `_search_regex`, `_html_search_regex` or similar methods, for instance:
|
||||||
|
|
||||||
@ -1069,21 +1069,21 @@ description = self._search_regex(
|
|||||||
webpage, 'description', default=None)
|
webpage, 'description', default=None)
|
||||||
```
|
```
|
||||||
|
|
||||||
On failure this code will silently continue the extraction with `description` set to `None`. That is useful for metafields that are known to may or may not be present.
|
On failure this code will silently continue the extraction with `description` set to `None`. That is useful for metafields that may or may not be present.
|
||||||
|
|
||||||
### Provide fallbacks
|
### Provide fallbacks
|
||||||
|
|
||||||
When extracting metadata try to provide several scenarios for that. For example if `title` is present in several places/sources try extracting from at least some of them. This would make it more future-proof in case some of the sources became unavailable.
|
When extracting metadata try to do so from multiple sources. For example if `title` is present in several places, try extracting from at least some of them. This makes it more future-proof in case some of the sources become unavailable.
|
||||||
|
|
||||||
#### Example
|
#### Example
|
||||||
|
|
||||||
Say `meta` from previous example has a `title` and you are about to extract it. Since `title` is mandatory meta field you should end up with something like:
|
Say `meta` from the previous example has a `title` and you are about to extract it. Since `title` is a mandatory meta field you should end up with something like:
|
||||||
|
|
||||||
```python
|
```python
|
||||||
title = meta['title']
|
title = meta['title']
|
||||||
```
|
```
|
||||||
|
|
||||||
If `title` disappeares from `meta` in future due to some changes on hoster's side the extraction would fail since `title` is mandatory. That's expected.
|
If `title` disappears from `meta` in the future due to some changes on the hoster's side the extraction would fail since `title` is mandatory. That's expected.
|
||||||
|
|
||||||
Assume that you have some another source you can extract `title` from, for example `og:title` HTML meta of a `webpage`. In this case you can provide a fallback scenario:
|
Assume that you have another source you can extract `title` from, for example the `og:title` HTML meta tag of a `webpage`. In this case you can provide a fallback scenario:
|
||||||
|
|
||||||
@ -1120,7 +1120,7 @@ title = self._search_regex(
|
|||||||
webpage, 'title', group='title')
|
webpage, 'title', group='title')
|
||||||
```
|
```
|
||||||
|
|
||||||
Note how you tolerate potential changes in `style` attribute's value or switch from using double quotes to single for `class` attribute:
|
Note how you tolerate potential changes in the `style` attribute's value or switch from using double quotes to single for `class` attribute:
|
||||||
|
|
||||||
The code definitely should not look like:
|
The code definitely should not look like:
|
||||||
|
|
||||||
@ -1190,7 +1190,7 @@ with youtube_dl.YoutubeDL(ydl_opts) as ydl:
|
|||||||
|
|
||||||
# BUGS
|
# BUGS
|
||||||
|
|
||||||
Bugs and suggestions should be reported at: <https://github.com/rg3/youtube-dl/issues>. Unless you were prompted so or there is another pertinent reason (e.g. GitHub fails to accept the bug report), please do not send bug reports via personal email. For discussions, join us in the IRC channel [#youtube-dl](irc://chat.freenode.net/#youtube-dl) on freenode ([webchat](http://webchat.freenode.net/?randomnick=1&channels=youtube-dl)).
|
Bugs and suggestions should be reported at: <https://github.com/rg3/youtube-dl/issues>. Unless you were prompted to or there is another pertinent reason (e.g. GitHub fails to accept the bug report), please do not send bug reports via personal email. For discussions, join us in the IRC channel [#youtube-dl](irc://chat.freenode.net/#youtube-dl) on freenode ([webchat](http://webchat.freenode.net/?randomnick=1&channels=youtube-dl)).
|
||||||
|
|
||||||
**Please include the full output of youtube-dl when run with `-v`**, i.e. **add** `-v` flag to **your command line**, copy the **whole** output and post it in the issue body wrapped in \`\`\` for better formatting. It should look similar to this:
|
**Please include the full output of youtube-dl when run with `-v`**, i.e. **add** `-v` flag to **your command line**, copy the **whole** output and post it in the issue body wrapped in \`\`\` for better formatting. It should look similar to this:
|
||||||
```
|
```
|
||||||
@ -1206,7 +1206,7 @@ $ youtube-dl -v <your command line>
|
|||||||
[debug] Proxy map: {}
|
[debug] Proxy map: {}
|
||||||
...
|
...
|
||||||
```
|
```
|
||||||
**Do not post screenshots of verbose log only plain text is acceptable.**
|
**Do not post screenshots of verbose logs; only plain text is acceptable.**
|
||||||
|
|
||||||
The output (including the first lines) contains important debugging information. Issues without the full output are often not reproducible and therefore do not get solved in short order, if ever.
|
The output (including the first lines) contains important debugging information. Issues without the full output are often not reproducible and therefore do not get solved in short order, if ever.
|
||||||
|
|
||||||
@ -1260,7 +1260,7 @@ Only post features that you (or an incapacitated friend you can personally talk
|
|||||||
|
|
||||||
### Is your question about youtube-dl?
|
### Is your question about youtube-dl?
|
||||||
|
|
||||||
It may sound strange, but some bug reports we receive are completely unrelated to youtube-dl and relate to a different or even the reporter's own application. Please make sure that you are actually using youtube-dl. If you are using a UI for youtube-dl, report the bug to the maintainer of the actual application providing the UI. On the other hand, if your UI for youtube-dl fails in some way you believe is related to youtube-dl, by all means, go ahead and report the bug.
|
It may sound strange, but some bug reports we receive are completely unrelated to youtube-dl and relate to a different, or even the reporter's own, application. Please make sure that you are actually using youtube-dl. If you are using a UI for youtube-dl, report the bug to the maintainer of the actual application providing the UI. On the other hand, if your UI for youtube-dl fails in some way you believe is related to youtube-dl, by all means, go ahead and report the bug.
|
||||||
|
|
||||||
# COPYRIGHT
|
# COPYRIGHT
|
||||||
|
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
# encoding: utf-8
|
# coding: utf-8
|
||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
import re
|
import re
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
# -*- coding: utf-8 -*-
|
# coding: utf-8
|
||||||
#
|
#
|
||||||
# youtube-dl documentation build configuration file, created by
|
# youtube-dl documentation build configuration file, created by
|
||||||
# sphinx-quickstart on Fri Mar 14 21:05:43 2014.
|
# sphinx-quickstart on Fri Mar 14 21:05:43 2014.
|
||||||
|
@ -86,7 +86,7 @@
|
|||||||
- **bbc.co.uk:article**: BBC articles
|
- **bbc.co.uk:article**: BBC articles
|
||||||
- **bbc.co.uk:iplayer:playlist**
|
- **bbc.co.uk:iplayer:playlist**
|
||||||
- **bbc.co.uk:playlist**
|
- **bbc.co.uk:playlist**
|
||||||
- **BeatportPro**
|
- **Beatport**
|
||||||
- **Beeg**
|
- **Beeg**
|
||||||
- **BehindKink**
|
- **BehindKink**
|
||||||
- **BellMedia**
|
- **BellMedia**
|
||||||
@ -289,6 +289,7 @@
|
|||||||
- **Groupon**
|
- **Groupon**
|
||||||
- **Hark**
|
- **Hark**
|
||||||
- **HBO**
|
- **HBO**
|
||||||
|
- **HBOEpisode**
|
||||||
- **HearThisAt**
|
- **HearThisAt**
|
||||||
- **Heise**
|
- **Heise**
|
||||||
- **HellPorno**
|
- **HellPorno**
|
||||||
@ -307,6 +308,7 @@
|
|||||||
- **HowStuffWorks**
|
- **HowStuffWorks**
|
||||||
- **HRTi**
|
- **HRTi**
|
||||||
- **HRTiPlaylist**
|
- **HRTiPlaylist**
|
||||||
|
- **Huajiao**: 花椒直播
|
||||||
- **HuffPost**: Huffington Post
|
- **HuffPost**: Huffington Post
|
||||||
- **Hypem**
|
- **Hypem**
|
||||||
- **Iconosquare**
|
- **Iconosquare**
|
||||||
@ -330,6 +332,8 @@
|
|||||||
- **ivideon**: Ivideon TV
|
- **ivideon**: Ivideon TV
|
||||||
- **Iwara**
|
- **Iwara**
|
||||||
- **Izlesene**
|
- **Izlesene**
|
||||||
|
- **Jamendo**
|
||||||
|
- **JamendoAlbum**
|
||||||
- **JeuxVideo**
|
- **JeuxVideo**
|
||||||
- **Jove**
|
- **Jove**
|
||||||
- **jpopsuki.tv**
|
- **jpopsuki.tv**
|
||||||
@ -364,6 +368,7 @@
|
|||||||
- **Le**: 乐视网
|
- **Le**: 乐视网
|
||||||
- **Learnr**
|
- **Learnr**
|
||||||
- **Lecture2Go**
|
- **Lecture2Go**
|
||||||
|
- **LEGO**
|
||||||
- **Lemonde**
|
- **Lemonde**
|
||||||
- **LePlaylist**
|
- **LePlaylist**
|
||||||
- **LetvCloud**: 乐视云
|
- **LetvCloud**: 乐视云
|
||||||
@ -478,11 +483,13 @@
|
|||||||
- **nhl.com:videocenter:category**: NHL videocenter category
|
- **nhl.com:videocenter:category**: NHL videocenter category
|
||||||
- **nick.com**
|
- **nick.com**
|
||||||
- **nick.de**
|
- **nick.de**
|
||||||
|
- **nicknight**
|
||||||
- **niconico**: ニコニコ動画
|
- **niconico**: ニコニコ動画
|
||||||
- **NiconicoPlaylist**
|
- **NiconicoPlaylist**
|
||||||
- **Nintendo**
|
- **Nintendo**
|
||||||
- **njoy**: N-JOY
|
- **njoy**: N-JOY
|
||||||
- **njoy:embed**
|
- **njoy:embed**
|
||||||
|
- **NobelPrize**
|
||||||
- **Noco**
|
- **Noco**
|
||||||
- **Normalboots**
|
- **Normalboots**
|
||||||
- **NosVideo**
|
- **NosVideo**
|
||||||
@ -507,6 +514,7 @@
|
|||||||
- **Nuvid**
|
- **Nuvid**
|
||||||
- **NYTimes**
|
- **NYTimes**
|
||||||
- **NYTimesArticle**
|
- **NYTimesArticle**
|
||||||
|
- **NZZ**
|
||||||
- **ocw.mit.edu**
|
- **ocw.mit.edu**
|
||||||
- **OdaTV**
|
- **OdaTV**
|
||||||
- **Odnoklassniki**
|
- **Odnoklassniki**
|
||||||
@ -523,6 +531,7 @@
|
|||||||
- **orf:iptv**: iptv.ORF.at
|
- **orf:iptv**: iptv.ORF.at
|
||||||
- **orf:oe1**: Radio Österreich 1
|
- **orf:oe1**: Radio Österreich 1
|
||||||
- **orf:tvthek**: ORF TVthek
|
- **orf:tvthek**: ORF TVthek
|
||||||
|
- **PandaTV**: 熊猫TV
|
||||||
- **pandora.tv**: 판도라TV
|
- **pandora.tv**: 판도라TV
|
||||||
- **parliamentlive.tv**: UK parliament videos
|
- **parliamentlive.tv**: UK parliament videos
|
||||||
- **Patreon**
|
- **Patreon**
|
||||||
@ -582,6 +591,8 @@
|
|||||||
- **RDS**: RDS.ca
|
- **RDS**: RDS.ca
|
||||||
- **RedTube**
|
- **RedTube**
|
||||||
- **RegioTV**
|
- **RegioTV**
|
||||||
|
- **RENTV**
|
||||||
|
- **RENTVArticle**
|
||||||
- **Restudy**
|
- **Restudy**
|
||||||
- **Reuters**
|
- **Reuters**
|
||||||
- **ReverbNation**
|
- **ReverbNation**
|
||||||
@ -637,7 +648,7 @@
|
|||||||
- **ServingSys**
|
- **ServingSys**
|
||||||
- **Sexu**
|
- **Sexu**
|
||||||
- **Shahid**
|
- **Shahid**
|
||||||
- **Shared**: shared.sx and vivo.sx
|
- **Shared**: shared.sx
|
||||||
- **ShareSix**
|
- **ShareSix**
|
||||||
- **Sina**
|
- **Sina**
|
||||||
- **SixPlay**
|
- **SixPlay**
|
||||||
@ -692,6 +703,7 @@
|
|||||||
- **SWRMediathek**
|
- **SWRMediathek**
|
||||||
- **Syfy**
|
- **Syfy**
|
||||||
- **SztvHu**
|
- **SztvHu**
|
||||||
|
- **t-online.de**
|
||||||
- **Tagesschau**
|
- **Tagesschau**
|
||||||
- **tagesschau:player**
|
- **tagesschau:player**
|
||||||
- **Tass**
|
- **Tass**
|
||||||
@ -716,13 +728,16 @@
|
|||||||
- **TF1**
|
- **TF1**
|
||||||
- **TFO**
|
- **TFO**
|
||||||
- **TheIntercept**
|
- **TheIntercept**
|
||||||
|
- **theoperaplatform**
|
||||||
- **ThePlatform**
|
- **ThePlatform**
|
||||||
- **ThePlatformFeed**
|
- **ThePlatformFeed**
|
||||||
- **TheScene**
|
- **TheScene**
|
||||||
- **TheSixtyOne**
|
- **TheSixtyOne**
|
||||||
- **TheStar**
|
- **TheStar**
|
||||||
|
- **TheWeatherChannel**
|
||||||
- **ThisAmericanLife**
|
- **ThisAmericanLife**
|
||||||
- **ThisAV**
|
- **ThisAV**
|
||||||
|
- **ThisOldHouse**
|
||||||
- **tinypic**: tinypic.com videos
|
- **tinypic**: tinypic.com videos
|
||||||
- **tlc.de**
|
- **tlc.de**
|
||||||
- **TMZ**
|
- **TMZ**
|
||||||
@ -840,6 +855,7 @@
|
|||||||
- **Vimple**: Vimple - one-click video hosting
|
- **Vimple**: Vimple - one-click video hosting
|
||||||
- **Vine**
|
- **Vine**
|
||||||
- **vine:user**
|
- **vine:user**
|
||||||
|
- **Vivo**: vivo.sx
|
||||||
- **vk**: VK
|
- **vk**: VK
|
||||||
- **vk:uservideos**: VK - User's Videos
|
- **vk:uservideos**: VK - User's Videos
|
||||||
- **vk:wallpost**
|
- **vk:wallpost**
|
||||||
|
2
setup.py
2
setup.py
@ -1,5 +1,5 @@
|
|||||||
#!/usr/bin/env python
|
#!/usr/bin/env python
|
||||||
# -*- coding: utf-8 -*-
|
# coding: utf-8
|
||||||
|
|
||||||
from __future__ import print_function
|
from __future__ import print_function
|
||||||
|
|
||||||
|
@ -605,6 +605,7 @@ class TestYoutubeDL(unittest.TestCase):
|
|||||||
'extractor': 'TEST',
|
'extractor': 'TEST',
|
||||||
'duration': 30,
|
'duration': 30,
|
||||||
'filesize': 10 * 1024,
|
'filesize': 10 * 1024,
|
||||||
|
'playlist_id': '42',
|
||||||
}
|
}
|
||||||
second = {
|
second = {
|
||||||
'id': '2',
|
'id': '2',
|
||||||
@ -614,6 +615,7 @@ class TestYoutubeDL(unittest.TestCase):
|
|||||||
'duration': 10,
|
'duration': 10,
|
||||||
'description': 'foo',
|
'description': 'foo',
|
||||||
'filesize': 5 * 1024,
|
'filesize': 5 * 1024,
|
||||||
|
'playlist_id': '43',
|
||||||
}
|
}
|
||||||
videos = [first, second]
|
videos = [first, second]
|
||||||
|
|
||||||
@ -650,6 +652,10 @@ class TestYoutubeDL(unittest.TestCase):
|
|||||||
res = get_videos(f)
|
res = get_videos(f)
|
||||||
self.assertEqual(res, ['1'])
|
self.assertEqual(res, ['1'])
|
||||||
|
|
||||||
|
f = match_filter_func('playlist_id = 42')
|
||||||
|
res = get_videos(f)
|
||||||
|
self.assertEqual(res, ['1'])
|
||||||
|
|
||||||
def test_playlist_items_selection(self):
|
def test_playlist_items_selection(self):
|
||||||
entries = [{
|
entries = [{
|
||||||
'id': compat_str(i),
|
'id': compat_str(i),
|
||||||
|
@ -87,7 +87,7 @@ class TestHTTP(unittest.TestCase):
|
|||||||
|
|
||||||
ydl = YoutubeDL({'logger': FakeLogger()})
|
ydl = YoutubeDL({'logger': FakeLogger()})
|
||||||
r = ydl.extract_info('http://localhost:%d/302' % self.port)
|
r = ydl.extract_info('http://localhost:%d/302' % self.port)
|
||||||
self.assertEqual(r['url'], 'http://localhost:%d/vid.mp4' % self.port)
|
self.assertEqual(r['entries'][0]['url'], 'http://localhost:%d/vid.mp4' % self.port)
|
||||||
|
|
||||||
|
|
||||||
class TestHTTPS(unittest.TestCase):
|
class TestHTTPS(unittest.TestCase):
|
||||||
@ -111,7 +111,7 @@ class TestHTTPS(unittest.TestCase):
|
|||||||
|
|
||||||
ydl = YoutubeDL({'logger': FakeLogger(), 'nocheckcertificate': True})
|
ydl = YoutubeDL({'logger': FakeLogger(), 'nocheckcertificate': True})
|
||||||
r = ydl.extract_info('https://localhost:%d/video.html' % self.port)
|
r = ydl.extract_info('https://localhost:%d/video.html' % self.port)
|
||||||
self.assertEqual(r['url'], 'https://localhost:%d/vid.mp4' % self.port)
|
self.assertEqual(r['entries'][0]['url'], 'https://localhost:%d/vid.mp4' % self.port)
|
||||||
|
|
||||||
|
|
||||||
def _build_proxy_handler(name):
|
def _build_proxy_handler(name):
|
||||||
|
@ -69,6 +69,7 @@ from youtube_dl.utils import (
|
|||||||
uppercase_escape,
|
uppercase_escape,
|
||||||
lowercase_escape,
|
lowercase_escape,
|
||||||
url_basename,
|
url_basename,
|
||||||
|
base_url,
|
||||||
urlencode_postdata,
|
urlencode_postdata,
|
||||||
urshift,
|
urshift,
|
||||||
update_url_query,
|
update_url_query,
|
||||||
@ -437,6 +438,13 @@ class TestUtil(unittest.TestCase):
|
|||||||
url_basename('http://media.w3.org/2010/05/sintel/trailer.mp4'),
|
url_basename('http://media.w3.org/2010/05/sintel/trailer.mp4'),
|
||||||
'trailer.mp4')
|
'trailer.mp4')
|
||||||
|
|
||||||
|
def test_base_url(self):
|
||||||
|
self.assertEqual(base_url('http://foo.de/'), 'http://foo.de/')
|
||||||
|
self.assertEqual(base_url('http://foo.de/bar'), 'http://foo.de/')
|
||||||
|
self.assertEqual(base_url('http://foo.de/bar/'), 'http://foo.de/bar/')
|
||||||
|
self.assertEqual(base_url('http://foo.de/bar/baz'), 'http://foo.de/bar/')
|
||||||
|
self.assertEqual(base_url('http://foo.de/bar/baz?x=z/x/c'), 'http://foo.de/bar/')
|
||||||
|
|
||||||
def test_parse_age_limit(self):
|
def test_parse_age_limit(self):
|
||||||
self.assertEqual(parse_age_limit(None), None)
|
self.assertEqual(parse_age_limit(None), None)
|
||||||
self.assertEqual(parse_age_limit(False), None)
|
self.assertEqual(parse_age_limit(False), None)
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
#!/usr/bin/env python
|
#!/usr/bin/env python
|
||||||
# -*- coding: utf-8 -*-
|
# coding: utf-8
|
||||||
|
|
||||||
from __future__ import absolute_import, unicode_literals
|
from __future__ import absolute_import, unicode_literals
|
||||||
|
|
||||||
@ -1658,7 +1658,7 @@ class YoutubeDL(object):
|
|||||||
video_ext, audio_ext = audio.get('ext'), video.get('ext')
|
video_ext, audio_ext = audio.get('ext'), video.get('ext')
|
||||||
if video_ext and audio_ext:
|
if video_ext and audio_ext:
|
||||||
COMPATIBLE_EXTS = (
|
COMPATIBLE_EXTS = (
|
||||||
('mp3', 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v'),
|
('mp3', 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'ismv', 'isma'),
|
||||||
('webm')
|
('webm')
|
||||||
)
|
)
|
||||||
for exts in COMPATIBLE_EXTS:
|
for exts in COMPATIBLE_EXTS:
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
#!/usr/bin/env python
|
#!/usr/bin/env python
|
||||||
# -*- coding: utf-8 -*-
|
# coding: utf-8
|
||||||
|
|
||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
|
@ -7,6 +7,7 @@ from .http import HttpFD
|
|||||||
from .rtmp import RtmpFD
|
from .rtmp import RtmpFD
|
||||||
from .dash import DashSegmentsFD
|
from .dash import DashSegmentsFD
|
||||||
from .rtsp import RtspFD
|
from .rtsp import RtspFD
|
||||||
|
from .ism import IsmFD
|
||||||
from .external import (
|
from .external import (
|
||||||
get_external_downloader,
|
get_external_downloader,
|
||||||
FFmpegFD,
|
FFmpegFD,
|
||||||
@ -24,6 +25,7 @@ PROTOCOL_MAP = {
|
|||||||
'rtsp': RtspFD,
|
'rtsp': RtspFD,
|
||||||
'f4m': F4mFD,
|
'f4m': F4mFD,
|
||||||
'http_dash_segments': DashSegmentsFD,
|
'http_dash_segments': DashSegmentsFD,
|
||||||
|
'ism': IsmFD,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@ -346,7 +346,6 @@ class FileDownloader(object):
|
|||||||
min_sleep_interval = self.params.get('sleep_interval')
|
min_sleep_interval = self.params.get('sleep_interval')
|
||||||
if min_sleep_interval:
|
if min_sleep_interval:
|
||||||
max_sleep_interval = self.params.get('max_sleep_interval', min_sleep_interval)
|
max_sleep_interval = self.params.get('max_sleep_interval', min_sleep_interval)
|
||||||
print(min_sleep_interval, max_sleep_interval)
|
|
||||||
sleep_interval = random.uniform(min_sleep_interval, max_sleep_interval)
|
sleep_interval = random.uniform(min_sleep_interval, max_sleep_interval)
|
||||||
self.to_screen('[download] Sleeping %s seconds...' % sleep_interval)
|
self.to_screen('[download] Sleeping %s seconds...' % sleep_interval)
|
||||||
time.sleep(sleep_interval)
|
time.sleep(sleep_interval)
|
||||||
|
273
youtube_dl/downloader/ism.py
Normal file
273
youtube_dl/downloader/ism.py
Normal file
@ -0,0 +1,273 @@
|
|||||||
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
|
import os
|
||||||
|
import time
|
||||||
|
import struct
|
||||||
|
import binascii
|
||||||
|
import io
|
||||||
|
|
||||||
|
from .fragment import FragmentFD
|
||||||
|
from ..compat import compat_urllib_error
|
||||||
|
from ..utils import (
|
||||||
|
sanitize_open,
|
||||||
|
encodeFilename,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
u8 = struct.Struct(b'>B')
|
||||||
|
u88 = struct.Struct(b'>Bx')
|
||||||
|
u16 = struct.Struct(b'>H')
|
||||||
|
u1616 = struct.Struct(b'>Hxx')
|
||||||
|
u32 = struct.Struct(b'>I')
|
||||||
|
u64 = struct.Struct(b'>Q')
|
||||||
|
|
||||||
|
s88 = struct.Struct(b'>bx')
|
||||||
|
s16 = struct.Struct(b'>h')
|
||||||
|
s1616 = struct.Struct(b'>hxx')
|
||||||
|
s32 = struct.Struct(b'>i')
|
||||||
|
|
||||||
|
unity_matrix = (s32.pack(0x10000) + s32.pack(0) * 3) * 2 + s32.pack(0x40000000)
|
||||||
|
|
||||||
|
TRACK_ENABLED = 0x1
|
||||||
|
TRACK_IN_MOVIE = 0x2
|
||||||
|
TRACK_IN_PREVIEW = 0x4
|
||||||
|
|
||||||
|
SELF_CONTAINED = 0x1
|
||||||
|
|
||||||
|
|
||||||
|
def box(box_type, payload):
|
||||||
|
return u32.pack(8 + len(payload)) + box_type + payload
|
||||||
|
|
||||||
|
|
||||||
|
def full_box(box_type, version, flags, payload):
|
||||||
|
return box(box_type, u8.pack(version) + u32.pack(flags)[1:] + payload)
|
||||||
|
|
||||||
|
|
||||||
|
def write_piff_header(stream, params):
|
||||||
|
track_id = params['track_id']
|
||||||
|
fourcc = params['fourcc']
|
||||||
|
duration = params['duration']
|
||||||
|
timescale = params.get('timescale', 10000000)
|
||||||
|
language = params.get('language', 'und')
|
||||||
|
height = params.get('height', 0)
|
||||||
|
width = params.get('width', 0)
|
||||||
|
is_audio = width == 0 and height == 0
|
||||||
|
creation_time = modification_time = int(time.time())
|
||||||
|
|
||||||
|
ftyp_payload = b'isml' # major brand
|
||||||
|
ftyp_payload += u32.pack(1) # minor version
|
||||||
|
ftyp_payload += b'piff' + b'iso2' # compatible brands
|
||||||
|
stream.write(box(b'ftyp', ftyp_payload)) # File Type Box
|
||||||
|
|
||||||
|
mvhd_payload = u64.pack(creation_time)
|
||||||
|
mvhd_payload += u64.pack(modification_time)
|
||||||
|
mvhd_payload += u32.pack(timescale)
|
||||||
|
mvhd_payload += u64.pack(duration)
|
||||||
|
mvhd_payload += s1616.pack(1) # rate
|
||||||
|
mvhd_payload += s88.pack(1) # volume
|
||||||
|
mvhd_payload += u16.pack(0) # reserved
|
||||||
|
mvhd_payload += u32.pack(0) * 2 # reserved
|
||||||
|
mvhd_payload += unity_matrix
|
||||||
|
mvhd_payload += u32.pack(0) * 6 # pre defined
|
||||||
|
mvhd_payload += u32.pack(0xffffffff) # next track id
|
||||||
|
moov_payload = full_box(b'mvhd', 1, 0, mvhd_payload) # Movie Header Box
|
||||||
|
|
||||||
|
tkhd_payload = u64.pack(creation_time)
|
||||||
|
tkhd_payload += u64.pack(modification_time)
|
||||||
|
tkhd_payload += u32.pack(track_id) # track id
|
||||||
|
tkhd_payload += u32.pack(0) # reserved
|
||||||
|
tkhd_payload += u64.pack(duration)
|
||||||
|
tkhd_payload += u32.pack(0) * 2 # reserved
|
||||||
|
tkhd_payload += s16.pack(0) # layer
|
||||||
|
tkhd_payload += s16.pack(0) # alternate group
|
||||||
|
tkhd_payload += s88.pack(1 if is_audio else 0) # volume
|
||||||
|
tkhd_payload += u16.pack(0) # reserved
|
||||||
|
tkhd_payload += unity_matrix
|
||||||
|
tkhd_payload += u1616.pack(width)
|
||||||
|
tkhd_payload += u1616.pack(height)
|
||||||
|
trak_payload = full_box(b'tkhd', 1, TRACK_ENABLED | TRACK_IN_MOVIE | TRACK_IN_PREVIEW, tkhd_payload) # Track Header Box
|
||||||
|
|
||||||
|
mdhd_payload = u64.pack(creation_time)
|
||||||
|
mdhd_payload += u64.pack(modification_time)
|
||||||
|
mdhd_payload += u32.pack(timescale)
|
||||||
|
mdhd_payload += u64.pack(duration)
|
||||||
|
mdhd_payload += u16.pack(((ord(language[0]) - 0x60) << 10) | ((ord(language[1]) - 0x60) << 5) | (ord(language[2]) - 0x60))
|
||||||
|
mdhd_payload += u16.pack(0) # pre defined
|
||||||
|
mdia_payload = full_box(b'mdhd', 1, 0, mdhd_payload) # Media Header Box
|
||||||
|
|
||||||
|
hdlr_payload = u32.pack(0) # pre defined
|
||||||
|
hdlr_payload += b'soun' if is_audio else b'vide' # handler type
|
||||||
|
hdlr_payload += u32.pack(0) * 3 # reserved
|
||||||
|
hdlr_payload += (b'Sound' if is_audio else b'Video') + b'Handler\0' # name
|
||||||
|
mdia_payload += full_box(b'hdlr', 0, 0, hdlr_payload) # Handler Reference Box
|
||||||
|
|
||||||
|
if is_audio:
|
||||||
|
smhd_payload = s88.pack(0) # balance
|
||||||
|
smhd_payload = u16.pack(0) # reserved
|
||||||
|
media_header_box = full_box(b'smhd', 0, 0, smhd_payload) # Sound Media Header
|
||||||
|
else:
|
||||||
|
vmhd_payload = u16.pack(0) # graphics mode
|
||||||
|
vmhd_payload += u16.pack(0) * 3 # opcolor
|
||||||
|
media_header_box = full_box(b'vmhd', 0, 1, vmhd_payload) # Video Media Header
|
||||||
|
minf_payload = media_header_box
|
||||||
|
|
||||||
|
dref_payload = u32.pack(1) # entry count
|
||||||
|
dref_payload += full_box(b'url ', 0, SELF_CONTAINED, b'') # Data Entry URL Box
|
||||||
|
dinf_payload = full_box(b'dref', 0, 0, dref_payload) # Data Reference Box
|
||||||
|
minf_payload += box(b'dinf', dinf_payload) # Data Information Box
|
||||||
|
|
||||||
|
stsd_payload = u32.pack(1) # entry count
|
||||||
|
|
||||||
|
sample_entry_payload = u8.pack(0) * 6 # reserved
|
||||||
|
sample_entry_payload += u16.pack(1) # data reference index
|
||||||
|
if is_audio:
|
||||||
|
sample_entry_payload += u32.pack(0) * 2 # reserved
|
||||||
|
sample_entry_payload += u16.pack(params.get('channels', 2))
|
||||||
|
sample_entry_payload += u16.pack(params.get('bits_per_sample', 16))
|
||||||
|
sample_entry_payload += u16.pack(0) # pre defined
|
||||||
|
sample_entry_payload += u16.pack(0) # reserved
|
||||||
|
sample_entry_payload += u1616.pack(params['sampling_rate'])
|
||||||
|
|
||||||
|
if fourcc == 'AACL':
|
||||||
|
smaple_entry_box = box(b'mp4a', sample_entry_payload)
|
||||||
|
else:
|
||||||
|
sample_entry_payload = sample_entry_payload
|
||||||
|
sample_entry_payload += u16.pack(0) # pre defined
|
||||||
|
sample_entry_payload += u16.pack(0) # reserved
|
||||||
|
sample_entry_payload += u32.pack(0) * 3 # pre defined
|
||||||
|
sample_entry_payload += u16.pack(width)
|
||||||
|
sample_entry_payload += u16.pack(height)
|
||||||
|
sample_entry_payload += u1616.pack(0x48) # horiz resolution 72 dpi
|
||||||
|
sample_entry_payload += u1616.pack(0x48) # vert resolution 72 dpi
|
||||||
|
sample_entry_payload += u32.pack(0) # reserved
|
||||||
|
sample_entry_payload += u16.pack(1) # frame count
|
||||||
|
sample_entry_payload += u8.pack(0) * 32 # compressor name
|
||||||
|
sample_entry_payload += u16.pack(0x18) # depth
|
||||||
|
sample_entry_payload += s16.pack(-1) # pre defined
|
||||||
|
|
||||||
|
codec_private_data = binascii.unhexlify(params['codec_private_data'])
|
||||||
|
if fourcc in ('H264', 'AVC1'):
|
||||||
|
sps, pps = codec_private_data.split(u32.pack(1))[1:]
|
||||||
|
avcc_payload = u8.pack(1) # configuration version
|
||||||
|
avcc_payload += sps[1] # avc profile indication
|
||||||
|
avcc_payload += sps[2] # profile compatibility
|
||||||
|
avcc_payload += sps[3] # avc level indication
|
||||||
|
avcc_payload += u8.pack(0xfc | (params.get('nal_unit_length_field', 4) - 1)) # complete represenation (1) + reserved (11111) + length size minus one
|
||||||
|
avcc_payload += u8.pack(1) # reserved (0) + number of sps (0000001)
|
||||||
|
avcc_payload += u16.pack(len(sps))
|
||||||
|
avcc_payload += sps
|
||||||
|
avcc_payload += u8.pack(1) # number of pps
|
||||||
|
avcc_payload += u16.pack(len(pps))
|
||||||
|
avcc_payload += pps
|
||||||
|
sample_entry_payload += box(b'avcC', avcc_payload) # AVC Decoder Configuration Record
|
||||||
|
smaple_entry_box = box(b'avc1', sample_entry_payload) # AVC Simple Entry
|
||||||
|
stsd_payload += smaple_entry_box
|
||||||
|
|
||||||
|
stbl_payload = full_box(b'stsd', 0, 0, stsd_payload) # Sample Description Box
|
||||||
|
|
||||||
|
stts_payload = u32.pack(0) # entry count
|
||||||
|
stbl_payload += full_box(b'stts', 0, 0, stts_payload) # Decoding Time to Sample Box
|
||||||
|
|
||||||
|
stsc_payload = u32.pack(0) # entry count
|
||||||
|
stbl_payload += full_box(b'stsc', 0, 0, stsc_payload) # Sample To Chunk Box
|
||||||
|
|
||||||
|
stco_payload = u32.pack(0) # entry count
|
||||||
|
stbl_payload += full_box(b'stco', 0, 0, stco_payload) # Chunk Offset Box
|
||||||
|
|
||||||
|
minf_payload += box(b'stbl', stbl_payload) # Sample Table Box
|
||||||
|
|
||||||
|
mdia_payload += box(b'minf', minf_payload) # Media Information Box
|
||||||
|
|
||||||
|
trak_payload += box(b'mdia', mdia_payload) # Media Box
|
||||||
|
|
||||||
|
moov_payload += box(b'trak', trak_payload) # Track Box
|
||||||
|
|
||||||
|
mehd_payload = u64.pack(duration)
|
||||||
|
mvex_payload = full_box(b'mehd', 1, 0, mehd_payload) # Movie Extends Header Box
|
||||||
|
|
||||||
|
trex_payload = u32.pack(track_id) # track id
|
||||||
|
trex_payload += u32.pack(1) # default sample description index
|
||||||
|
trex_payload += u32.pack(0) # default sample duration
|
||||||
|
trex_payload += u32.pack(0) # default sample size
|
||||||
|
trex_payload += u32.pack(0) # default sample flags
|
||||||
|
mvex_payload += full_box(b'trex', 0, 0, trex_payload) # Track Extends Box
|
||||||
|
|
||||||
|
moov_payload += box(b'mvex', mvex_payload) # Movie Extends Box
|
||||||
|
stream.write(box(b'moov', moov_payload)) # Movie Box
|
||||||
|
|
||||||
|
|
||||||
|
def extract_box_data(data, box_sequence):
|
||||||
|
data_reader = io.BytesIO(data)
|
||||||
|
while True:
|
||||||
|
box_size = u32.unpack(data_reader.read(4))[0]
|
||||||
|
box_type = data_reader.read(4)
|
||||||
|
if box_type == box_sequence[0]:
|
||||||
|
box_data = data_reader.read(box_size - 8)
|
||||||
|
if len(box_sequence) == 1:
|
||||||
|
return box_data
|
||||||
|
return extract_box_data(box_data, box_sequence[1:])
|
||||||
|
data_reader.seek(box_size - 8, 1)
|
||||||
|
|
||||||
|
|
||||||
|
class IsmFD(FragmentFD):
|
||||||
|
"""
|
||||||
|
Download segments in a ISM manifest
|
||||||
|
"""
|
||||||
|
|
||||||
|
FD_NAME = 'ism'
|
||||||
|
|
||||||
|
def real_download(self, filename, info_dict):
|
||||||
|
segments = info_dict['fragments'][:1] if self.params.get(
|
||||||
|
'test', False) else info_dict['fragments']
|
||||||
|
|
||||||
|
ctx = {
|
||||||
|
'filename': filename,
|
||||||
|
'total_frags': len(segments),
|
||||||
|
}
|
||||||
|
|
||||||
|
self._prepare_and_start_frag_download(ctx)
|
||||||
|
|
||||||
|
segments_filenames = []
|
||||||
|
|
||||||
|
fragment_retries = self.params.get('fragment_retries', 0)
|
||||||
|
skip_unavailable_fragments = self.params.get('skip_unavailable_fragments', True)
|
||||||
|
|
||||||
|
track_written = False
|
||||||
|
for i, segment in enumerate(segments):
|
||||||
|
segment_url = segment['url']
|
||||||
|
segment_name = 'Frag%d' % i
|
||||||
|
target_filename = '%s-%s' % (ctx['tmpfilename'], segment_name)
|
||||||
|
count = 0
|
||||||
|
while count <= fragment_retries:
|
||||||
|
try:
|
||||||
|
success = ctx['dl'].download(target_filename, {'url': segment_url})
|
||||||
|
if not success:
|
||||||
|
return False
|
||||||
|
down, target_sanitized = sanitize_open(target_filename, 'rb')
|
||||||
|
down_data = down.read()
|
||||||
|
if not track_written:
|
||||||
|
tfhd_data = extract_box_data(down_data, [b'moof', b'traf', b'tfhd'])
|
||||||
|
info_dict['_download_params']['track_id'] = u32.unpack(tfhd_data[4:8])[0]
|
||||||
|
write_piff_header(ctx['dest_stream'], info_dict['_download_params'])
|
||||||
|
track_written = True
|
||||||
|
ctx['dest_stream'].write(down_data)
|
||||||
|
down.close()
|
||||||
|
segments_filenames.append(target_sanitized)
|
||||||
|
break
|
||||||
|
except compat_urllib_error.HTTPError as err:
|
||||||
|
count += 1
|
||||||
|
if count <= fragment_retries:
|
||||||
|
self.report_retry_fragment(err, segment_name, count, fragment_retries)
|
||||||
|
if count > fragment_retries:
|
||||||
|
if skip_unavailable_fragments:
|
||||||
|
self.report_skip_fragment(segment_name)
|
||||||
|
continue
|
||||||
|
self.report_error('giving up after %s fragment retries' % fragment_retries)
|
||||||
|
return False
|
||||||
|
|
||||||
|
self._finish_frag_download(ctx)
|
||||||
|
|
||||||
|
for segment_file in segments_filenames:
|
||||||
|
os.remove(encodeFilename(segment_file))
|
||||||
|
|
||||||
|
return True
|
@ -102,16 +102,16 @@ class ABCIViewIE(InfoExtractor):
|
|||||||
|
|
||||||
# ABC iview programs are normally available for 14 days only.
|
# ABC iview programs are normally available for 14 days only.
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
'url': 'http://iview.abc.net.au/programs/gardening-australia/FA1505V024S00',
|
'url': 'http://iview.abc.net.au/programs/diaries-of-a-broken-mind/ZX9735A001S00',
|
||||||
'md5': '979d10b2939101f0d27a06b79edad536',
|
'md5': 'cde42d728b3b7c2b32b1b94b4a548afc',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': 'FA1505V024S00',
|
'id': 'ZX9735A001S00',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'title': 'Series 27 Ep 24',
|
'title': 'Diaries Of A Broken Mind',
|
||||||
'description': 'md5:b28baeae7504d1148e1d2f0e3ed3c15d',
|
'description': 'md5:7de3903874b7a1be279fe6b68718fc9e',
|
||||||
'upload_date': '20160820',
|
'upload_date': '20161010',
|
||||||
'uploader_id': 'abc1',
|
'uploader_id': 'abc2',
|
||||||
'timestamp': 1471719600,
|
'timestamp': 1476064920,
|
||||||
},
|
},
|
||||||
'skip': 'Video gone',
|
'skip': 'Video gone',
|
||||||
}]
|
}]
|
||||||
@ -121,7 +121,7 @@ class ABCIViewIE(InfoExtractor):
|
|||||||
webpage = self._download_webpage(url, video_id)
|
webpage = self._download_webpage(url, video_id)
|
||||||
video_params = self._parse_json(self._search_regex(
|
video_params = self._parse_json(self._search_regex(
|
||||||
r'videoParams\s*=\s*({.+?});', webpage, 'video params'), video_id)
|
r'videoParams\s*=\s*({.+?});', webpage, 'video params'), video_id)
|
||||||
title = video_params['title']
|
title = video_params.get('title') or video_params['seriesTitle']
|
||||||
stream = next(s for s in video_params['playlist'] if s.get('type') == 'program')
|
stream = next(s for s in video_params['playlist'] if s.get('type') == 'program')
|
||||||
|
|
||||||
formats = self._extract_akamai_formats(stream['hds-unmetered'], video_id)
|
formats = self._extract_akamai_formats(stream['hds-unmetered'], video_id)
|
||||||
@ -144,8 +144,8 @@ class ABCIViewIE(InfoExtractor):
|
|||||||
'timestamp': parse_iso8601(video_params.get('pubDate'), ' '),
|
'timestamp': parse_iso8601(video_params.get('pubDate'), ' '),
|
||||||
'series': video_params.get('seriesTitle'),
|
'series': video_params.get('seriesTitle'),
|
||||||
'series_id': video_params.get('seriesHouseNumber') or video_id[:7],
|
'series_id': video_params.get('seriesHouseNumber') or video_id[:7],
|
||||||
'episode_number': int_or_none(self._html_search_meta('episodeNumber', webpage)),
|
'episode_number': int_or_none(self._html_search_meta('episodeNumber', webpage, default=None)),
|
||||||
'episode': self._html_search_meta('episode_title', webpage),
|
'episode': self._html_search_meta('episode_title', webpage, default=None),
|
||||||
'uploader_id': video_params.get('channel'),
|
'uploader_id': video_params.get('channel'),
|
||||||
'formats': formats,
|
'formats': formats,
|
||||||
'subtitles': subtitles,
|
'subtitles': subtitles,
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
# -*- coding: utf-8 -*-
|
# coding: utf-8
|
||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
import re
|
import re
|
||||||
@ -26,6 +26,11 @@ MSO_INFO = {
|
|||||||
'username_field': 'UserName',
|
'username_field': 'UserName',
|
||||||
'password_field': 'UserPassword',
|
'password_field': 'UserPassword',
|
||||||
},
|
},
|
||||||
|
'Comcast_SSO': {
|
||||||
|
'name': 'Comcast XFINITY',
|
||||||
|
'username_field': 'user',
|
||||||
|
'password_field': 'passwd',
|
||||||
|
},
|
||||||
'thr030': {
|
'thr030': {
|
||||||
'name': '3 Rivers Communications'
|
'name': '3 Rivers Communications'
|
||||||
},
|
},
|
||||||
@ -1364,14 +1369,53 @@ class AdobePassIE(InfoExtractor):
|
|||||||
'domain_name': 'adobe.com',
|
'domain_name': 'adobe.com',
|
||||||
'redirect_url': url,
|
'redirect_url': url,
|
||||||
})
|
})
|
||||||
provider_login_page_res = post_form(
|
|
||||||
provider_redirect_page_res, 'Downloading Provider Login Page')
|
if mso_id == 'Comcast_SSO':
|
||||||
mvpd_confirm_page_res = post_form(provider_login_page_res, 'Logging in', {
|
# Comcast page flow varies by video site and whether you
|
||||||
mso_info.get('username_field', 'username'): username,
|
# are on Comcast's network.
|
||||||
mso_info.get('password_field', 'password'): password,
|
provider_redirect_page, urlh = provider_redirect_page_res
|
||||||
})
|
# Check for Comcast auto login
|
||||||
if mso_id != 'Rogers':
|
if 'automatically signing you in' in provider_redirect_page:
|
||||||
post_form(mvpd_confirm_page_res, 'Confirming Login')
|
oauth_redirect_url = self._html_search_regex(
|
||||||
|
r'window\.location\s*=\s*[\'"]([^\'"]+)',
|
||||||
|
provider_redirect_page, 'oauth redirect')
|
||||||
|
# Just need to process the request. No useful data comes back
|
||||||
|
self._download_webpage(
|
||||||
|
oauth_redirect_url, video_id, 'Confirming auto login')
|
||||||
|
else:
|
||||||
|
if '<form name="signin"' in provider_redirect_page:
|
||||||
|
# already have the form, just fill it
|
||||||
|
provider_login_page_res = provider_redirect_page_res
|
||||||
|
elif 'http-equiv="refresh"' in provider_redirect_page:
|
||||||
|
# redirects to the login page
|
||||||
|
oauth_redirect_url = self._html_search_regex(
|
||||||
|
r'content="0;\s*url=([^\'"]+)',
|
||||||
|
provider_redirect_page, 'meta refresh redirect')
|
||||||
|
provider_login_page_res = self._download_webpage_handle(
|
||||||
|
oauth_redirect_url,
|
||||||
|
video_id, 'Downloading Provider Login Page')
|
||||||
|
else:
|
||||||
|
provider_login_page_res = post_form(
|
||||||
|
provider_redirect_page_res, 'Downloading Provider Login Page')
|
||||||
|
|
||||||
|
mvpd_confirm_page_res = post_form(provider_login_page_res, 'Logging in', {
|
||||||
|
mso_info.get('username_field', 'username'): username,
|
||||||
|
mso_info.get('password_field', 'password'): password,
|
||||||
|
})
|
||||||
|
mvpd_confirm_page, urlh = mvpd_confirm_page_res
|
||||||
|
if '<button class="submit" value="Resume">Resume</button>' in mvpd_confirm_page:
|
||||||
|
post_form(mvpd_confirm_page_res, 'Confirming Login')
|
||||||
|
|
||||||
|
else:
|
||||||
|
# Normal, non-Comcast flow
|
||||||
|
provider_login_page_res = post_form(
|
||||||
|
provider_redirect_page_res, 'Downloading Provider Login Page')
|
||||||
|
mvpd_confirm_page_res = post_form(provider_login_page_res, 'Logging in', {
|
||||||
|
mso_info.get('username_field', 'username'): username,
|
||||||
|
mso_info.get('password_field', 'password'): password,
|
||||||
|
})
|
||||||
|
if mso_id != 'Rogers':
|
||||||
|
post_form(mvpd_confirm_page_res, 'Confirming Login')
|
||||||
|
|
||||||
session = self._download_webpage(
|
session = self._download_webpage(
|
||||||
self._SERVICE_PROVIDER_TEMPLATE % 'session', video_id,
|
self._SERVICE_PROVIDER_TEMPLATE % 'session', video_id,
|
||||||
|
@ -96,6 +96,27 @@ class AdultSwimIE(TurnerBaseIE):
|
|||||||
'skip_download': True,
|
'skip_download': True,
|
||||||
},
|
},
|
||||||
'expected_warnings': ['Unable to download f4m manifest'],
|
'expected_warnings': ['Unable to download f4m manifest'],
|
||||||
|
}, {
|
||||||
|
'url': 'http://www.adultswim.com/videos/toonami/friday-october-14th-2016/',
|
||||||
|
'info_dict': {
|
||||||
|
'id': 'eYiLsKVgQ6qTC6agD67Sig',
|
||||||
|
'title': 'Toonami - Friday, October 14th, 2016',
|
||||||
|
'description': 'md5:99892c96ffc85e159a428de85c30acde',
|
||||||
|
},
|
||||||
|
'playlist': [{
|
||||||
|
'md5': '',
|
||||||
|
'info_dict': {
|
||||||
|
'id': 'eYiLsKVgQ6qTC6agD67Sig',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': 'Toonami - Friday, October 14th, 2016',
|
||||||
|
'description': 'md5:99892c96ffc85e159a428de85c30acde',
|
||||||
|
},
|
||||||
|
}],
|
||||||
|
'params': {
|
||||||
|
# m3u8 download
|
||||||
|
'skip_download': True,
|
||||||
|
},
|
||||||
|
'expected_warnings': ['Unable to download f4m manifest'],
|
||||||
}]
|
}]
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
@ -163,6 +184,8 @@ class AdultSwimIE(TurnerBaseIE):
|
|||||||
segment_ids = [clip['videoPlaybackID'] for clip in video_info['clips']]
|
segment_ids = [clip['videoPlaybackID'] for clip in video_info['clips']]
|
||||||
elif video_info.get('videoPlaybackID'):
|
elif video_info.get('videoPlaybackID'):
|
||||||
segment_ids = [video_info['videoPlaybackID']]
|
segment_ids = [video_info['videoPlaybackID']]
|
||||||
|
elif video_info.get('id'):
|
||||||
|
segment_ids = [video_info['id']]
|
||||||
else:
|
else:
|
||||||
if video_info.get('auth') is True:
|
if video_info.get('auth') is True:
|
||||||
raise ExtractorError(
|
raise ExtractorError(
|
||||||
|
@ -1,29 +1,26 @@
|
|||||||
# -*- coding: utf-8 -*-
|
# coding: utf-8
|
||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
import re
|
|
||||||
import json
|
|
||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
from ..compat import compat_str
|
|
||||||
from ..utils import (
|
from ..utils import (
|
||||||
|
remove_end,
|
||||||
qualities,
|
qualities,
|
||||||
unescapeHTML,
|
url_basename,
|
||||||
xpath_element,
|
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
class AllocineIE(InfoExtractor):
|
class AllocineIE(InfoExtractor):
|
||||||
_VALID_URL = r'https?://(?:www\.)?allocine\.fr/(?P<typ>article|video|film)/(fichearticle_gen_carticle=|player_gen_cmedia=|fichefilm_gen_cfilm=|video-)(?P<id>[0-9]+)(?:\.html)?'
|
_VALID_URL = r'https?://(?:www\.)?allocine\.fr/(?:article|video|film)/(?:fichearticle_gen_carticle=|player_gen_cmedia=|fichefilm_gen_cfilm=|video-)(?P<id>[0-9]+)(?:\.html)?'
|
||||||
|
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
'url': 'http://www.allocine.fr/article/fichearticle_gen_carticle=18635087.html',
|
'url': 'http://www.allocine.fr/article/fichearticle_gen_carticle=18635087.html',
|
||||||
'md5': '0c9fcf59a841f65635fa300ac43d8269',
|
'md5': '0c9fcf59a841f65635fa300ac43d8269',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '19546517',
|
'id': '19546517',
|
||||||
|
'display_id': '18635087',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'title': 'Astérix - Le Domaine des Dieux Teaser VF',
|
'title': 'Astérix - Le Domaine des Dieux Teaser VF',
|
||||||
'description': 'md5:abcd09ce503c6560512c14ebfdb720d2',
|
'description': 'md5:4a754271d9c6f16c72629a8a993ee884',
|
||||||
'thumbnail': 're:http://.*\.jpg',
|
'thumbnail': 're:http://.*\.jpg',
|
||||||
},
|
},
|
||||||
}, {
|
}, {
|
||||||
@ -31,64 +28,82 @@ class AllocineIE(InfoExtractor):
|
|||||||
'md5': 'd0cdce5d2b9522ce279fdfec07ff16e0',
|
'md5': 'd0cdce5d2b9522ce279fdfec07ff16e0',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '19540403',
|
'id': '19540403',
|
||||||
|
'display_id': '19540403',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'title': 'Planes 2 Bande-annonce VF',
|
'title': 'Planes 2 Bande-annonce VF',
|
||||||
'description': 'Regardez la bande annonce du film Planes 2 (Planes 2 Bande-annonce VF). Planes 2, un film de Roberts Gannaway',
|
'description': 'Regardez la bande annonce du film Planes 2 (Planes 2 Bande-annonce VF). Planes 2, un film de Roberts Gannaway',
|
||||||
'thumbnail': 're:http://.*\.jpg',
|
'thumbnail': 're:http://.*\.jpg',
|
||||||
},
|
},
|
||||||
}, {
|
}, {
|
||||||
'url': 'http://www.allocine.fr/film/fichefilm_gen_cfilm=181290.html',
|
'url': 'http://www.allocine.fr/video/player_gen_cmedia=19544709&cfilm=181290.html',
|
||||||
'md5': '101250fb127ef9ca3d73186ff22a47ce',
|
'md5': '101250fb127ef9ca3d73186ff22a47ce',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '19544709',
|
'id': '19544709',
|
||||||
|
'display_id': '19544709',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'title': 'Dragons 2 - Bande annonce finale VF',
|
'title': 'Dragons 2 - Bande annonce finale VF',
|
||||||
'description': 'md5:601d15393ac40f249648ef000720e7e3',
|
'description': 'md5:6cdd2d7c2687d4c6aafe80a35e17267a',
|
||||||
'thumbnail': 're:http://.*\.jpg',
|
'thumbnail': 're:http://.*\.jpg',
|
||||||
},
|
},
|
||||||
}, {
|
}, {
|
||||||
'url': 'http://www.allocine.fr/video/video-19550147/',
|
'url': 'http://www.allocine.fr/video/video-19550147/',
|
||||||
'only_matching': True,
|
'md5': '3566c0668c0235e2d224fd8edb389f67',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '19550147',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': 'Faux Raccord N°123 - Les gaffes de Cliffhanger',
|
||||||
|
'description': 'md5:bc734b83ffa2d8a12188d9eb48bb6354',
|
||||||
|
'thumbnail': 're:http://.*\.jpg',
|
||||||
|
},
|
||||||
}]
|
}]
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
mobj = re.match(self._VALID_URL, url)
|
display_id = self._match_id(url)
|
||||||
typ = mobj.group('typ')
|
|
||||||
display_id = mobj.group('id')
|
|
||||||
|
|
||||||
webpage = self._download_webpage(url, display_id)
|
webpage = self._download_webpage(url, display_id)
|
||||||
|
|
||||||
if typ == 'film':
|
formats = []
|
||||||
video_id = self._search_regex(r'href="/video/player_gen_cmedia=([0-9]+).+"', webpage, 'video id')
|
|
||||||
else:
|
|
||||||
player = self._search_regex(r'data-player=\'([^\']+)\'>', webpage, 'data player', default=None)
|
|
||||||
if player:
|
|
||||||
player_data = json.loads(player)
|
|
||||||
video_id = compat_str(player_data['refMedia'])
|
|
||||||
else:
|
|
||||||
model = self._search_regex(r'data-model="([^"]+)">', webpage, 'data model')
|
|
||||||
model_data = self._parse_json(unescapeHTML(model), display_id)
|
|
||||||
video_id = compat_str(model_data['id'])
|
|
||||||
|
|
||||||
xml = self._download_xml('http://www.allocine.fr/ws/AcVisiondataV4.ashx?media=%s' % video_id, display_id)
|
|
||||||
|
|
||||||
video = xpath_element(xml, './/AcVisionVideo').attrib
|
|
||||||
quality = qualities(['ld', 'md', 'hd'])
|
quality = qualities(['ld', 'md', 'hd'])
|
||||||
|
|
||||||
formats = []
|
model = self._html_search_regex(
|
||||||
for k, v in video.items():
|
r'data-model="([^"]+)"', webpage, 'data model', default=None)
|
||||||
if re.match(r'.+_path', k):
|
if model:
|
||||||
format_id = k.split('_')[0]
|
model_data = self._parse_json(model, display_id)
|
||||||
|
|
||||||
|
for video_url in model_data['sources'].values():
|
||||||
|
video_id, format_id = url_basename(video_url).split('_')[:2]
|
||||||
formats.append({
|
formats.append({
|
||||||
'format_id': format_id,
|
'format_id': format_id,
|
||||||
'quality': quality(format_id),
|
'quality': quality(format_id),
|
||||||
'url': v,
|
'url': video_url,
|
||||||
})
|
})
|
||||||
|
|
||||||
|
title = model_data['title']
|
||||||
|
else:
|
||||||
|
video_id = display_id
|
||||||
|
media_data = self._download_json(
|
||||||
|
'http://www.allocine.fr/ws/AcVisiondataV5.ashx?media=%s' % video_id, display_id)
|
||||||
|
for key, value in media_data['video'].items():
|
||||||
|
if not key.endswith('Path'):
|
||||||
|
continue
|
||||||
|
|
||||||
|
format_id = key[:-len('Path')]
|
||||||
|
formats.append({
|
||||||
|
'format_id': format_id,
|
||||||
|
'quality': quality(format_id),
|
||||||
|
'url': value,
|
||||||
|
})
|
||||||
|
|
||||||
|
title = remove_end(self._html_search_regex(
|
||||||
|
r'(?s)<title>(.+?)</title>', webpage, 'title'
|
||||||
|
).strip(), ' - AlloCiné')
|
||||||
|
|
||||||
self._sort_formats(formats)
|
self._sort_formats(formats)
|
||||||
|
|
||||||
return {
|
return {
|
||||||
'id': video_id,
|
'id': video_id,
|
||||||
'title': video['videoTitle'],
|
'display_id': display_id,
|
||||||
|
'title': title,
|
||||||
'thumbnail': self._og_search_thumbnail(webpage),
|
'thumbnail': self._og_search_thumbnail(webpage),
|
||||||
'formats': formats,
|
'formats': formats,
|
||||||
'description': self._og_search_description(webpage),
|
'description': self._og_search_description(webpage),
|
||||||
|
@ -174,11 +174,17 @@ class ARDMediathekIE(InfoExtractor):
|
|||||||
|
|
||||||
webpage = self._download_webpage(url, video_id)
|
webpage = self._download_webpage(url, video_id)
|
||||||
|
|
||||||
if '>Der gewünschte Beitrag ist nicht mehr verfügbar.<' in webpage:
|
ERRORS = (
|
||||||
raise ExtractorError('Video %s is no longer available' % video_id, expected=True)
|
('>Leider liegt eine Störung vor.', 'Video %s is unavailable'),
|
||||||
|
('>Der gewünschte Beitrag ist nicht mehr verfügbar.<',
|
||||||
|
'Video %s is no longer available'),
|
||||||
|
('Diese Sendung ist für Jugendliche unter 12 Jahren nicht geeignet. Der Clip ist deshalb nur von 20 bis 6 Uhr verfügbar.',
|
||||||
|
'This program is only suitable for those aged 12 and older. Video %s is therefore only available between 8 pm and 6 am.'),
|
||||||
|
)
|
||||||
|
|
||||||
if 'Diese Sendung ist für Jugendliche unter 12 Jahren nicht geeignet. Der Clip ist deshalb nur von 20 bis 6 Uhr verfügbar.' in webpage:
|
for pattern, message in ERRORS:
|
||||||
raise ExtractorError('This program is only suitable for those aged 12 and older. Video %s is therefore only available between 20 pm and 6 am.' % video_id, expected=True)
|
if pattern in webpage:
|
||||||
|
raise ExtractorError(message % video_id, expected=True)
|
||||||
|
|
||||||
if re.search(r'[\?&]rss($|[=&])', url):
|
if re.search(r'[\?&]rss($|[=&])', url):
|
||||||
doc = compat_etree_fromstring(webpage.encode('utf-8'))
|
doc = compat_etree_fromstring(webpage.encode('utf-8'))
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
# encoding: utf-8
|
# coding: utf-8
|
||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
import re
|
import re
|
||||||
@ -410,6 +410,22 @@ class ArteTVEmbedIE(ArteTVPlus7IE):
|
|||||||
return self._extract_from_json_url(json_url, video_id, lang)
|
return self._extract_from_json_url(json_url, video_id, lang)
|
||||||
|
|
||||||
|
|
||||||
|
class TheOperaPlatformIE(ArteTVPlus7IE):
|
||||||
|
IE_NAME = 'theoperaplatform'
|
||||||
|
_VALID_URL = r'https?://(?:www\.)?theoperaplatform\.eu/(?P<lang>fr|de|en|es)/(?P<id>[^/?#&]+)'
|
||||||
|
|
||||||
|
_TESTS = [{
|
||||||
|
'url': 'http://www.theoperaplatform.eu/de/opera/verdi-otello',
|
||||||
|
'md5': '970655901fa2e82e04c00b955e9afe7b',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '060338-009-A',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': 'Verdi - OTELLO',
|
||||||
|
'upload_date': '20160927',
|
||||||
|
},
|
||||||
|
}]
|
||||||
|
|
||||||
|
|
||||||
class ArteTVPlaylistIE(ArteTVBaseIE):
|
class ArteTVPlaylistIE(ArteTVBaseIE):
|
||||||
IE_NAME = 'arte.tv:playlist'
|
IE_NAME = 'arte.tv:playlist'
|
||||||
_VALID_URL = r'https?://(?:www\.)?arte\.tv/guide/(?P<lang>fr|de|en|es)/[^#]*#collection/(?P<id>PL-\d+)'
|
_VALID_URL = r'https?://(?:www\.)?arte\.tv/guide/(?P<lang>fr|de|en|es)/[^#]*#collection/(?P<id>PL-\d+)'
|
||||||
|
@ -8,10 +8,10 @@ from ..compat import compat_str
|
|||||||
from ..utils import int_or_none
|
from ..utils import int_or_none
|
||||||
|
|
||||||
|
|
||||||
class BeatportProIE(InfoExtractor):
|
class BeatportIE(InfoExtractor):
|
||||||
_VALID_URL = r'https?://pro\.beatport\.com/track/(?P<display_id>[^/]+)/(?P<id>[0-9]+)'
|
_VALID_URL = r'https?://(?:www\.|pro\.)?beatport\.com/track/(?P<display_id>[^/]+)/(?P<id>[0-9]+)'
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
'url': 'https://pro.beatport.com/track/synesthesia-original-mix/5379371',
|
'url': 'https://beatport.com/track/synesthesia-original-mix/5379371',
|
||||||
'md5': 'b3c34d8639a2f6a7f734382358478887',
|
'md5': 'b3c34d8639a2f6a7f734382358478887',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '5379371',
|
'id': '5379371',
|
||||||
@ -20,7 +20,7 @@ class BeatportProIE(InfoExtractor):
|
|||||||
'title': 'Froxic - Synesthesia (Original Mix)',
|
'title': 'Froxic - Synesthesia (Original Mix)',
|
||||||
},
|
},
|
||||||
}, {
|
}, {
|
||||||
'url': 'https://pro.beatport.com/track/love-and-war-original-mix/3756896',
|
'url': 'https://beatport.com/track/love-and-war-original-mix/3756896',
|
||||||
'md5': 'e44c3025dfa38c6577fbaeb43da43514',
|
'md5': 'e44c3025dfa38c6577fbaeb43da43514',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '3756896',
|
'id': '3756896',
|
||||||
@ -29,7 +29,7 @@ class BeatportProIE(InfoExtractor):
|
|||||||
'title': 'Wolfgang Gartner - Love & War (Original Mix)',
|
'title': 'Wolfgang Gartner - Love & War (Original Mix)',
|
||||||
},
|
},
|
||||||
}, {
|
}, {
|
||||||
'url': 'https://pro.beatport.com/track/birds-original-mix/4991738',
|
'url': 'https://beatport.com/track/birds-original-mix/4991738',
|
||||||
'md5': 'a1fd8e8046de3950fd039304c186c05f',
|
'md5': 'a1fd8e8046de3950fd039304c186c05f',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '4991738',
|
'id': '4991738',
|
@ -46,19 +46,19 @@ class BeegIE(InfoExtractor):
|
|||||||
self._proto_relative_url(cpl_url), video_id,
|
self._proto_relative_url(cpl_url), video_id,
|
||||||
'Downloading cpl JS', fatal=False)
|
'Downloading cpl JS', fatal=False)
|
||||||
if cpl:
|
if cpl:
|
||||||
beeg_version = self._search_regex(
|
beeg_version = int_or_none(self._search_regex(
|
||||||
r'beeg_version\s*=\s*(\d+)', cpl,
|
r'beeg_version\s*=\s*([^\b]+)', cpl,
|
||||||
'beeg version', default=None) or self._search_regex(
|
'beeg version', default=None)) or self._search_regex(
|
||||||
r'/(\d+)\.js', cpl_url, 'beeg version', default=None)
|
r'/(\d+)\.js', cpl_url, 'beeg version', default=None)
|
||||||
beeg_salt = self._search_regex(
|
beeg_salt = self._search_regex(
|
||||||
r'beeg_salt\s*=\s*(["\'])(?P<beeg_salt>.+?)\1', cpl, 'beeg beeg_salt',
|
r'beeg_salt\s*=\s*(["\'])(?P<beeg_salt>.+?)\1', cpl, 'beeg salt',
|
||||||
default=None, group='beeg_salt')
|
default=None, group='beeg_salt')
|
||||||
|
|
||||||
beeg_version = beeg_version or '1750'
|
beeg_version = beeg_version or '2000'
|
||||||
beeg_salt = beeg_salt or 'MIDtGaw96f0N1kMMAM1DE46EC9pmFr'
|
beeg_salt = beeg_salt or 'pmweAkq8lAYKdfWcFCUj0yoVgoPlinamH5UE1CB3H'
|
||||||
|
|
||||||
video = self._download_json(
|
video = self._download_json(
|
||||||
'http://api.beeg.com/api/v6/%s/video/%s' % (beeg_version, video_id),
|
'https://api.beeg.com/api/v6/%s/video/%s' % (beeg_version, video_id),
|
||||||
video_id)
|
video_id)
|
||||||
|
|
||||||
def split(o, e):
|
def split(o, e):
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
# encoding: utf-8
|
# coding: utf-8
|
||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
import re
|
import re
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
# encoding: utf-8
|
# coding: utf-8
|
||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
import re
|
import re
|
||||||
@ -6,11 +6,13 @@ import re
|
|||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
from ..compat import compat_urllib_parse_urlparse
|
from ..compat import compat_urllib_parse_urlparse
|
||||||
from ..utils import (
|
from ..utils import (
|
||||||
|
dict_get,
|
||||||
ExtractorError,
|
ExtractorError,
|
||||||
HEADRequest,
|
HEADRequest,
|
||||||
unified_strdate,
|
|
||||||
qualities,
|
|
||||||
int_or_none,
|
int_or_none,
|
||||||
|
qualities,
|
||||||
|
remove_end,
|
||||||
|
unified_strdate,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
@ -43,47 +45,46 @@ class CanalplusIE(InfoExtractor):
|
|||||||
|
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
'url': 'http://www.canalplus.fr/c-emissions/pid1830-c-zapping.html?vid=1192814',
|
'url': 'http://www.canalplus.fr/c-emissions/pid1830-c-zapping.html?vid=1192814',
|
||||||
'md5': '41f438a4904f7664b91b4ed0dec969dc',
|
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '1192814',
|
'id': '1405510',
|
||||||
|
'display_id': 'pid1830-c-zapping',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'title': "L'Année du Zapping 2014 - L'Année du Zapping 2014",
|
'title': 'Zapping - 02/07/2016',
|
||||||
'description': "Toute l'année 2014 dans un Zapping exceptionnel !",
|
'description': 'Le meilleur de toutes les chaînes, tous les jours',
|
||||||
'upload_date': '20150105',
|
'upload_date': '20160702',
|
||||||
},
|
},
|
||||||
}, {
|
}, {
|
||||||
'url': 'http://www.piwiplus.fr/videos-piwi/pid1405-le-labyrinthe-boing-super-ranger.html?vid=1108190',
|
'url': 'http://www.piwiplus.fr/videos-piwi/pid1405-le-labyrinthe-boing-super-ranger.html?vid=1108190',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '1108190',
|
'id': '1108190',
|
||||||
'ext': 'flv',
|
'display_id': 'pid1405-le-labyrinthe-boing-super-ranger',
|
||||||
'title': 'Le labyrinthe - Boing super ranger',
|
'ext': 'mp4',
|
||||||
|
'title': 'BOING SUPER RANGER - Ep : Le labyrinthe',
|
||||||
'description': 'md5:4cea7a37153be42c1ba2c1d3064376ff',
|
'description': 'md5:4cea7a37153be42c1ba2c1d3064376ff',
|
||||||
'upload_date': '20140724',
|
'upload_date': '20140724',
|
||||||
},
|
},
|
||||||
'skip': 'Only works from France',
|
'skip': 'Only works from France',
|
||||||
}, {
|
}, {
|
||||||
'url': 'http://www.d8.tv/d8-docs-mags/pid5198-d8-en-quete-d-actualite.html?vid=1390231',
|
'url': 'http://www.c8.fr/c8-divertissement/ms-touche-pas-a-mon-poste/pid6318-videos-integrales.html',
|
||||||
|
'md5': '4b47b12b4ee43002626b97fad8fb1de5',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '1390231',
|
'id': '1420213',
|
||||||
|
'display_id': 'pid6318-videos-integrales',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'title': "Vacances pas chères : prix discount ou grosses dépenses ? - En quête d'actualité",
|
'title': 'TPMP ! Même le matin - Les 35H de Baba - 14/10/2016',
|
||||||
'description': 'md5:edb6cf1cb4a1e807b5dd089e1ac8bfc6',
|
'description': 'md5:f96736c1b0ffaa96fd5b9e60ad871799',
|
||||||
'upload_date': '20160512',
|
'upload_date': '20161014',
|
||||||
},
|
|
||||||
'params': {
|
|
||||||
'skip_download': True,
|
|
||||||
},
|
},
|
||||||
|
'skip': 'Only works from France',
|
||||||
}, {
|
}, {
|
||||||
'url': 'http://www.itele.fr/chroniques/invite-bruce-toussaint/thierry-solere-nicolas-sarkozy-officialisera-sa-candidature-a-la-primaire-quand-il-le-voudra-167224',
|
'url': 'http://www.itele.fr/chroniques/invite-michael-darmon/rachida-dati-nicolas-sarkozy-est-le-plus-en-phase-avec-les-inquietudes-des-francais-171510',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '1398334',
|
'id': '1420176',
|
||||||
|
'display_id': 'rachida-dati-nicolas-sarkozy-est-le-plus-en-phase-avec-les-inquietudes-des-francais-171510',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'title': "L'invité de Bruce Toussaint du 07/06/2016 - ",
|
'title': 'L\'invité de Michaël Darmon du 14/10/2016 - ',
|
||||||
'description': 'md5:40ac7c9ad0feaeb6f605bad986f61324',
|
'description': 'Chaque matin du lundi au vendredi, Michaël Darmon reçoit un invité politique à 8h25.',
|
||||||
'upload_date': '20160607',
|
'upload_date': '20161014',
|
||||||
},
|
|
||||||
'params': {
|
|
||||||
'skip_download': True,
|
|
||||||
},
|
},
|
||||||
}, {
|
}, {
|
||||||
'url': 'http://m.canalplus.fr/?vid=1398231',
|
'url': 'http://m.canalplus.fr/?vid=1398231',
|
||||||
@ -95,18 +96,17 @@ class CanalplusIE(InfoExtractor):
|
|||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
mobj = re.match(self._VALID_URL, url)
|
mobj = re.match(self._VALID_URL, url)
|
||||||
video_id = mobj.groupdict().get('id') or mobj.groupdict().get('vid')
|
|
||||||
|
|
||||||
site_id = self._SITE_ID_MAP[compat_urllib_parse_urlparse(url).netloc.rsplit('.', 2)[-2]]
|
site_id = self._SITE_ID_MAP[compat_urllib_parse_urlparse(url).netloc.rsplit('.', 2)[-2]]
|
||||||
|
|
||||||
# Beware, some subclasses do not define an id group
|
# Beware, some subclasses do not define an id group
|
||||||
display_id = mobj.group('display_id') or video_id
|
display_id = remove_end(dict_get(mobj.groupdict(), ('display_id', 'id', 'vid')), '.html')
|
||||||
|
|
||||||
if video_id is None:
|
webpage = self._download_webpage(url, display_id)
|
||||||
webpage = self._download_webpage(url, display_id)
|
video_id = self._search_regex(
|
||||||
video_id = self._search_regex(
|
[r'<canal:player[^>]+?videoId=(["\'])(?P<id>\d+)',
|
||||||
[r'<canal:player[^>]+?videoId=(["\'])(?P<id>\d+)', r'id=["\']canal_video_player(?P<id>\d+)'],
|
r'id=["\']canal_video_player(?P<id>\d+)'],
|
||||||
webpage, 'video id', group='id')
|
webpage, 'video id', group='id')
|
||||||
|
|
||||||
info_url = self._VIDEO_INFO_TEMPLATE % (site_id, video_id)
|
info_url = self._VIDEO_INFO_TEMPLATE % (site_id, video_id)
|
||||||
video_data = self._download_json(info_url, video_id, 'Downloading video JSON')
|
video_data = self._download_json(info_url, video_id, 'Downloading video JSON')
|
||||||
|
@ -9,6 +9,8 @@ from ..utils import (
|
|||||||
try_get,
|
try_get,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
from .videomore import VideomoreIE
|
||||||
|
|
||||||
|
|
||||||
class CarambaTVIE(InfoExtractor):
|
class CarambaTVIE(InfoExtractor):
|
||||||
_VALID_URL = r'(?:carambatv:|https?://video1\.carambatv\.ru/v/)(?P<id>\d+)'
|
_VALID_URL = r'(?:carambatv:|https?://video1\.carambatv\.ru/v/)(?P<id>\d+)'
|
||||||
@ -62,14 +64,16 @@ class CarambaTVPageIE(InfoExtractor):
|
|||||||
_VALID_URL = r'https?://carambatv\.ru/(?:[^/]+/)+(?P<id>[^/?#&]+)'
|
_VALID_URL = r'https?://carambatv\.ru/(?:[^/]+/)+(?P<id>[^/?#&]+)'
|
||||||
_TEST = {
|
_TEST = {
|
||||||
'url': 'http://carambatv.ru/movie/bad-comedian/razborka-v-manile/',
|
'url': 'http://carambatv.ru/movie/bad-comedian/razborka-v-manile/',
|
||||||
'md5': '',
|
'md5': 'a49fb0ec2ad66503eeb46aac237d3c86',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '191910501',
|
'id': '475222',
|
||||||
'ext': 'mp4',
|
'ext': 'flv',
|
||||||
'title': '[BadComedian] - Разборка в Маниле (Абсолютный обзор)',
|
'title': '[BadComedian] - Разборка в Маниле (Абсолютный обзор)',
|
||||||
'thumbnail': 're:^https?://.*\.jpg$',
|
'thumbnail': 're:^https?://.*\.jpg',
|
||||||
'duration': 2678.31,
|
# duration reported by videomore is incorrect
|
||||||
|
'duration': int,
|
||||||
},
|
},
|
||||||
|
'add_ie': [VideomoreIE.ie_key()],
|
||||||
}
|
}
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
@ -77,6 +81,16 @@ class CarambaTVPageIE(InfoExtractor):
|
|||||||
|
|
||||||
webpage = self._download_webpage(url, video_id)
|
webpage = self._download_webpage(url, video_id)
|
||||||
|
|
||||||
|
videomore_url = VideomoreIE._extract_url(webpage)
|
||||||
|
if videomore_url:
|
||||||
|
title = self._og_search_title(webpage)
|
||||||
|
return {
|
||||||
|
'_type': 'url_transparent',
|
||||||
|
'url': videomore_url,
|
||||||
|
'ie_key': VideomoreIE.ie_key(),
|
||||||
|
'title': title,
|
||||||
|
}
|
||||||
|
|
||||||
video_url = self._og_search_property('video:iframe', webpage, default=None)
|
video_url = self._og_search_property('video:iframe', webpage, default=None)
|
||||||
|
|
||||||
if not video_url:
|
if not video_url:
|
||||||
|
@ -63,7 +63,7 @@ class CBSInteractiveIE(ThePlatformIE):
|
|||||||
webpage = self._download_webpage(url, display_id)
|
webpage = self._download_webpage(url, display_id)
|
||||||
|
|
||||||
data_json = self._html_search_regex(
|
data_json = self._html_search_regex(
|
||||||
r"data-(?:cnet|zdnet)-video(?:-uvp)?-options='([^']+)'",
|
r"data-(?:cnet|zdnet)-video(?:-uvp(?:js)?)?-options='([^']+)'",
|
||||||
webpage, 'data json')
|
webpage, 'data json')
|
||||||
data = self._parse_json(data_json, display_id)
|
data = self._parse_json(data_json, display_id)
|
||||||
vdata = data.get('video') or data['videos'][0]
|
vdata = data.get('video') or data['videos'][0]
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
# encoding: utf-8
|
# coding: utf-8
|
||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
# -*- coding: utf-8 -*-
|
# coding: utf-8
|
||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
import re
|
import re
|
||||||
|
@ -2,6 +2,7 @@
|
|||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
import base64
|
import base64
|
||||||
|
import re
|
||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
from ..utils import parse_duration
|
from ..utils import parse_duration
|
||||||
@ -70,7 +71,6 @@ class ChirbitProfileIE(InfoExtractor):
|
|||||||
'url': 'http://chirbit.com/ScarletBeauty',
|
'url': 'http://chirbit.com/ScarletBeauty',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': 'ScarletBeauty',
|
'id': 'ScarletBeauty',
|
||||||
'title': 'Chirbits by ScarletBeauty',
|
|
||||||
},
|
},
|
||||||
'playlist_mincount': 3,
|
'playlist_mincount': 3,
|
||||||
}
|
}
|
||||||
@ -78,13 +78,10 @@ class ChirbitProfileIE(InfoExtractor):
|
|||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
profile_id = self._match_id(url)
|
profile_id = self._match_id(url)
|
||||||
|
|
||||||
rss = self._download_xml(
|
webpage = self._download_webpage(url, profile_id)
|
||||||
'http://chirbit.com/rss/%s' % profile_id, profile_id)
|
|
||||||
|
|
||||||
entries = [
|
entries = [
|
||||||
self.url_result(audio_url.text, 'Chirbit')
|
self.url_result(self._proto_relative_url('//chirb.it/' + video_id))
|
||||||
for audio_url in rss.findall('./channel/item/link')]
|
for _, video_id in re.findall(r'<input[^>]+id=([\'"])copy-btn-(?P<id>[0-9a-zA-Z]+)\1', webpage)]
|
||||||
|
|
||||||
title = rss.find('./channel/title').text
|
return self.playlist_result(entries, profile_id)
|
||||||
|
|
||||||
return self.playlist_result(entries, profile_id, title)
|
|
||||||
|
@ -1,3 +1,4 @@
|
|||||||
|
# coding: utf-8
|
||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
@ -10,15 +11,15 @@ from ..utils import (
|
|||||||
class ClipfishIE(InfoExtractor):
|
class ClipfishIE(InfoExtractor):
|
||||||
_VALID_URL = r'https?://(?:www\.)?clipfish\.de/(?:[^/]+/)+video/(?P<id>[0-9]+)'
|
_VALID_URL = r'https?://(?:www\.)?clipfish\.de/(?:[^/]+/)+video/(?P<id>[0-9]+)'
|
||||||
_TEST = {
|
_TEST = {
|
||||||
'url': 'http://www.clipfish.de/special/game-trailer/video/3966754/fifa-14-e3-2013-trailer/',
|
'url': 'http://www.clipfish.de/special/ugly-americans/video/4343170/s01-e01-ugly-americans-date-in-der-hoelle/',
|
||||||
'md5': '79bc922f3e8a9097b3d68a93780fd475',
|
'md5': '720563e467b86374c194bdead08d207d',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '3966754',
|
'id': '4343170',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'title': 'FIFA 14 - E3 2013 Trailer',
|
'title': 'S01 E01 - Ugly Americans - Date in der Hölle',
|
||||||
'description': 'Video zu FIFA 14: E3 2013 Trailer',
|
'description': 'Mark Lilly arbeitet im Sozialdienst der Stadt New York und soll Immigranten bei ihrer Einbürgerung in die USA zur Seite stehen.',
|
||||||
'upload_date': '20130611',
|
'upload_date': '20161005',
|
||||||
'duration': 82,
|
'duration': 1291,
|
||||||
'view_count': int,
|
'view_count': int,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -50,10 +51,14 @@ class ClipfishIE(InfoExtractor):
|
|||||||
'tbr': int_or_none(video_info.get('bitrate')),
|
'tbr': int_or_none(video_info.get('bitrate')),
|
||||||
})
|
})
|
||||||
|
|
||||||
|
descr = video_info.get('descr')
|
||||||
|
if descr:
|
||||||
|
descr = descr.strip()
|
||||||
|
|
||||||
return {
|
return {
|
||||||
'id': video_id,
|
'id': video_id,
|
||||||
'title': video_info['title'],
|
'title': video_info['title'],
|
||||||
'description': video_info.get('descr'),
|
'description': descr,
|
||||||
'formats': formats,
|
'formats': formats,
|
||||||
'thumbnail': video_info.get('media_content_thumbnail_large') or video_info.get('media_thumbnail'),
|
'thumbnail': video_info.get('media_content_thumbnail_large') or video_info.get('media_thumbnail'),
|
||||||
'duration': int_or_none(video_info.get('media_length')),
|
'duration': int_or_none(video_info.get('media_length')),
|
||||||
|
@ -26,7 +26,7 @@ class CMTIE(MTVIE):
|
|||||||
'id': '1504699',
|
'id': '1504699',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'title': 'Still The King Ep. 109 in 3 Minutes',
|
'title': 'Still The King Ep. 109 in 3 Minutes',
|
||||||
'description': 'Relive or catch up with Still The King by watching this recap of season 1, episode 9. New episodes Sundays 9/8c.',
|
'description': 'Relive or catch up with Still The King by watching this recap of season 1, episode 9.',
|
||||||
'timestamp': 1469421000.0,
|
'timestamp': 1469421000.0,
|
||||||
'upload_date': '20160725',
|
'upload_date': '20160725',
|
||||||
},
|
},
|
||||||
@ -42,3 +42,8 @@ class CMTIE(MTVIE):
|
|||||||
'%s said: video is not available' % cls.IE_NAME, expected=True)
|
'%s said: video is not available' % cls.IE_NAME, expected=True)
|
||||||
|
|
||||||
return super(CMTIE, cls)._transform_rtmp_url(rtmp_video_url)
|
return super(CMTIE, cls)._transform_rtmp_url(rtmp_video_url)
|
||||||
|
|
||||||
|
def _extract_mgid(self, webpage):
|
||||||
|
return self._search_regex(
|
||||||
|
r'MTVN\.VIDEO\.contentUri\s*=\s*([\'"])(?P<mgid>.+?)\1',
|
||||||
|
webpage, 'mgid', group='mgid')
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
# encoding: utf-8
|
# coding: utf-8
|
||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
|
@ -21,6 +21,7 @@ from ..compat import (
|
|||||||
compat_os_name,
|
compat_os_name,
|
||||||
compat_str,
|
compat_str,
|
||||||
compat_urllib_error,
|
compat_urllib_error,
|
||||||
|
compat_urllib_parse_unquote,
|
||||||
compat_urllib_parse_urlencode,
|
compat_urllib_parse_urlencode,
|
||||||
compat_urllib_request,
|
compat_urllib_request,
|
||||||
compat_urlparse,
|
compat_urlparse,
|
||||||
@ -29,6 +30,7 @@ from ..downloader.f4m import remove_encrypted_media
|
|||||||
from ..utils import (
|
from ..utils import (
|
||||||
NO_DEFAULT,
|
NO_DEFAULT,
|
||||||
age_restricted,
|
age_restricted,
|
||||||
|
base_url,
|
||||||
bug_reports_message,
|
bug_reports_message,
|
||||||
clean_html,
|
clean_html,
|
||||||
compiled_regex_type,
|
compiled_regex_type,
|
||||||
@ -234,7 +236,7 @@ class InfoExtractor(object):
|
|||||||
chapter_id: Id of the chapter the video belongs to, as a unicode string.
|
chapter_id: Id of the chapter the video belongs to, as a unicode string.
|
||||||
|
|
||||||
The following fields should only be used when the video is an episode of some
|
The following fields should only be used when the video is an episode of some
|
||||||
series or programme:
|
series, programme or podcast:
|
||||||
|
|
||||||
series: Title of the series or programme the video episode belongs to.
|
series: Title of the series or programme the video episode belongs to.
|
||||||
season: Title of the season the video episode belongs to.
|
season: Title of the season the video episode belongs to.
|
||||||
@ -1099,6 +1101,13 @@ class InfoExtractor(object):
|
|||||||
manifest, ['{http://ns.adobe.com/f4m/1.0}bootstrapInfo', '{http://ns.adobe.com/f4m/2.0}bootstrapInfo'],
|
manifest, ['{http://ns.adobe.com/f4m/1.0}bootstrapInfo', '{http://ns.adobe.com/f4m/2.0}bootstrapInfo'],
|
||||||
'bootstrap info', default=None)
|
'bootstrap info', default=None)
|
||||||
|
|
||||||
|
vcodec = None
|
||||||
|
mime_type = xpath_text(
|
||||||
|
manifest, ['{http://ns.adobe.com/f4m/1.0}mimeType', '{http://ns.adobe.com/f4m/2.0}mimeType'],
|
||||||
|
'base URL', default=None)
|
||||||
|
if mime_type and mime_type.startswith('audio/'):
|
||||||
|
vcodec = 'none'
|
||||||
|
|
||||||
for i, media_el in enumerate(media_nodes):
|
for i, media_el in enumerate(media_nodes):
|
||||||
tbr = int_or_none(media_el.attrib.get('bitrate'))
|
tbr = int_or_none(media_el.attrib.get('bitrate'))
|
||||||
width = int_or_none(media_el.attrib.get('width'))
|
width = int_or_none(media_el.attrib.get('width'))
|
||||||
@ -1139,6 +1148,7 @@ class InfoExtractor(object):
|
|||||||
'width': f.get('width') or width,
|
'width': f.get('width') or width,
|
||||||
'height': f.get('height') or height,
|
'height': f.get('height') or height,
|
||||||
'format_id': f.get('format_id') if not tbr else format_id,
|
'format_id': f.get('format_id') if not tbr else format_id,
|
||||||
|
'vcodec': vcodec,
|
||||||
})
|
})
|
||||||
formats.extend(f4m_formats)
|
formats.extend(f4m_formats)
|
||||||
continue
|
continue
|
||||||
@ -1155,6 +1165,7 @@ class InfoExtractor(object):
|
|||||||
'tbr': tbr,
|
'tbr': tbr,
|
||||||
'width': width,
|
'width': width,
|
||||||
'height': height,
|
'height': height,
|
||||||
|
'vcodec': vcodec,
|
||||||
'preference': preference,
|
'preference': preference,
|
||||||
})
|
})
|
||||||
return formats
|
return formats
|
||||||
@ -1529,7 +1540,7 @@ class InfoExtractor(object):
|
|||||||
if res is False:
|
if res is False:
|
||||||
return []
|
return []
|
||||||
mpd, urlh = res
|
mpd, urlh = res
|
||||||
mpd_base_url = re.match(r'https?://.+/', urlh.geturl()).group()
|
mpd_base_url = base_url(urlh.geturl())
|
||||||
|
|
||||||
return self._parse_mpd_formats(
|
return self._parse_mpd_formats(
|
||||||
compat_etree_fromstring(mpd.encode('utf-8')), mpd_id, mpd_base_url,
|
compat_etree_fromstring(mpd.encode('utf-8')), mpd_id, mpd_base_url,
|
||||||
@ -1770,6 +1781,105 @@ class InfoExtractor(object):
|
|||||||
self.report_warning('Unknown MIME type %s in DASH manifest' % mime_type)
|
self.report_warning('Unknown MIME type %s in DASH manifest' % mime_type)
|
||||||
return formats
|
return formats
|
||||||
|
|
||||||
|
def _extract_ism_formats(self, ism_url, video_id, ism_id=None, note=None, errnote=None, fatal=True):
|
||||||
|
res = self._download_webpage_handle(
|
||||||
|
ism_url, video_id,
|
||||||
|
note=note or 'Downloading ISM manifest',
|
||||||
|
errnote=errnote or 'Failed to download ISM manifest',
|
||||||
|
fatal=fatal)
|
||||||
|
if res is False:
|
||||||
|
return []
|
||||||
|
ism, urlh = res
|
||||||
|
|
||||||
|
return self._parse_ism_formats(
|
||||||
|
compat_etree_fromstring(ism.encode('utf-8')), urlh.geturl(), ism_id)
|
||||||
|
|
||||||
|
def _parse_ism_formats(self, ism_doc, ism_url, ism_id=None):
|
||||||
|
if ism_doc.get('IsLive') == 'TRUE' or ism_doc.find('Protection') is not None:
|
||||||
|
return []
|
||||||
|
|
||||||
|
duration = int(ism_doc.attrib['Duration'])
|
||||||
|
timescale = int_or_none(ism_doc.get('TimeScale')) or 10000000
|
||||||
|
|
||||||
|
formats = []
|
||||||
|
for stream in ism_doc.findall('StreamIndex'):
|
||||||
|
stream_type = stream.get('Type')
|
||||||
|
if stream_type not in ('video', 'audio'):
|
||||||
|
continue
|
||||||
|
url_pattern = stream.attrib['Url']
|
||||||
|
stream_timescale = int_or_none(stream.get('TimeScale')) or timescale
|
||||||
|
stream_name = stream.get('Name')
|
||||||
|
for track in stream.findall('QualityLevel'):
|
||||||
|
fourcc = track.get('FourCC')
|
||||||
|
# TODO: add support for WVC1 and WMAP
|
||||||
|
if fourcc not in ('H264', 'AVC1', 'AACL'):
|
||||||
|
self.report_warning('%s is not a supported codec' % fourcc)
|
||||||
|
continue
|
||||||
|
tbr = int(track.attrib['Bitrate']) // 1000
|
||||||
|
width = int_or_none(track.get('MaxWidth'))
|
||||||
|
height = int_or_none(track.get('MaxHeight'))
|
||||||
|
sampling_rate = int_or_none(track.get('SamplingRate'))
|
||||||
|
|
||||||
|
track_url_pattern = re.sub(r'{[Bb]itrate}', track.attrib['Bitrate'], url_pattern)
|
||||||
|
track_url_pattern = compat_urlparse.urljoin(ism_url, track_url_pattern)
|
||||||
|
|
||||||
|
fragments = []
|
||||||
|
fragment_ctx = {
|
||||||
|
'time': 0,
|
||||||
|
}
|
||||||
|
stream_fragments = stream.findall('c')
|
||||||
|
for stream_fragment_index, stream_fragment in enumerate(stream_fragments):
|
||||||
|
fragment_ctx['time'] = int_or_none(stream_fragment.get('t')) or fragment_ctx['time']
|
||||||
|
fragment_repeat = int_or_none(stream_fragment.get('r')) or 1
|
||||||
|
fragment_ctx['duration'] = int_or_none(stream_fragment.get('d'))
|
||||||
|
if not fragment_ctx['duration']:
|
||||||
|
try:
|
||||||
|
next_fragment_time = int(stream_fragment[stream_fragment_index + 1].attrib['t'])
|
||||||
|
except IndexError:
|
||||||
|
next_fragment_time = duration
|
||||||
|
fragment_ctx['duration'] = (next_fragment_time - fragment_ctx['time']) / fragment_repeat
|
||||||
|
for _ in range(fragment_repeat):
|
||||||
|
fragments.append({
|
||||||
|
'url': re.sub(r'{start[ _]time}', compat_str(fragment_ctx['time']), track_url_pattern),
|
||||||
|
'duration': fragment_ctx['duration'] / stream_timescale,
|
||||||
|
})
|
||||||
|
fragment_ctx['time'] += fragment_ctx['duration']
|
||||||
|
|
||||||
|
format_id = []
|
||||||
|
if ism_id:
|
||||||
|
format_id.append(ism_id)
|
||||||
|
if stream_name:
|
||||||
|
format_id.append(stream_name)
|
||||||
|
format_id.append(compat_str(tbr))
|
||||||
|
|
||||||
|
formats.append({
|
||||||
|
'format_id': '-'.join(format_id),
|
||||||
|
'url': ism_url,
|
||||||
|
'manifest_url': ism_url,
|
||||||
|
'ext': 'ismv' if stream_type == 'video' else 'isma',
|
||||||
|
'width': width,
|
||||||
|
'height': height,
|
||||||
|
'tbr': tbr,
|
||||||
|
'asr': sampling_rate,
|
||||||
|
'vcodec': 'none' if stream_type == 'audio' else fourcc,
|
||||||
|
'acodec': 'none' if stream_type == 'video' else fourcc,
|
||||||
|
'protocol': 'ism',
|
||||||
|
'fragments': fragments,
|
||||||
|
'_download_params': {
|
||||||
|
'duration': duration,
|
||||||
|
'timescale': stream_timescale,
|
||||||
|
'width': width or 0,
|
||||||
|
'height': height or 0,
|
||||||
|
'fourcc': fourcc,
|
||||||
|
'codec_private_data': track.get('CodecPrivateData'),
|
||||||
|
'sampling_rate': sampling_rate,
|
||||||
|
'channels': int_or_none(track.get('Channels', 2)),
|
||||||
|
'bits_per_sample': int_or_none(track.get('BitsPerSample', 16)),
|
||||||
|
'nal_unit_length_field': int_or_none(track.get('NALUnitLengthField', 4)),
|
||||||
|
},
|
||||||
|
})
|
||||||
|
return formats
|
||||||
|
|
||||||
def _parse_html5_media_entries(self, base_url, webpage, video_id, m3u8_id=None, m3u8_entry_protocol='m3u8'):
|
def _parse_html5_media_entries(self, base_url, webpage, video_id, m3u8_id=None, m3u8_entry_protocol='m3u8'):
|
||||||
def absolute_url(video_url):
|
def absolute_url(video_url):
|
||||||
return compat_urlparse.urljoin(base_url, video_url)
|
return compat_urlparse.urljoin(base_url, video_url)
|
||||||
@ -1801,7 +1911,11 @@ class InfoExtractor(object):
|
|||||||
return is_plain_url, formats
|
return is_plain_url, formats
|
||||||
|
|
||||||
entries = []
|
entries = []
|
||||||
for media_tag, media_type, media_content in re.findall(r'(?s)(<(?P<tag>video|audio)[^>]*>)(.*?)</(?P=tag)>', webpage):
|
media_tags = [(media_tag, media_type, '')
|
||||||
|
for media_tag, media_type
|
||||||
|
in re.findall(r'(?s)(<(video|audio)[^>]*/>)', webpage)]
|
||||||
|
media_tags.extend(re.findall(r'(?s)(<(?P<tag>video|audio)[^>]*>)(.*?)</(?P=tag)>', webpage))
|
||||||
|
for media_tag, media_type, media_content in media_tags:
|
||||||
media_info = {
|
media_info = {
|
||||||
'formats': [],
|
'formats': [],
|
||||||
'subtitles': {},
|
'subtitles': {},
|
||||||
@ -1870,11 +1984,11 @@ class InfoExtractor(object):
|
|||||||
formats.extend(self._extract_f4m_formats(
|
formats.extend(self._extract_f4m_formats(
|
||||||
http_base_url + '/manifest.f4m',
|
http_base_url + '/manifest.f4m',
|
||||||
video_id, f4m_id='hds', fatal=False))
|
video_id, f4m_id='hds', fatal=False))
|
||||||
|
if 'dash' not in skip_protocols:
|
||||||
|
formats.extend(self._extract_mpd_formats(
|
||||||
|
http_base_url + '/manifest.mpd',
|
||||||
|
video_id, mpd_id='dash', fatal=False))
|
||||||
if re.search(r'(?:/smil:|\.smil)', url_base):
|
if re.search(r'(?:/smil:|\.smil)', url_base):
|
||||||
if 'dash' not in skip_protocols:
|
|
||||||
formats.extend(self._extract_mpd_formats(
|
|
||||||
http_base_url + '/manifest.mpd',
|
|
||||||
video_id, mpd_id='dash', fatal=False))
|
|
||||||
if 'smil' not in skip_protocols:
|
if 'smil' not in skip_protocols:
|
||||||
rtmp_formats = self._extract_smil_formats(
|
rtmp_formats = self._extract_smil_formats(
|
||||||
http_base_url + '/jwplayer.smil',
|
http_base_url + '/jwplayer.smil',
|
||||||
@ -2020,6 +2134,12 @@ class InfoExtractor(object):
|
|||||||
headers['Ytdl-request-proxy'] = geo_verification_proxy
|
headers['Ytdl-request-proxy'] = geo_verification_proxy
|
||||||
return headers
|
return headers
|
||||||
|
|
||||||
|
def _generic_id(self, url):
|
||||||
|
return compat_urllib_parse_unquote(os.path.splitext(url.rstrip('/').split('/')[-1])[0])
|
||||||
|
|
||||||
|
def _generic_title(self, url):
|
||||||
|
return compat_urllib_parse_unquote(os.path.splitext(url_basename(url))[0])
|
||||||
|
|
||||||
|
|
||||||
class SearchInfoExtractor(InfoExtractor):
|
class SearchInfoExtractor(InfoExtractor):
|
||||||
"""
|
"""
|
||||||
|
@ -1,13 +1,9 @@
|
|||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
import os
|
|
||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
from ..compat import (
|
from ..compat import (
|
||||||
compat_urllib_parse_unquote,
|
|
||||||
compat_urlparse,
|
compat_urlparse,
|
||||||
)
|
)
|
||||||
from ..utils import url_basename
|
|
||||||
|
|
||||||
|
|
||||||
class RtmpIE(InfoExtractor):
|
class RtmpIE(InfoExtractor):
|
||||||
@ -23,8 +19,8 @@ class RtmpIE(InfoExtractor):
|
|||||||
}]
|
}]
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
video_id = compat_urllib_parse_unquote(os.path.splitext(url.rstrip('/').split('/')[-1])[0])
|
video_id = self._generic_id(url)
|
||||||
title = compat_urllib_parse_unquote(os.path.splitext(url_basename(url))[0])
|
title = self._generic_title(url)
|
||||||
return {
|
return {
|
||||||
'id': video_id,
|
'id': video_id,
|
||||||
'title': title,
|
'title': title,
|
||||||
@ -34,3 +30,31 @@ class RtmpIE(InfoExtractor):
|
|||||||
'format_id': compat_urlparse.urlparse(url).scheme,
|
'format_id': compat_urlparse.urlparse(url).scheme,
|
||||||
}],
|
}],
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
class MmsIE(InfoExtractor):
|
||||||
|
IE_DESC = False # Do not list
|
||||||
|
_VALID_URL = r'(?i)mms://.+'
|
||||||
|
|
||||||
|
_TEST = {
|
||||||
|
# Direct MMS link
|
||||||
|
'url': 'mms://kentro.kaist.ac.kr/200907/MilesReid(0709).wmv',
|
||||||
|
'info_dict': {
|
||||||
|
'id': 'MilesReid(0709)',
|
||||||
|
'ext': 'wmv',
|
||||||
|
'title': 'MilesReid(0709)',
|
||||||
|
},
|
||||||
|
'params': {
|
||||||
|
'skip_download': True, # rtsp downloads, requiring mplayer or mpv
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
def _real_extract(self, url):
|
||||||
|
video_id = self._generic_id(url)
|
||||||
|
title = self._generic_title(url)
|
||||||
|
|
||||||
|
return {
|
||||||
|
'id': video_id,
|
||||||
|
'title': title,
|
||||||
|
'url': url,
|
||||||
|
}
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
# encoding: utf-8
|
# coding: utf-8
|
||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
import re
|
import re
|
||||||
@ -150,6 +150,7 @@ class CrunchyrollIE(CrunchyrollBaseIE):
|
|||||||
# rtmp
|
# rtmp
|
||||||
'skip_download': True,
|
'skip_download': True,
|
||||||
},
|
},
|
||||||
|
'skip': 'Video gone',
|
||||||
}, {
|
}, {
|
||||||
'url': 'http://www.crunchyroll.com/rezero-starting-life-in-another-world-/episode-5-the-morning-of-our-promise-is-still-distant-702409',
|
'url': 'http://www.crunchyroll.com/rezero-starting-life-in-another-world-/episode-5-the-morning-of-our-promise-is-still-distant-702409',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
|
@ -94,7 +94,8 @@ class DailymotionIE(DailymotionBaseInfoExtractor):
|
|||||||
'title': 'Leanna Decker - Cyber Girl Of The Year Desires Nude [Playboy Plus]',
|
'title': 'Leanna Decker - Cyber Girl Of The Year Desires Nude [Playboy Plus]',
|
||||||
'uploader': 'HotWaves1012',
|
'uploader': 'HotWaves1012',
|
||||||
'age_limit': 18,
|
'age_limit': 18,
|
||||||
}
|
},
|
||||||
|
'skip': 'video gone',
|
||||||
},
|
},
|
||||||
# geo-restricted, player v5
|
# geo-restricted, player v5
|
||||||
{
|
{
|
||||||
@ -144,7 +145,8 @@ class DailymotionIE(DailymotionBaseInfoExtractor):
|
|||||||
player_v5 = self._search_regex(
|
player_v5 = self._search_regex(
|
||||||
[r'buildPlayer\(({.+?})\);\n', # See https://github.com/rg3/youtube-dl/issues/7826
|
[r'buildPlayer\(({.+?})\);\n', # See https://github.com/rg3/youtube-dl/issues/7826
|
||||||
r'playerV5\s*=\s*dmp\.create\([^,]+?,\s*({.+?})\);',
|
r'playerV5\s*=\s*dmp\.create\([^,]+?,\s*({.+?})\);',
|
||||||
r'buildPlayer\(({.+?})\);'],
|
r'buildPlayer\(({.+?})\);',
|
||||||
|
r'var\s+config\s*=\s*({.+?});'],
|
||||||
webpage, 'player v5', default=None)
|
webpage, 'player v5', default=None)
|
||||||
if player_v5:
|
if player_v5:
|
||||||
player = self._parse_json(player_v5, video_id)
|
player = self._parse_json(player_v5, video_id)
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
# encoding: utf-8
|
# coding: utf-8
|
||||||
|
|
||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
|
@ -9,7 +9,7 @@ from ..utils import (
|
|||||||
|
|
||||||
class DotsubIE(InfoExtractor):
|
class DotsubIE(InfoExtractor):
|
||||||
_VALID_URL = r'https?://(?:www\.)?dotsub\.com/view/(?P<id>[^/]+)'
|
_VALID_URL = r'https?://(?:www\.)?dotsub\.com/view/(?P<id>[^/]+)'
|
||||||
_TEST = {
|
_TESTS = [{
|
||||||
'url': 'https://dotsub.com/view/9c63db2a-fa95-4838-8e6e-13deafe47f09',
|
'url': 'https://dotsub.com/view/9c63db2a-fa95-4838-8e6e-13deafe47f09',
|
||||||
'md5': '21c7ff600f545358134fea762a6d42b6',
|
'md5': '21c7ff600f545358134fea762a6d42b6',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
@ -24,7 +24,24 @@ class DotsubIE(InfoExtractor):
|
|||||||
'upload_date': '20131130',
|
'upload_date': '20131130',
|
||||||
'view_count': int,
|
'view_count': int,
|
||||||
}
|
}
|
||||||
}
|
}, {
|
||||||
|
'url': 'https://dotsub.com/view/747bcf58-bd59-45b7-8c8c-ac312d084ee6',
|
||||||
|
'md5': '2bb4a83896434d5c26be868c609429a3',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '168006778',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': 'Apartments and flats in Raipur the white symphony',
|
||||||
|
'description': 'md5:784d0639e6b7d1bc29530878508e38fe',
|
||||||
|
'thumbnail': 're:^https?://dotsub.com/media/747bcf58-bd59-45b7-8c8c-ac312d084ee6/p',
|
||||||
|
'duration': 290,
|
||||||
|
'timestamp': 1476767794.2809999,
|
||||||
|
'upload_date': '20160525',
|
||||||
|
'uploader': 'parthivi001',
|
||||||
|
'uploader_id': 'user52596202',
|
||||||
|
'view_count': int,
|
||||||
|
},
|
||||||
|
'add_ie': ['Vimeo'],
|
||||||
|
}]
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
video_id = self._match_id(url)
|
video_id = self._match_id(url)
|
||||||
@ -37,12 +54,23 @@ class DotsubIE(InfoExtractor):
|
|||||||
webpage = self._download_webpage(url, video_id)
|
webpage = self._download_webpage(url, video_id)
|
||||||
video_url = self._search_regex(
|
video_url = self._search_regex(
|
||||||
[r'<source[^>]+src="([^"]+)"', r'"file"\s*:\s*\'([^\']+)'],
|
[r'<source[^>]+src="([^"]+)"', r'"file"\s*:\s*\'([^\']+)'],
|
||||||
webpage, 'video url')
|
webpage, 'video url', default=None)
|
||||||
|
info_dict = {
|
||||||
|
'id': video_id,
|
||||||
|
'url': video_url,
|
||||||
|
'ext': 'flv',
|
||||||
|
}
|
||||||
|
|
||||||
return {
|
if not video_url:
|
||||||
'id': video_id,
|
setup_data = self._parse_json(self._html_search_regex(
|
||||||
'url': video_url,
|
r'(?s)data-setup=([\'"])(?P<content>(?!\1).+?)\1',
|
||||||
'ext': 'flv',
|
webpage, 'setup data', group='content'), video_id)
|
||||||
|
info_dict = {
|
||||||
|
'_type': 'url_transparent',
|
||||||
|
'url': setup_data['src'],
|
||||||
|
}
|
||||||
|
|
||||||
|
info_dict.update({
|
||||||
'title': info['title'],
|
'title': info['title'],
|
||||||
'description': info.get('description'),
|
'description': info.get('description'),
|
||||||
'thumbnail': info.get('screenshotURI'),
|
'thumbnail': info.get('screenshotURI'),
|
||||||
@ -50,4 +78,6 @@ class DotsubIE(InfoExtractor):
|
|||||||
'uploader': info.get('user'),
|
'uploader': info.get('user'),
|
||||||
'timestamp': float_or_none(info.get('dateCreated'), 1000),
|
'timestamp': float_or_none(info.get('dateCreated'), 1000),
|
||||||
'view_count': int_or_none(info.get('numberOfViews')),
|
'view_count': int_or_none(info.get('numberOfViews')),
|
||||||
}
|
})
|
||||||
|
|
||||||
|
return info_dict
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
# encoding: utf-8
|
# coding: utf-8
|
||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
import itertools
|
import itertools
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
# encoding: utf-8
|
# coding: utf-8
|
||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
# encoding: utf-8
|
# coding: utf-8
|
||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
|
@ -66,6 +66,7 @@ from .arte import (
|
|||||||
ArteTVDDCIE,
|
ArteTVDDCIE,
|
||||||
ArteTVMagazineIE,
|
ArteTVMagazineIE,
|
||||||
ArteTVEmbedIE,
|
ArteTVEmbedIE,
|
||||||
|
TheOperaPlatformIE,
|
||||||
ArteTVPlaylistIE,
|
ArteTVPlaylistIE,
|
||||||
)
|
)
|
||||||
from .atresplayer import AtresPlayerIE
|
from .atresplayer import AtresPlayerIE
|
||||||
@ -93,7 +94,7 @@ from .bbc import (
|
|||||||
from .beeg import BeegIE
|
from .beeg import BeegIE
|
||||||
from .behindkink import BehindKinkIE
|
from .behindkink import BehindKinkIE
|
||||||
from .bellmedia import BellMediaIE
|
from .bellmedia import BellMediaIE
|
||||||
from .beatportpro import BeatportProIE
|
from .beatport import BeatportIE
|
||||||
from .bet import BetIE
|
from .bet import BetIE
|
||||||
from .bigflix import BigflixIE
|
from .bigflix import BigflixIE
|
||||||
from .bild import BildIE
|
from .bild import BildIE
|
||||||
@ -186,7 +187,10 @@ from .comedycentral import (
|
|||||||
)
|
)
|
||||||
from .comcarcoff import ComCarCoffIE
|
from .comcarcoff import ComCarCoffIE
|
||||||
from .commonmistakes import CommonMistakesIE, UnicodeBOMIE
|
from .commonmistakes import CommonMistakesIE, UnicodeBOMIE
|
||||||
from .commonprotocols import RtmpIE
|
from .commonprotocols import (
|
||||||
|
MmsIE,
|
||||||
|
RtmpIE,
|
||||||
|
)
|
||||||
from .condenast import CondeNastIE
|
from .condenast import CondeNastIE
|
||||||
from .cracked import CrackedIE
|
from .cracked import CrackedIE
|
||||||
from .crackle import CrackleIE
|
from .crackle import CrackleIE
|
||||||
@ -345,7 +349,10 @@ from .goshgay import GoshgayIE
|
|||||||
from .gputechconf import GPUTechConfIE
|
from .gputechconf import GPUTechConfIE
|
||||||
from .groupon import GrouponIE
|
from .groupon import GrouponIE
|
||||||
from .hark import HarkIE
|
from .hark import HarkIE
|
||||||
from .hbo import HBOIE
|
from .hbo import (
|
||||||
|
HBOIE,
|
||||||
|
HBOEpisodeIE,
|
||||||
|
)
|
||||||
from .hearthisat import HearThisAtIE
|
from .hearthisat import HearThisAtIE
|
||||||
from .heise import HeiseIE
|
from .heise import HeiseIE
|
||||||
from .hellporno import HellPornoIE
|
from .hellporno import HellPornoIE
|
||||||
@ -366,6 +373,7 @@ from .hrti import (
|
|||||||
HRTiIE,
|
HRTiIE,
|
||||||
HRTiPlaylistIE,
|
HRTiPlaylistIE,
|
||||||
)
|
)
|
||||||
|
from .huajiao import HuajiaoIE
|
||||||
from .huffpost import HuffPostIE
|
from .huffpost import HuffPostIE
|
||||||
from .hypem import HypemIE
|
from .hypem import HypemIE
|
||||||
from .iconosquare import IconosquareIE
|
from .iconosquare import IconosquareIE
|
||||||
@ -400,6 +408,10 @@ from .ivi import (
|
|||||||
from .ivideon import IvideonIE
|
from .ivideon import IvideonIE
|
||||||
from .iwara import IwaraIE
|
from .iwara import IwaraIE
|
||||||
from .izlesene import IzleseneIE
|
from .izlesene import IzleseneIE
|
||||||
|
from .jamendo import (
|
||||||
|
JamendoIE,
|
||||||
|
JamendoAlbumIE,
|
||||||
|
)
|
||||||
from .jeuxvideo import JeuxVideoIE
|
from .jeuxvideo import JeuxVideoIE
|
||||||
from .jove import JoveIE
|
from .jove import JoveIE
|
||||||
from .jwplatform import JWPlatformIE
|
from .jwplatform import JWPlatformIE
|
||||||
@ -437,6 +449,7 @@ from .lcp import (
|
|||||||
)
|
)
|
||||||
from .learnr import LearnrIE
|
from .learnr import LearnrIE
|
||||||
from .lecture2go import Lecture2GoIE
|
from .lecture2go import Lecture2GoIE
|
||||||
|
from .lego import LEGOIE
|
||||||
from .lemonde import LemondeIE
|
from .lemonde import LemondeIE
|
||||||
from .leeco import (
|
from .leeco import (
|
||||||
LeIE,
|
LeIE,
|
||||||
@ -583,6 +596,7 @@ from .nhl import (
|
|||||||
from .nick import (
|
from .nick import (
|
||||||
NickIE,
|
NickIE,
|
||||||
NickDeIE,
|
NickDeIE,
|
||||||
|
NickNightIE,
|
||||||
)
|
)
|
||||||
from .niconico import NiconicoIE, NiconicoPlaylistIE
|
from .niconico import NiconicoIE, NiconicoPlaylistIE
|
||||||
from .ninecninemedia import (
|
from .ninecninemedia import (
|
||||||
@ -592,6 +606,7 @@ from .ninecninemedia import (
|
|||||||
from .ninegag import NineGagIE
|
from .ninegag import NineGagIE
|
||||||
from .ninenow import NineNowIE
|
from .ninenow import NineNowIE
|
||||||
from .nintendo import NintendoIE
|
from .nintendo import NintendoIE
|
||||||
|
from .nobelprize import NobelPrizeIE
|
||||||
from .noco import NocoIE
|
from .noco import NocoIE
|
||||||
from .normalboots import NormalbootsIE
|
from .normalboots import NormalbootsIE
|
||||||
from .nosvideo import NosVideoIE
|
from .nosvideo import NosVideoIE
|
||||||
@ -637,6 +652,7 @@ from .nytimes import (
|
|||||||
NYTimesArticleIE,
|
NYTimesArticleIE,
|
||||||
)
|
)
|
||||||
from .nuvid import NuvidIE
|
from .nuvid import NuvidIE
|
||||||
|
from .nzz import NZZIE
|
||||||
from .odatv import OdaTVIE
|
from .odatv import OdaTVIE
|
||||||
from .odnoklassniki import OdnoklassnikiIE
|
from .odnoklassniki import OdnoklassnikiIE
|
||||||
from .oktoberfesttv import OktoberfestTVIE
|
from .oktoberfesttv import OktoberfestTVIE
|
||||||
@ -657,6 +673,7 @@ from .orf import (
|
|||||||
ORFFM4IE,
|
ORFFM4IE,
|
||||||
ORFIPTVIE,
|
ORFIPTVIE,
|
||||||
)
|
)
|
||||||
|
from .pandatv import PandaTVIE
|
||||||
from .pandoratv import PandoraTVIE
|
from .pandoratv import PandoraTVIE
|
||||||
from .parliamentliveuk import ParliamentLiveUKIE
|
from .parliamentliveuk import ParliamentLiveUKIE
|
||||||
from .patreon import PatreonIE
|
from .patreon import PatreonIE
|
||||||
@ -730,6 +747,10 @@ from .rbmaradio import RBMARadioIE
|
|||||||
from .rds import RDSIE
|
from .rds import RDSIE
|
||||||
from .redtube import RedTubeIE
|
from .redtube import RedTubeIE
|
||||||
from .regiotv import RegioTVIE
|
from .regiotv import RegioTVIE
|
||||||
|
from .rentv import (
|
||||||
|
RENTVIE,
|
||||||
|
RENTVArticleIE,
|
||||||
|
)
|
||||||
from .restudy import RestudyIE
|
from .restudy import RestudyIE
|
||||||
from .reuters import ReutersIE
|
from .reuters import ReutersIE
|
||||||
from .reverbnation import ReverbNationIE
|
from .reverbnation import ReverbNationIE
|
||||||
@ -786,7 +807,10 @@ from .sendtonews import SendtoNewsIE
|
|||||||
from .servingsys import ServingSysIE
|
from .servingsys import ServingSysIE
|
||||||
from .sexu import SexuIE
|
from .sexu import SexuIE
|
||||||
from .shahid import ShahidIE
|
from .shahid import ShahidIE
|
||||||
from .shared import SharedIE
|
from .shared import (
|
||||||
|
SharedIE,
|
||||||
|
VivoIE,
|
||||||
|
)
|
||||||
from .sharesix import ShareSixIE
|
from .sharesix import ShareSixIE
|
||||||
from .sina import SinaIE
|
from .sina import SinaIE
|
||||||
from .sixplay import SixPlayIE
|
from .sixplay import SixPlayIE
|
||||||
@ -890,8 +914,10 @@ from .theplatform import (
|
|||||||
from .thescene import TheSceneIE
|
from .thescene import TheSceneIE
|
||||||
from .thesixtyone import TheSixtyOneIE
|
from .thesixtyone import TheSixtyOneIE
|
||||||
from .thestar import TheStarIE
|
from .thestar import TheStarIE
|
||||||
|
from .theweatherchannel import TheWeatherChannelIE
|
||||||
from .thisamericanlife import ThisAmericanLifeIE
|
from .thisamericanlife import ThisAmericanLifeIE
|
||||||
from .thisav import ThisAVIE
|
from .thisav import ThisAVIE
|
||||||
|
from .thisoldhouse import ThisOldHouseIE
|
||||||
from .threeqsdn import ThreeQSDNIE
|
from .threeqsdn import ThreeQSDNIE
|
||||||
from .tinypic import TinyPicIE
|
from .tinypic import TinyPicIE
|
||||||
from .tlc import TlcDeIE
|
from .tlc import TlcDeIE
|
||||||
@ -906,6 +932,7 @@ from .tnaflix import (
|
|||||||
MovieFapIE,
|
MovieFapIE,
|
||||||
)
|
)
|
||||||
from .toggle import ToggleIE
|
from .toggle import ToggleIE
|
||||||
|
from .tonline import TOnlineIE
|
||||||
from .toutv import TouTvIE
|
from .toutv import TouTvIE
|
||||||
from .toypics import ToypicsUserIE, ToypicsIE
|
from .toypics import ToypicsUserIE, ToypicsIE
|
||||||
from .traileraddict import TrailerAddictIE
|
from .traileraddict import TrailerAddictIE
|
||||||
|
@ -1,6 +1,5 @@
|
|||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
import json
|
|
||||||
import re
|
import re
|
||||||
import socket
|
import socket
|
||||||
|
|
||||||
@ -100,7 +99,8 @@ class FacebookIE(InfoExtractor):
|
|||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'title': '"What are you doing running in the snow?"',
|
'title': '"What are you doing running in the snow?"',
|
||||||
'uploader': 'FailArmy',
|
'uploader': 'FailArmy',
|
||||||
}
|
},
|
||||||
|
'skip': 'Video gone',
|
||||||
}, {
|
}, {
|
||||||
'url': 'https://m.facebook.com/story.php?story_fbid=1035862816472149&id=116132035111903',
|
'url': 'https://m.facebook.com/story.php?story_fbid=1035862816472149&id=116132035111903',
|
||||||
'md5': '1deb90b6ac27f7efcf6d747c8a27f5e3',
|
'md5': '1deb90b6ac27f7efcf6d747c8a27f5e3',
|
||||||
@ -110,6 +110,7 @@ class FacebookIE(InfoExtractor):
|
|||||||
'title': 'What the Flock Is Going On In New Zealand Credit: ViralHog',
|
'title': 'What the Flock Is Going On In New Zealand Credit: ViralHog',
|
||||||
'uploader': 'S. Saint',
|
'uploader': 'S. Saint',
|
||||||
},
|
},
|
||||||
|
'skip': 'Video gone',
|
||||||
}, {
|
}, {
|
||||||
'note': 'swf params escaped',
|
'note': 'swf params escaped',
|
||||||
'url': 'https://www.facebook.com/barackobama/posts/10153664894881749',
|
'url': 'https://www.facebook.com/barackobama/posts/10153664894881749',
|
||||||
@ -119,6 +120,18 @@ class FacebookIE(InfoExtractor):
|
|||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'title': 'Facebook video #10153664894881749',
|
'title': 'Facebook video #10153664894881749',
|
||||||
},
|
},
|
||||||
|
}, {
|
||||||
|
# have 1080P, but only up to 720p in swf params
|
||||||
|
'url': 'https://www.facebook.com/cnn/videos/10155529876156509/',
|
||||||
|
'md5': '0d9813160b146b3bc8744e006027fcc6',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '10155529876156509',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': 'Holocaust survivor becomes US citizen',
|
||||||
|
'timestamp': 1477818095,
|
||||||
|
'upload_date': '20161030',
|
||||||
|
'uploader': 'CNN',
|
||||||
|
},
|
||||||
}, {
|
}, {
|
||||||
'url': 'https://www.facebook.com/video.php?v=10204634152394104',
|
'url': 'https://www.facebook.com/video.php?v=10204634152394104',
|
||||||
'only_matching': True,
|
'only_matching': True,
|
||||||
@ -227,43 +240,13 @@ class FacebookIE(InfoExtractor):
|
|||||||
|
|
||||||
video_data = None
|
video_data = None
|
||||||
|
|
||||||
BEFORE = '{swf.addParam(param[0], param[1]);});'
|
server_js_data = self._parse_json(self._search_regex(
|
||||||
AFTER = '.forEach(function(variable) {swf.addVariable(variable[0], variable[1]);});'
|
r'handleServerJS\(({.+})(?:\);|,")', webpage, 'server js data', default='{}'), video_id)
|
||||||
PATTERN = re.escape(BEFORE) + '(?:\n|\\\\n)(.*?)' + re.escape(AFTER)
|
for item in server_js_data.get('instances', []):
|
||||||
|
if item[1][0] == 'VideoConfig':
|
||||||
for m in re.findall(PATTERN, webpage):
|
video_data = item[2][0]['videoData']
|
||||||
swf_params = m.replace('\\\\', '\\').replace('\\"', '"')
|
|
||||||
data = dict(json.loads(swf_params))
|
|
||||||
params_raw = compat_urllib_parse_unquote(data['params'])
|
|
||||||
video_data_candidate = json.loads(params_raw)['video_data']
|
|
||||||
for _, f in video_data_candidate.items():
|
|
||||||
if not f:
|
|
||||||
continue
|
|
||||||
if isinstance(f, dict):
|
|
||||||
f = [f]
|
|
||||||
if not isinstance(f, list):
|
|
||||||
continue
|
|
||||||
if f[0].get('video_id') == video_id:
|
|
||||||
video_data = video_data_candidate
|
|
||||||
break
|
|
||||||
if video_data:
|
|
||||||
break
|
break
|
||||||
|
|
||||||
def video_data_list2dict(video_data):
|
|
||||||
ret = {}
|
|
||||||
for item in video_data:
|
|
||||||
format_id = item['stream_type']
|
|
||||||
ret.setdefault(format_id, []).append(item)
|
|
||||||
return ret
|
|
||||||
|
|
||||||
if not video_data:
|
|
||||||
server_js_data = self._parse_json(self._search_regex(
|
|
||||||
r'handleServerJS\(({.+})\);', webpage, 'server js data', default='{}'), video_id)
|
|
||||||
for item in server_js_data.get('instances', []):
|
|
||||||
if item[1][0] == 'VideoConfig':
|
|
||||||
video_data = video_data_list2dict(item[2][0]['videoData'])
|
|
||||||
break
|
|
||||||
|
|
||||||
if not video_data:
|
if not video_data:
|
||||||
if not fatal_if_no_video:
|
if not fatal_if_no_video:
|
||||||
return webpage, False
|
return webpage, False
|
||||||
@ -276,7 +259,8 @@ class FacebookIE(InfoExtractor):
|
|||||||
raise ExtractorError('Cannot parse data')
|
raise ExtractorError('Cannot parse data')
|
||||||
|
|
||||||
formats = []
|
formats = []
|
||||||
for format_id, f in video_data.items():
|
for f in video_data:
|
||||||
|
format_id = f['stream_type']
|
||||||
if f and isinstance(f, dict):
|
if f and isinstance(f, dict):
|
||||||
f = [f]
|
f = [f]
|
||||||
if not f or not isinstance(f, list):
|
if not f or not isinstance(f, list):
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
# encoding: utf-8
|
# coding: utf-8
|
||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
# encoding: utf-8
|
# coding: utf-8
|
||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
# -*- coding: utf-8 -*-
|
# coding: utf-8
|
||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
|
@ -2,25 +2,27 @@
|
|||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
|
from .streamable import StreamableIE
|
||||||
|
|
||||||
|
|
||||||
class FootyRoomIE(InfoExtractor):
|
class FootyRoomIE(InfoExtractor):
|
||||||
_VALID_URL = r'https?://footyroom\.com/(?P<id>[^/]+)'
|
_VALID_URL = r'https?://footyroom\.com/matches/(?P<id>\d+)'
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
'url': 'http://footyroom.com/schalke-04-0-2-real-madrid-2015-02/',
|
'url': 'http://footyroom.com/matches/79922154/hull-city-vs-chelsea/review',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': 'schalke-04-0-2-real-madrid-2015-02',
|
'id': '79922154',
|
||||||
'title': 'Schalke 04 0 – 2 Real Madrid',
|
'title': 'VIDEO Hull City 0 - 2 Chelsea',
|
||||||
},
|
},
|
||||||
'playlist_count': 3,
|
'playlist_count': 2,
|
||||||
'skip': 'Video for this match is not available',
|
'add_ie': [StreamableIE.ie_key()],
|
||||||
}, {
|
}, {
|
||||||
'url': 'http://footyroom.com/georgia-0-2-germany-2015-03/',
|
'url': 'http://footyroom.com/matches/75817984/georgia-vs-germany/review',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': 'georgia-0-2-germany-2015-03',
|
'id': '75817984',
|
||||||
'title': 'Georgia 0 – 2 Germany',
|
'title': 'VIDEO Georgia 0 - 2 Germany',
|
||||||
},
|
},
|
||||||
'playlist_count': 1,
|
'playlist_count': 1,
|
||||||
|
'add_ie': ['Playwire']
|
||||||
}]
|
}]
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
@ -28,9 +30,8 @@ class FootyRoomIE(InfoExtractor):
|
|||||||
|
|
||||||
webpage = self._download_webpage(url, playlist_id)
|
webpage = self._download_webpage(url, playlist_id)
|
||||||
|
|
||||||
playlist = self._parse_json(
|
playlist = self._parse_json(self._search_regex(
|
||||||
self._search_regex(
|
r'DataStore\.media\s*=\s*([^;]+)', webpage, 'media data'),
|
||||||
r'VideoSelector\.load\((\[.+?\])\);', webpage, 'video selector'),
|
|
||||||
playlist_id)
|
playlist_id)
|
||||||
|
|
||||||
playlist_title = self._og_search_title(webpage)
|
playlist_title = self._og_search_title(webpage)
|
||||||
@ -40,11 +41,16 @@ class FootyRoomIE(InfoExtractor):
|
|||||||
payload = video.get('payload')
|
payload = video.get('payload')
|
||||||
if not payload:
|
if not payload:
|
||||||
continue
|
continue
|
||||||
playwire_url = self._search_regex(
|
playwire_url = self._html_search_regex(
|
||||||
r'data-config="([^"]+)"', payload,
|
r'data-config="([^"]+)"', payload,
|
||||||
'playwire url', default=None)
|
'playwire url', default=None)
|
||||||
if playwire_url:
|
if playwire_url:
|
||||||
entries.append(self.url_result(self._proto_relative_url(
|
entries.append(self.url_result(self._proto_relative_url(
|
||||||
playwire_url, 'http:'), 'Playwire'))
|
playwire_url, 'http:'), 'Playwire'))
|
||||||
|
|
||||||
|
streamable_url = StreamableIE._extract_url(payload)
|
||||||
|
if streamable_url:
|
||||||
|
entries.append(self.url_result(
|
||||||
|
streamable_url, StreamableIE.ie_key()))
|
||||||
|
|
||||||
return self.playlist_result(entries, playlist_id, playlist_title)
|
return self.playlist_result(entries, playlist_id, playlist_title)
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
# encoding: utf-8
|
# coding: utf-8
|
||||||
|
|
||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
# encoding: utf-8
|
# coding: utf-8
|
||||||
|
|
||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
@ -27,7 +27,6 @@ from ..utils import (
|
|||||||
unified_strdate,
|
unified_strdate,
|
||||||
unsmuggle_url,
|
unsmuggle_url,
|
||||||
UnsupportedError,
|
UnsupportedError,
|
||||||
url_basename,
|
|
||||||
xpath_text,
|
xpath_text,
|
||||||
)
|
)
|
||||||
from .brightcove import (
|
from .brightcove import (
|
||||||
@ -1209,20 +1208,6 @@ class GenericIE(InfoExtractor):
|
|||||||
'duration': 51690,
|
'duration': 51690,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
# JWPlayer with M3U8
|
|
||||||
{
|
|
||||||
'url': 'http://ren.tv/novosti/2015-09-25/sluchaynyy-prohozhiy-poymal-avtougonshchika-v-murmanske-video',
|
|
||||||
'info_dict': {
|
|
||||||
'id': 'playlist',
|
|
||||||
'ext': 'mp4',
|
|
||||||
'title': 'Случайный прохожий поймал автоугонщика в Мурманске. ВИДЕО | РЕН ТВ',
|
|
||||||
'uploader': 'ren.tv',
|
|
||||||
},
|
|
||||||
'params': {
|
|
||||||
# m3u8 downloads
|
|
||||||
'skip_download': True,
|
|
||||||
}
|
|
||||||
},
|
|
||||||
# Brightcove embed, with no valid 'renditions' but valid 'IOSRenditions'
|
# Brightcove embed, with no valid 'renditions' but valid 'IOSRenditions'
|
||||||
# This video can't be played in browsers if Flash disabled and UA set to iPhone, which is actually a false alarm
|
# This video can't be played in browsers if Flash disabled and UA set to iPhone, which is actually a false alarm
|
||||||
{
|
{
|
||||||
@ -1549,7 +1534,7 @@ class GenericIE(InfoExtractor):
|
|||||||
force_videoid = smuggled_data['force_videoid']
|
force_videoid = smuggled_data['force_videoid']
|
||||||
video_id = force_videoid
|
video_id = force_videoid
|
||||||
else:
|
else:
|
||||||
video_id = compat_urllib_parse_unquote(os.path.splitext(url.rstrip('/').split('/')[-1])[0])
|
video_id = self._generic_id(url)
|
||||||
|
|
||||||
self.to_screen('%s: Requesting header' % video_id)
|
self.to_screen('%s: Requesting header' % video_id)
|
||||||
|
|
||||||
@ -1578,7 +1563,7 @@ class GenericIE(InfoExtractor):
|
|||||||
|
|
||||||
info_dict = {
|
info_dict = {
|
||||||
'id': video_id,
|
'id': video_id,
|
||||||
'title': compat_urllib_parse_unquote(os.path.splitext(url_basename(url))[0]),
|
'title': self._generic_title(url),
|
||||||
'upload_date': unified_strdate(head_response.headers.get('Last-Modified'))
|
'upload_date': unified_strdate(head_response.headers.get('Last-Modified'))
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1754,9 +1739,9 @@ class GenericIE(InfoExtractor):
|
|||||||
if matches:
|
if matches:
|
||||||
return _playlist_from_matches(matches, ie='RtlNl')
|
return _playlist_from_matches(matches, ie='RtlNl')
|
||||||
|
|
||||||
vimeo_url = VimeoIE._extract_vimeo_url(url, webpage)
|
vimeo_urls = VimeoIE._extract_urls(url, webpage)
|
||||||
if vimeo_url is not None:
|
if vimeo_urls:
|
||||||
return self.url_result(vimeo_url)
|
return _playlist_from_matches(vimeo_urls, ie=VimeoIE.ie_key())
|
||||||
|
|
||||||
vid_me_embed_url = self._search_regex(
|
vid_me_embed_url = self._search_regex(
|
||||||
r'src=[\'"](https?://vid\.me/[^\'"]+)[\'"]',
|
r'src=[\'"](https?://vid\.me/[^\'"]+)[\'"]',
|
||||||
|
@ -4,9 +4,6 @@ import itertools
|
|||||||
import re
|
import re
|
||||||
|
|
||||||
from .common import SearchInfoExtractor
|
from .common import SearchInfoExtractor
|
||||||
from ..compat import (
|
|
||||||
compat_urllib_parse,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
class GoogleSearchIE(SearchInfoExtractor):
|
class GoogleSearchIE(SearchInfoExtractor):
|
||||||
@ -34,13 +31,16 @@ class GoogleSearchIE(SearchInfoExtractor):
|
|||||||
}
|
}
|
||||||
|
|
||||||
for pagenum in itertools.count():
|
for pagenum in itertools.count():
|
||||||
result_url = (
|
|
||||||
'http://www.google.com/search?tbm=vid&q=%s&start=%s&hl=en'
|
|
||||||
% (compat_urllib_parse.quote_plus(query), pagenum * 10))
|
|
||||||
|
|
||||||
webpage = self._download_webpage(
|
webpage = self._download_webpage(
|
||||||
result_url, 'gvsearch:' + query,
|
'http://www.google.com/search',
|
||||||
note='Downloading result page ' + str(pagenum + 1))
|
'gvsearch:' + query,
|
||||||
|
note='Downloading result page %s' % (pagenum + 1),
|
||||||
|
query={
|
||||||
|
'tbm': 'vid',
|
||||||
|
'q': query,
|
||||||
|
'start': pagenum * 10,
|
||||||
|
'hl': 'en',
|
||||||
|
})
|
||||||
|
|
||||||
for hit_idx, mobj in enumerate(re.finditer(
|
for hit_idx, mobj in enumerate(re.finditer(
|
||||||
r'<h3 class="r"><a href="([^"]+)"', webpage)):
|
r'<h3 class="r"><a href="([^"]+)"', webpage)):
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
# -*- coding: utf-8 -*-
|
# coding: utf-8
|
||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
# -*- coding: utf-8 -*-
|
# coding: utf-8
|
||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
|
@ -12,17 +12,7 @@ from ..utils import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
class HBOIE(InfoExtractor):
|
class HBOBaseIE(InfoExtractor):
|
||||||
_VALID_URL = r'https?://(?:www\.)?hbo\.com/video/video\.html\?.*vid=(?P<id>[0-9]+)'
|
|
||||||
_TEST = {
|
|
||||||
'url': 'http://www.hbo.com/video/video.html?autoplay=true&g=u&vid=1437839',
|
|
||||||
'md5': '1c33253f0c7782142c993c0ba62a8753',
|
|
||||||
'info_dict': {
|
|
||||||
'id': '1437839',
|
|
||||||
'ext': 'mp4',
|
|
||||||
'title': 'Ep. 64 Clip: Encryption',
|
|
||||||
}
|
|
||||||
}
|
|
||||||
_FORMATS_INFO = {
|
_FORMATS_INFO = {
|
||||||
'1920': {
|
'1920': {
|
||||||
'width': 1280,
|
'width': 1280,
|
||||||
@ -50,8 +40,7 @@ class HBOIE(InfoExtractor):
|
|||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _extract_from_id(self, video_id):
|
||||||
video_id = self._match_id(url)
|
|
||||||
video_data = self._download_xml(
|
video_data = self._download_xml(
|
||||||
'http://render.lv3.hbo.com/data/content/global/videos/data/%s.xml' % video_id, video_id)
|
'http://render.lv3.hbo.com/data/content/global/videos/data/%s.xml' % video_id, video_id)
|
||||||
title = xpath_text(video_data, 'title', 'title', True)
|
title = xpath_text(video_data, 'title', 'title', True)
|
||||||
@ -116,7 +105,60 @@ class HBOIE(InfoExtractor):
|
|||||||
return {
|
return {
|
||||||
'id': video_id,
|
'id': video_id,
|
||||||
'title': title,
|
'title': title,
|
||||||
'duration': parse_duration(xpath_element(video_data, 'duration/tv14')),
|
'duration': parse_duration(xpath_text(video_data, 'duration/tv14')),
|
||||||
'formats': formats,
|
'formats': formats,
|
||||||
'thumbnails': thumbnails,
|
'thumbnails': thumbnails,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
class HBOIE(HBOBaseIE):
|
||||||
|
_VALID_URL = r'https?://(?:www\.)?hbo\.com/video/video\.html\?.*vid=(?P<id>[0-9]+)'
|
||||||
|
_TEST = {
|
||||||
|
'url': 'http://www.hbo.com/video/video.html?autoplay=true&g=u&vid=1437839',
|
||||||
|
'md5': '1c33253f0c7782142c993c0ba62a8753',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '1437839',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': 'Ep. 64 Clip: Encryption',
|
||||||
|
'thumbnail': 're:https?://.*\.jpg$',
|
||||||
|
'duration': 1072,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
def _real_extract(self, url):
|
||||||
|
video_id = self._match_id(url)
|
||||||
|
return self._extract_from_id(video_id)
|
||||||
|
|
||||||
|
|
||||||
|
class HBOEpisodeIE(HBOBaseIE):
|
||||||
|
_VALID_URL = r'https?://(?:www\.)?hbo\.com/(?!video)([^/]+/)+video/(?P<id>[0-9a-z-]+)\.html'
|
||||||
|
|
||||||
|
_TESTS = [{
|
||||||
|
'url': 'http://www.hbo.com/girls/episodes/5/52-i-love-you-baby/video/ep-52-inside-the-episode.html?autoplay=true',
|
||||||
|
'md5': '689132b253cc0ab7434237fc3a293210',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '1439518',
|
||||||
|
'display_id': 'ep-52-inside-the-episode',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': 'Ep. 52: Inside the Episode',
|
||||||
|
'thumbnail': 're:https?://.*\.jpg$',
|
||||||
|
'duration': 240,
|
||||||
|
},
|
||||||
|
}, {
|
||||||
|
'url': 'http://www.hbo.com/game-of-thrones/about/video/season-5-invitation-to-the-set.html?autoplay=true',
|
||||||
|
'only_matching': True,
|
||||||
|
}]
|
||||||
|
|
||||||
|
def _real_extract(self, url):
|
||||||
|
display_id = self._match_id(url)
|
||||||
|
|
||||||
|
webpage = self._download_webpage(url, display_id)
|
||||||
|
|
||||||
|
video_id = self._search_regex(
|
||||||
|
r'(?P<q1>[\'"])videoId(?P=q1)\s*:\s*(?P<q2>[\'"])(?P<video_id>\d+)(?P=q2)',
|
||||||
|
webpage, 'video ID', group='video_id')
|
||||||
|
|
||||||
|
info_dict = self._extract_from_id(video_id)
|
||||||
|
info_dict['display_id'] = display_id
|
||||||
|
|
||||||
|
return info_dict
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
# -*- coding: utf-8 -*-
|
# coding: utf-8
|
||||||
|
|
||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
|
@ -1,8 +1,6 @@
|
|||||||
# coding: utf-8
|
# coding: utf-8
|
||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
import re
|
|
||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
from ..utils import (
|
from ..utils import (
|
||||||
int_or_none,
|
int_or_none,
|
||||||
@ -14,29 +12,24 @@ class HornBunnyIE(InfoExtractor):
|
|||||||
_VALID_URL = r'http?://(?:www\.)?hornbunny\.com/videos/(?P<title_dash>[a-z-]+)-(?P<id>\d+)\.html'
|
_VALID_URL = r'http?://(?:www\.)?hornbunny\.com/videos/(?P<title_dash>[a-z-]+)-(?P<id>\d+)\.html'
|
||||||
_TEST = {
|
_TEST = {
|
||||||
'url': 'http://hornbunny.com/videos/panty-slut-jerk-off-instruction-5227.html',
|
'url': 'http://hornbunny.com/videos/panty-slut-jerk-off-instruction-5227.html',
|
||||||
'md5': '95e40865aedd08eff60272b704852ad7',
|
'md5': 'e20fd862d1894b67564c96f180f43924',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '5227',
|
'id': '5227',
|
||||||
'ext': 'flv',
|
'ext': 'mp4',
|
||||||
'title': 'panty slut jerk off instruction',
|
'title': 'panty slut jerk off instruction',
|
||||||
'duration': 550,
|
'duration': 550,
|
||||||
'age_limit': 18,
|
'age_limit': 18,
|
||||||
|
'view_count': int,
|
||||||
|
'thumbnail': 're:^https?://.*\.jpg$',
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
mobj = re.match(self._VALID_URL, url)
|
video_id = self._match_id(url)
|
||||||
video_id = mobj.group('id')
|
|
||||||
|
|
||||||
webpage = self._download_webpage(
|
webpage = self._download_webpage(url, video_id)
|
||||||
url, video_id, note='Downloading initial webpage')
|
title = self._og_search_title(webpage)
|
||||||
title = self._html_search_regex(
|
info_dict = self._parse_html5_media_entries(url, webpage, video_id)[0]
|
||||||
r'class="title">(.*?)</h2>', webpage, 'title')
|
|
||||||
redirect_url = self._html_search_regex(
|
|
||||||
r'pg&settings=(.*?)\|0"\);', webpage, 'title')
|
|
||||||
webpage2 = self._download_webpage(redirect_url, video_id)
|
|
||||||
video_url = self._html_search_regex(
|
|
||||||
r'flvMask:(.*?);', webpage2, 'video_url')
|
|
||||||
|
|
||||||
duration = parse_duration(self._search_regex(
|
duration = parse_duration(self._search_regex(
|
||||||
r'<strong>Runtime:</strong>\s*([0-9:]+)</div>',
|
r'<strong>Runtime:</strong>\s*([0-9:]+)</div>',
|
||||||
@ -45,12 +38,12 @@ class HornBunnyIE(InfoExtractor):
|
|||||||
r'<strong>Views:</strong>\s*(\d+)</div>',
|
r'<strong>Views:</strong>\s*(\d+)</div>',
|
||||||
webpage, 'view count', fatal=False))
|
webpage, 'view count', fatal=False))
|
||||||
|
|
||||||
return {
|
info_dict.update({
|
||||||
'id': video_id,
|
'id': video_id,
|
||||||
'url': video_url,
|
|
||||||
'title': title,
|
'title': title,
|
||||||
'ext': 'flv',
|
|
||||||
'duration': duration,
|
'duration': duration,
|
||||||
'view_count': view_count,
|
'view_count': view_count,
|
||||||
'age_limit': 18,
|
'age_limit': 18,
|
||||||
}
|
})
|
||||||
|
|
||||||
|
return info_dict
|
||||||
|
56
youtube_dl/extractor/huajiao.py
Normal file
56
youtube_dl/extractor/huajiao.py
Normal file
@ -0,0 +1,56 @@
|
|||||||
|
# coding: utf-8
|
||||||
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
|
from .common import InfoExtractor
|
||||||
|
from ..utils import (
|
||||||
|
parse_duration,
|
||||||
|
parse_iso8601,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
class HuajiaoIE(InfoExtractor):
|
||||||
|
IE_DESC = '花椒直播'
|
||||||
|
_VALID_URL = r'https?://(?:www\.)?huajiao\.com/l/(?P<id>[0-9]+)'
|
||||||
|
_TEST = {
|
||||||
|
'url': 'http://www.huajiao.com/l/38941232',
|
||||||
|
'md5': 'd08bf9ac98787d24d1e4c0283f2d372d',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '38941232',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': '#新人求关注#',
|
||||||
|
'description': 're:.*',
|
||||||
|
'duration': 2424.0,
|
||||||
|
'thumbnail': 're:^https?://.*\.jpg$',
|
||||||
|
'timestamp': 1475866459,
|
||||||
|
'upload_date': '20161007',
|
||||||
|
'uploader': 'Penny_余姿昀',
|
||||||
|
'uploader_id': '75206005',
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
def _real_extract(self, url):
|
||||||
|
video_id = self._match_id(url)
|
||||||
|
webpage = self._download_webpage(url, video_id)
|
||||||
|
|
||||||
|
feed_json = self._search_regex(
|
||||||
|
r'var\s+feed\s*=\s*({.+})', webpage, 'feed json')
|
||||||
|
feed = self._parse_json(feed_json, video_id)
|
||||||
|
|
||||||
|
description = self._html_search_meta(
|
||||||
|
'description', webpage, 'description', fatal=False)
|
||||||
|
|
||||||
|
def get(section, field):
|
||||||
|
return feed.get(section, {}).get(field)
|
||||||
|
|
||||||
|
return {
|
||||||
|
'id': video_id,
|
||||||
|
'title': feed['feed']['formated_title'],
|
||||||
|
'description': description,
|
||||||
|
'duration': parse_duration(get('feed', 'duration')),
|
||||||
|
'thumbnail': get('feed', 'image'),
|
||||||
|
'timestamp': parse_iso8601(feed.get('creatime'), ' '),
|
||||||
|
'uploader': get('author', 'nickname'),
|
||||||
|
'uploader_id': get('author', 'uid'),
|
||||||
|
'formats': self._extract_m3u8_formats(
|
||||||
|
feed['feed']['m3u8'], video_id, 'mp4', 'm3u8_native'),
|
||||||
|
}
|
@ -13,7 +13,7 @@ from ..utils import (
|
|||||||
|
|
||||||
|
|
||||||
class ImgurIE(InfoExtractor):
|
class ImgurIE(InfoExtractor):
|
||||||
_VALID_URL = r'https?://(?:i\.)?imgur\.com/(?:(?:gallery|topic/[^/]+)/)?(?P<id>[a-zA-Z0-9]{6,})(?:[/?#&]+|\.[a-z]+)?$'
|
_VALID_URL = r'https?://(?:i\.)?imgur\.com/(?:(?:gallery|(?:topic|r)/[^/]+)/)?(?P<id>[a-zA-Z0-9]{6,})(?:[/?#&]+|\.[a-z]+)?$'
|
||||||
|
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
'url': 'https://i.imgur.com/A61SaA1.gifv',
|
'url': 'https://i.imgur.com/A61SaA1.gifv',
|
||||||
@ -43,6 +43,9 @@ class ImgurIE(InfoExtractor):
|
|||||||
}, {
|
}, {
|
||||||
'url': 'http://imgur.com/topic/Funny/N8rOudd',
|
'url': 'http://imgur.com/topic/Funny/N8rOudd',
|
||||||
'only_matching': True,
|
'only_matching': True,
|
||||||
|
}, {
|
||||||
|
'url': 'http://imgur.com/r/aww/VQcQPhM',
|
||||||
|
'only_matching': True,
|
||||||
}]
|
}]
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
# encoding: utf-8
|
# coding: utf-8
|
||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
import re
|
import re
|
||||||
|
@ -81,6 +81,9 @@ class IPrimaIE(InfoExtractor):
|
|||||||
for _, src in re.findall(r'src["\']\s*:\s*(["\'])(.+?)\1', playerpage):
|
for _, src in re.findall(r'src["\']\s*:\s*(["\'])(.+?)\1', playerpage):
|
||||||
extract_formats(src)
|
extract_formats(src)
|
||||||
|
|
||||||
|
if not formats and '>GEO_IP_NOT_ALLOWED<' in playerpage:
|
||||||
|
self.raise_geo_restricted()
|
||||||
|
|
||||||
self._sort_formats(formats)
|
self._sort_formats(formats)
|
||||||
|
|
||||||
return {
|
return {
|
||||||
|
107
youtube_dl/extractor/jamendo.py
Normal file
107
youtube_dl/extractor/jamendo.py
Normal file
@ -0,0 +1,107 @@
|
|||||||
|
# coding: utf-8
|
||||||
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
|
import re
|
||||||
|
|
||||||
|
from ..compat import compat_urlparse
|
||||||
|
from .common import InfoExtractor
|
||||||
|
|
||||||
|
|
||||||
|
class JamendoIE(InfoExtractor):
|
||||||
|
_VALID_URL = r'https?://(?:www\.)?jamendo\.com/track/(?P<id>[0-9]+)/(?P<display_id>[^/?#&]+)'
|
||||||
|
_TEST = {
|
||||||
|
'url': 'https://www.jamendo.com/track/196219/stories-from-emona-i',
|
||||||
|
'md5': '6e9e82ed6db98678f171c25a8ed09ffd',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '196219',
|
||||||
|
'display_id': 'stories-from-emona-i',
|
||||||
|
'ext': 'flac',
|
||||||
|
'title': 'Stories from Emona I',
|
||||||
|
'thumbnail': 're:^https?://.*\.jpg'
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
def _real_extract(self, url):
|
||||||
|
mobj = self._VALID_URL_RE.match(url)
|
||||||
|
track_id = mobj.group('id')
|
||||||
|
display_id = mobj.group('display_id')
|
||||||
|
|
||||||
|
webpage = self._download_webpage(url, display_id)
|
||||||
|
|
||||||
|
title = self._html_search_meta('name', webpage, 'title')
|
||||||
|
|
||||||
|
formats = [{
|
||||||
|
'url': 'https://%s.jamendo.com/?trackid=%s&format=%s&from=app-97dab294'
|
||||||
|
% (sub_domain, track_id, format_id),
|
||||||
|
'format_id': format_id,
|
||||||
|
'ext': ext,
|
||||||
|
'quality': quality,
|
||||||
|
} for quality, (format_id, sub_domain, ext) in enumerate((
|
||||||
|
('mp31', 'mp3l', 'mp3'),
|
||||||
|
('mp32', 'mp3d', 'mp3'),
|
||||||
|
('ogg1', 'ogg', 'ogg'),
|
||||||
|
('flac', 'flac', 'flac'),
|
||||||
|
))]
|
||||||
|
self._sort_formats(formats)
|
||||||
|
|
||||||
|
thumbnail = self._html_search_meta(
|
||||||
|
'image', webpage, 'thumbnail', fatal=False)
|
||||||
|
|
||||||
|
return {
|
||||||
|
'id': track_id,
|
||||||
|
'display_id': display_id,
|
||||||
|
'thumbnail': thumbnail,
|
||||||
|
'title': title,
|
||||||
|
'formats': formats
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
class JamendoAlbumIE(InfoExtractor):
|
||||||
|
_VALID_URL = r'https?://(?:www\.)?jamendo\.com/album/(?P<id>[0-9]+)/(?P<display_id>[\w-]+)'
|
||||||
|
_TEST = {
|
||||||
|
'url': 'https://www.jamendo.com/album/121486/duck-on-cover',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '121486',
|
||||||
|
'title': 'Duck On Cover'
|
||||||
|
},
|
||||||
|
'playlist': [{
|
||||||
|
'md5': 'e1a2fcb42bda30dfac990212924149a8',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '1032333',
|
||||||
|
'ext': 'flac',
|
||||||
|
'title': 'Warmachine'
|
||||||
|
}
|
||||||
|
}, {
|
||||||
|
'md5': '1f358d7b2f98edfe90fd55dac0799d50',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '1032330',
|
||||||
|
'ext': 'flac',
|
||||||
|
'title': 'Without Your Ghost'
|
||||||
|
}
|
||||||
|
}],
|
||||||
|
'params': {
|
||||||
|
'playlistend': 2
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
def _real_extract(self, url):
|
||||||
|
mobj = self._VALID_URL_RE.match(url)
|
||||||
|
album_id = mobj.group('id')
|
||||||
|
|
||||||
|
webpage = self._download_webpage(url, mobj.group('display_id'))
|
||||||
|
|
||||||
|
title = self._html_search_meta('name', webpage, 'title')
|
||||||
|
|
||||||
|
entries = [
|
||||||
|
self.url_result(
|
||||||
|
compat_urlparse.urljoin(url, m.group('path')),
|
||||||
|
ie=JamendoIE.ie_key(),
|
||||||
|
video_id=self._search_regex(
|
||||||
|
r'/track/(\d+)', m.group('path'),
|
||||||
|
'track id', default=None))
|
||||||
|
for m in re.finditer(
|
||||||
|
r'<a[^>]+href=(["\'])(?P<path>(?:(?!\1).)+)\1[^>]+class=["\'][^>]*js-trackrow-albumpage-link',
|
||||||
|
webpage)
|
||||||
|
]
|
||||||
|
|
||||||
|
return self.playlist_result(entries, album_id, title)
|
@ -1,4 +1,4 @@
|
|||||||
# coding=utf-8
|
# coding: utf-8
|
||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
# encoding: utf-8
|
# coding: utf-8
|
||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
# encoding: utf-8
|
# coding: utf-8
|
||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
import re
|
import re
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
# encoding: utf-8
|
# coding: utf-8
|
||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
import json
|
import json
|
||||||
|
128
youtube_dl/extractor/lego.py
Normal file
128
youtube_dl/extractor/lego.py
Normal file
@ -0,0 +1,128 @@
|
|||||||
|
# coding: utf-8
|
||||||
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
|
import re
|
||||||
|
|
||||||
|
from .common import InfoExtractor
|
||||||
|
from ..compat import compat_str
|
||||||
|
from ..utils import (
|
||||||
|
unescapeHTML,
|
||||||
|
parse_duration,
|
||||||
|
get_element_by_class,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
class LEGOIE(InfoExtractor):
|
||||||
|
_VALID_URL = r'https?://(?:www\.)?lego\.com/(?P<locale>[^/]+)/(?:[^/]+/)*videos/(?:[^/]+/)*[^/?#]+-(?P<id>[0-9a-f]+)'
|
||||||
|
_TESTS = [{
|
||||||
|
'url': 'http://www.lego.com/en-us/videos/themes/club/blocumentary-kawaguchi-55492d823b1b4d5e985787fa8c2973b1',
|
||||||
|
'md5': 'f34468f176cfd76488767fc162c405fa',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '55492d823b1b4d5e985787fa8c2973b1',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': 'Blocumentary Great Creations: Akiyuki Kawaguchi',
|
||||||
|
'description': 'Blocumentary Great Creations: Akiyuki Kawaguchi',
|
||||||
|
},
|
||||||
|
}, {
|
||||||
|
# geo-restricted but the contentUrl contain a valid url
|
||||||
|
'url': 'http://www.lego.com/nl-nl/videos/themes/nexoknights/episode-20-kingdom-of-heroes-13bdc2299ab24d9685701a915b3d71e7##sp=399',
|
||||||
|
'md5': '4c3fec48a12e40c6e5995abc3d36cc2e',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '13bdc2299ab24d9685701a915b3d71e7',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': 'Aflevering 20 - Helden van het koninkrijk',
|
||||||
|
'description': 'md5:8ee499aac26d7fa8bcb0cedb7f9c3941',
|
||||||
|
},
|
||||||
|
}, {
|
||||||
|
# special characters in title
|
||||||
|
'url': 'http://www.lego.com/en-us/starwars/videos/lego-star-wars-force-surprise-9685ee9d12e84ff38e84b4e3d0db533d',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '9685ee9d12e84ff38e84b4e3d0db533d',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': 'Force Surprise – LEGO® Star Wars™ Microfighters',
|
||||||
|
'description': 'md5:9c673c96ce6f6271b88563fe9dc56de3',
|
||||||
|
},
|
||||||
|
'params': {
|
||||||
|
'skip_download': True,
|
||||||
|
},
|
||||||
|
}]
|
||||||
|
_BITRATES = [256, 512, 1024, 1536, 2560]
|
||||||
|
|
||||||
|
def _real_extract(self, url):
|
||||||
|
locale, video_id = re.match(self._VALID_URL, url).groups()
|
||||||
|
webpage = self._download_webpage(url, video_id)
|
||||||
|
title = get_element_by_class('video-header', webpage).strip()
|
||||||
|
progressive_base = 'https://lc-mediaplayerns-live-s.legocdn.com/'
|
||||||
|
streaming_base = 'http://legoprod-f.akamaihd.net/'
|
||||||
|
content_url = self._html_search_meta('contentUrl', webpage)
|
||||||
|
path = self._search_regex(
|
||||||
|
r'(?:https?:)?//[^/]+/(?:[iz]/s/)?public/(.+)_[0-9,]+\.(?:mp4|webm)',
|
||||||
|
content_url, 'video path', default=None)
|
||||||
|
if not path:
|
||||||
|
player_url = self._proto_relative_url(self._search_regex(
|
||||||
|
r'<iframe[^>]+src="((?:https?)?//(?:www\.)?lego\.com/[^/]+/mediaplayer/video/[^"]+)',
|
||||||
|
webpage, 'player url', default=None))
|
||||||
|
if not player_url:
|
||||||
|
base_url = self._proto_relative_url(self._search_regex(
|
||||||
|
r'data-baseurl="([^"]+)"', webpage, 'base url',
|
||||||
|
default='http://www.lego.com/%s/mediaplayer/video/' % locale))
|
||||||
|
player_url = base_url + video_id
|
||||||
|
player_webpage = self._download_webpage(player_url, video_id)
|
||||||
|
video_data = self._parse_json(unescapeHTML(self._search_regex(
|
||||||
|
r"video='([^']+)'", player_webpage, 'video data')), video_id)
|
||||||
|
progressive_base = self._search_regex(
|
||||||
|
r'data-video-progressive-url="([^"]+)"',
|
||||||
|
player_webpage, 'progressive base', default='https://lc-mediaplayerns-live-s.legocdn.com/')
|
||||||
|
streaming_base = self._search_regex(
|
||||||
|
r'data-video-streaming-url="([^"]+)"',
|
||||||
|
player_webpage, 'streaming base', default='http://legoprod-f.akamaihd.net/')
|
||||||
|
item_id = video_data['ItemId']
|
||||||
|
|
||||||
|
net_storage_path = video_data.get('NetStoragePath') or '/'.join([item_id[:2], item_id[2:4]])
|
||||||
|
base_path = '_'.join([item_id, video_data['VideoId'], video_data['Locale'], compat_str(video_data['VideoVersion'])])
|
||||||
|
path = '/'.join([net_storage_path, base_path])
|
||||||
|
streaming_path = ','.join(map(lambda bitrate: compat_str(bitrate), self._BITRATES))
|
||||||
|
|
||||||
|
formats = self._extract_akamai_formats(
|
||||||
|
'%si/s/public/%s_,%s,.mp4.csmil/master.m3u8' % (streaming_base, path, streaming_path), video_id)
|
||||||
|
m3u8_formats = list(filter(
|
||||||
|
lambda f: f.get('protocol') == 'm3u8_native' and f.get('vcodec') != 'none' and f.get('resolution') != 'multiple',
|
||||||
|
formats))
|
||||||
|
if len(m3u8_formats) == len(self._BITRATES):
|
||||||
|
self._sort_formats(m3u8_formats)
|
||||||
|
for bitrate, m3u8_format in zip(self._BITRATES, m3u8_formats):
|
||||||
|
progressive_base_url = '%spublic/%s_%d.' % (progressive_base, path, bitrate)
|
||||||
|
mp4_f = m3u8_format.copy()
|
||||||
|
mp4_f.update({
|
||||||
|
'url': progressive_base_url + 'mp4',
|
||||||
|
'format_id': m3u8_format['format_id'].replace('hls', 'mp4'),
|
||||||
|
'protocol': 'http',
|
||||||
|
})
|
||||||
|
web_f = {
|
||||||
|
'url': progressive_base_url + 'webm',
|
||||||
|
'format_id': m3u8_format['format_id'].replace('hls', 'webm'),
|
||||||
|
'width': m3u8_format['width'],
|
||||||
|
'height': m3u8_format['height'],
|
||||||
|
'tbr': m3u8_format.get('tbr'),
|
||||||
|
'ext': 'webm',
|
||||||
|
}
|
||||||
|
formats.extend([web_f, mp4_f])
|
||||||
|
else:
|
||||||
|
for bitrate in self._BITRATES:
|
||||||
|
for ext in ('web', 'mp4'):
|
||||||
|
formats.append({
|
||||||
|
'format_id': '%s-%s' % (ext, bitrate),
|
||||||
|
'url': '%spublic/%s_%d.%s' % (progressive_base, path, bitrate, ext),
|
||||||
|
'tbr': bitrate,
|
||||||
|
'ext': ext,
|
||||||
|
})
|
||||||
|
self._sort_formats(formats)
|
||||||
|
|
||||||
|
return {
|
||||||
|
'id': video_id,
|
||||||
|
'title': title,
|
||||||
|
'description': self._html_search_meta('description', webpage),
|
||||||
|
'thumbnail': self._html_search_meta('thumbnail', webpage),
|
||||||
|
'duration': parse_duration(self._html_search_meta('duration', webpage)),
|
||||||
|
'formats': formats,
|
||||||
|
}
|
@ -1,4 +1,4 @@
|
|||||||
# encoding: utf-8
|
# coding: utf-8
|
||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
import re
|
import re
|
||||||
|
@ -2,7 +2,6 @@
|
|||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
import json
|
import json
|
||||||
import re
|
|
||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
from ..utils import (
|
from ..utils import (
|
||||||
@ -52,8 +51,8 @@ class LiTVIE(InfoExtractor):
|
|||||||
'skip': 'Georestricted to Taiwan',
|
'skip': 'Georestricted to Taiwan',
|
||||||
}]
|
}]
|
||||||
|
|
||||||
def _extract_playlist(self, season_list, video_id, vod_data, view_data, prompt=True):
|
def _extract_playlist(self, season_list, video_id, program_info, prompt=True):
|
||||||
episode_title = view_data['title']
|
episode_title = program_info['title']
|
||||||
content_id = season_list['contentId']
|
content_id = season_list['contentId']
|
||||||
|
|
||||||
if prompt:
|
if prompt:
|
||||||
@ -61,7 +60,7 @@ class LiTVIE(InfoExtractor):
|
|||||||
|
|
||||||
all_episodes = [
|
all_episodes = [
|
||||||
self.url_result(smuggle_url(
|
self.url_result(smuggle_url(
|
||||||
self._URL_TEMPLATE % (view_data['contentType'], episode['contentId']),
|
self._URL_TEMPLATE % (program_info['contentType'], episode['contentId']),
|
||||||
{'force_noplaylist': True})) # To prevent infinite recursion
|
{'force_noplaylist': True})) # To prevent infinite recursion
|
||||||
for episode in season_list['episode']]
|
for episode in season_list['episode']]
|
||||||
|
|
||||||
@ -80,19 +79,15 @@ class LiTVIE(InfoExtractor):
|
|||||||
|
|
||||||
webpage = self._download_webpage(url, video_id)
|
webpage = self._download_webpage(url, video_id)
|
||||||
|
|
||||||
view_data = dict(map(lambda t: (t[0], t[2]), re.findall(
|
program_info = self._parse_json(self._search_regex(
|
||||||
r'viewData\.([a-zA-Z]+)\s*=\s*(["\'])([^"\']+)\2',
|
'var\s+programInfo\s*=\s*([^;]+)', webpage, 'VOD data', default='{}'),
|
||||||
webpage)))
|
|
||||||
|
|
||||||
vod_data = self._parse_json(self._search_regex(
|
|
||||||
'var\s+vod\s*=\s*([^;]+)', webpage, 'VOD data', default='{}'),
|
|
||||||
video_id)
|
video_id)
|
||||||
|
|
||||||
season_list = list(vod_data.get('seasonList', {}).values())
|
season_list = list(program_info.get('seasonList', {}).values())
|
||||||
if season_list:
|
if season_list:
|
||||||
if not noplaylist:
|
if not noplaylist:
|
||||||
return self._extract_playlist(
|
return self._extract_playlist(
|
||||||
season_list[0], video_id, vod_data, view_data,
|
season_list[0], video_id, program_info,
|
||||||
prompt=noplaylist_prompt)
|
prompt=noplaylist_prompt)
|
||||||
|
|
||||||
if noplaylist_prompt:
|
if noplaylist_prompt:
|
||||||
@ -102,8 +97,8 @@ class LiTVIE(InfoExtractor):
|
|||||||
# endpoint gives the same result as the data embedded in the webpage.
|
# endpoint gives the same result as the data embedded in the webpage.
|
||||||
# If georestricted, there are no embedded data, so an extra request is
|
# If georestricted, there are no embedded data, so an extra request is
|
||||||
# necessary to get the error code
|
# necessary to get the error code
|
||||||
if 'assetId' not in view_data:
|
if 'assetId' not in program_info:
|
||||||
view_data = self._download_json(
|
program_info = self._download_json(
|
||||||
'https://www.litv.tv/vod/ajax/getProgramInfo', video_id,
|
'https://www.litv.tv/vod/ajax/getProgramInfo', video_id,
|
||||||
query={'contentId': video_id},
|
query={'contentId': video_id},
|
||||||
headers={'Accept': 'application/json'})
|
headers={'Accept': 'application/json'})
|
||||||
@ -112,9 +107,9 @@ class LiTVIE(InfoExtractor):
|
|||||||
webpage, 'video data', default='{}'), video_id)
|
webpage, 'video data', default='{}'), video_id)
|
||||||
if not video_data:
|
if not video_data:
|
||||||
payload = {
|
payload = {
|
||||||
'assetId': view_data['assetId'],
|
'assetId': program_info['assetId'],
|
||||||
'watchDevices': view_data['watchDevices'],
|
'watchDevices': program_info['watchDevices'],
|
||||||
'contentType': view_data['contentType'],
|
'contentType': program_info['contentType'],
|
||||||
}
|
}
|
||||||
video_data = self._download_json(
|
video_data = self._download_json(
|
||||||
'https://www.litv.tv/vod/getMainUrl', video_id,
|
'https://www.litv.tv/vod/getMainUrl', video_id,
|
||||||
@ -136,11 +131,11 @@ class LiTVIE(InfoExtractor):
|
|||||||
# LiTV HLS segments doesn't like compressions
|
# LiTV HLS segments doesn't like compressions
|
||||||
a_format.setdefault('http_headers', {})['Youtubedl-no-compression'] = True
|
a_format.setdefault('http_headers', {})['Youtubedl-no-compression'] = True
|
||||||
|
|
||||||
title = view_data['title'] + view_data.get('secondaryMark', '')
|
title = program_info['title'] + program_info.get('secondaryMark', '')
|
||||||
description = view_data.get('description')
|
description = program_info.get('description')
|
||||||
thumbnail = view_data.get('imageFile')
|
thumbnail = program_info.get('imageFile')
|
||||||
categories = [item['name'] for item in vod_data.get('category', [])]
|
categories = [item['name'] for item in program_info.get('category', [])]
|
||||||
episode = int_or_none(view_data.get('episode'))
|
episode = int_or_none(program_info.get('episode'))
|
||||||
|
|
||||||
return {
|
return {
|
||||||
'id': video_id,
|
'id': video_id,
|
||||||
|
@ -94,12 +94,12 @@ class LyndaBaseIE(InfoExtractor):
|
|||||||
class LyndaIE(LyndaBaseIE):
|
class LyndaIE(LyndaBaseIE):
|
||||||
IE_NAME = 'lynda'
|
IE_NAME = 'lynda'
|
||||||
IE_DESC = 'lynda.com videos'
|
IE_DESC = 'lynda.com videos'
|
||||||
_VALID_URL = r'https?://(?:www\.)?lynda\.com/(?:[^/]+/[^/]+/\d+|player/embed)/(?P<id>\d+)'
|
_VALID_URL = r'https?://(?:www\.)?lynda\.com/(?:[^/]+/[^/]+/(?P<course_id>\d+)|player/embed)/(?P<id>\d+)'
|
||||||
|
|
||||||
_TIMECODE_REGEX = r'\[(?P<timecode>\d+:\d+:\d+[\.,]\d+)\]'
|
_TIMECODE_REGEX = r'\[(?P<timecode>\d+:\d+:\d+[\.,]\d+)\]'
|
||||||
|
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
'url': 'http://www.lynda.com/Bootstrap-tutorials/Using-exercise-files/110885/114408-4.html',
|
'url': 'https://www.lynda.com/Bootstrap-tutorials/Using-exercise-files/110885/114408-4.html',
|
||||||
# md5 is unstable
|
# md5 is unstable
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '114408',
|
'id': '114408',
|
||||||
@ -112,19 +112,71 @@ class LyndaIE(LyndaBaseIE):
|
|||||||
'only_matching': True,
|
'only_matching': True,
|
||||||
}]
|
}]
|
||||||
|
|
||||||
|
def _raise_unavailable(self, video_id):
|
||||||
|
self.raise_login_required(
|
||||||
|
'Video %s is only available for members' % video_id)
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
video_id = self._match_id(url)
|
mobj = re.match(self._VALID_URL, url)
|
||||||
|
video_id = mobj.group('id')
|
||||||
|
course_id = mobj.group('course_id')
|
||||||
|
|
||||||
|
query = {
|
||||||
|
'videoId': video_id,
|
||||||
|
'type': 'video',
|
||||||
|
}
|
||||||
|
|
||||||
video = self._download_json(
|
video = self._download_json(
|
||||||
'http://www.lynda.com/ajax/player?videoId=%s&type=video' % video_id,
|
'https://www.lynda.com/ajax/player', video_id,
|
||||||
video_id, 'Downloading video JSON')
|
'Downloading video JSON', fatal=False, query=query)
|
||||||
|
|
||||||
|
# Fallback scenario
|
||||||
|
if not video:
|
||||||
|
query['courseId'] = course_id
|
||||||
|
|
||||||
|
play = self._download_json(
|
||||||
|
'https://www.lynda.com/ajax/course/%s/%s/play'
|
||||||
|
% (course_id, video_id), video_id, 'Downloading play JSON')
|
||||||
|
|
||||||
|
if not play:
|
||||||
|
self._raise_unavailable(video_id)
|
||||||
|
|
||||||
|
formats = []
|
||||||
|
for formats_dict in play:
|
||||||
|
urls = formats_dict.get('urls')
|
||||||
|
if not isinstance(urls, dict):
|
||||||
|
continue
|
||||||
|
cdn = formats_dict.get('name')
|
||||||
|
for format_id, format_url in urls.items():
|
||||||
|
if not format_url:
|
||||||
|
continue
|
||||||
|
formats.append({
|
||||||
|
'url': format_url,
|
||||||
|
'format_id': '%s-%s' % (cdn, format_id) if cdn else format_id,
|
||||||
|
'height': int_or_none(format_id),
|
||||||
|
})
|
||||||
|
self._sort_formats(formats)
|
||||||
|
|
||||||
|
conviva = self._download_json(
|
||||||
|
'https://www.lynda.com/ajax/player/conviva', video_id,
|
||||||
|
'Downloading conviva JSON', query=query)
|
||||||
|
|
||||||
|
return {
|
||||||
|
'id': video_id,
|
||||||
|
'title': conviva['VideoTitle'],
|
||||||
|
'description': conviva.get('VideoDescription'),
|
||||||
|
'release_year': int_or_none(conviva.get('ReleaseYear')),
|
||||||
|
'duration': int_or_none(conviva.get('Duration')),
|
||||||
|
'creator': conviva.get('Author'),
|
||||||
|
'formats': formats,
|
||||||
|
}
|
||||||
|
|
||||||
if 'Status' in video:
|
if 'Status' in video:
|
||||||
raise ExtractorError(
|
raise ExtractorError(
|
||||||
'lynda returned error: %s' % video['Message'], expected=True)
|
'lynda returned error: %s' % video['Message'], expected=True)
|
||||||
|
|
||||||
if video.get('HasAccess') is False:
|
if video.get('HasAccess') is False:
|
||||||
self.raise_login_required('Video %s is only available for members' % video_id)
|
self._raise_unavailable(video_id)
|
||||||
|
|
||||||
video_id = compat_str(video.get('ID') or video_id)
|
video_id = compat_str(video.get('ID') or video_id)
|
||||||
duration = int_or_none(video.get('DurationInSeconds'))
|
duration = int_or_none(video.get('DurationInSeconds'))
|
||||||
@ -148,7 +200,7 @@ class LyndaIE(LyndaBaseIE):
|
|||||||
for prioritized_stream_id, prioritized_stream in prioritized_streams.items():
|
for prioritized_stream_id, prioritized_stream in prioritized_streams.items():
|
||||||
formats.extend([{
|
formats.extend([{
|
||||||
'url': video_url,
|
'url': video_url,
|
||||||
'width': int_or_none(format_id),
|
'height': int_or_none(format_id),
|
||||||
'format_id': '%s-%s' % (prioritized_stream_id, format_id),
|
'format_id': '%s-%s' % (prioritized_stream_id, format_id),
|
||||||
} for format_id, video_url in prioritized_stream.items()])
|
} for format_id, video_url in prioritized_stream.items()])
|
||||||
|
|
||||||
@ -187,7 +239,7 @@ class LyndaIE(LyndaBaseIE):
|
|||||||
return srt
|
return srt
|
||||||
|
|
||||||
def _get_subtitles(self, video_id):
|
def _get_subtitles(self, video_id):
|
||||||
url = 'http://www.lynda.com/ajax/player?videoId=%s&type=transcript' % video_id
|
url = 'https://www.lynda.com/ajax/player?videoId=%s&type=transcript' % video_id
|
||||||
subs = self._download_json(url, None, False)
|
subs = self._download_json(url, None, False)
|
||||||
if subs:
|
if subs:
|
||||||
return {'en': [{'ext': 'srt', 'data': self._fix_subtitles(subs)}]}
|
return {'en': [{'ext': 'srt', 'data': self._fix_subtitles(subs)}]}
|
||||||
@ -209,7 +261,7 @@ class LyndaCourseIE(LyndaBaseIE):
|
|||||||
course_id = mobj.group('courseid')
|
course_id = mobj.group('courseid')
|
||||||
|
|
||||||
course = self._download_json(
|
course = self._download_json(
|
||||||
'http://www.lynda.com/ajax/player?courseId=%s&type=course' % course_id,
|
'https://www.lynda.com/ajax/player?courseId=%s&type=course' % course_id,
|
||||||
course_id, 'Downloading course JSON')
|
course_id, 'Downloading course JSON')
|
||||||
|
|
||||||
if course.get('Status') == 'NotFound':
|
if course.get('Status') == 'NotFound':
|
||||||
@ -231,7 +283,7 @@ class LyndaCourseIE(LyndaBaseIE):
|
|||||||
if video_id:
|
if video_id:
|
||||||
entries.append({
|
entries.append({
|
||||||
'_type': 'url_transparent',
|
'_type': 'url_transparent',
|
||||||
'url': 'http://www.lynda.com/%s/%s-4.html' % (course_path, video_id),
|
'url': 'https://www.lynda.com/%s/%s-4.html' % (course_path, video_id),
|
||||||
'ie_key': LyndaIE.ie_key(),
|
'ie_key': LyndaIE.ie_key(),
|
||||||
'chapter': chapter.get('Title'),
|
'chapter': chapter.get('Title'),
|
||||||
'chapter_number': int_or_none(chapter.get('ChapterIndex')),
|
'chapter_number': int_or_none(chapter.get('ChapterIndex')),
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
# encoding: utf-8
|
# coding: utf-8
|
||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
# encoding: utf-8
|
# coding: utf-8
|
||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
import re
|
import re
|
||||||
|
@ -71,12 +71,15 @@ class MicrosoftVirtualAcademyIE(MicrosoftVirtualAcademyBaseIE):
|
|||||||
formats = []
|
formats = []
|
||||||
|
|
||||||
for sources in settings.findall(compat_xpath('.//MediaSources')):
|
for sources in settings.findall(compat_xpath('.//MediaSources')):
|
||||||
if sources.get('videoType') == 'smoothstreaming':
|
sources_type = sources.get('videoType')
|
||||||
continue
|
|
||||||
for source in sources.findall(compat_xpath('./MediaSource')):
|
for source in sources.findall(compat_xpath('./MediaSource')):
|
||||||
video_url = source.text
|
video_url = source.text
|
||||||
if not video_url or not video_url.startswith('http'):
|
if not video_url or not video_url.startswith('http'):
|
||||||
continue
|
continue
|
||||||
|
if sources_type == 'smoothstreaming':
|
||||||
|
formats.extend(self._extract_ism_formats(
|
||||||
|
video_url, video_id, 'mss', fatal=False))
|
||||||
|
continue
|
||||||
video_mode = source.get('videoMode')
|
video_mode = source.get('videoMode')
|
||||||
height = int_or_none(self._search_regex(
|
height = int_or_none(self._search_regex(
|
||||||
r'^(\d+)[pP]$', video_mode or '', 'height', default=None))
|
r'^(\d+)[pP]$', video_mode or '', 'height', default=None))
|
||||||
|
@ -11,7 +11,7 @@ from ..utils import (
|
|||||||
|
|
||||||
|
|
||||||
class MovieClipsIE(InfoExtractor):
|
class MovieClipsIE(InfoExtractor):
|
||||||
_VALID_URL = r'https?://(?:www.)?movieclips\.com/videos/.+-(?P<id>\d+)(?:\?|$)'
|
_VALID_URL = r'https?://(?:www\.)?movieclips\.com/videos/.+-(?P<id>\d+)(?:\?|$)'
|
||||||
_TEST = {
|
_TEST = {
|
||||||
'url': 'http://www.movieclips.com/videos/warcraft-trailer-1-561180739597',
|
'url': 'http://www.movieclips.com/videos/warcraft-trailer-1-561180739597',
|
||||||
'md5': '42b5a0352d4933a7bd54f2104f481244',
|
'md5': '42b5a0352d4933a7bd54f2104f481244',
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
# -*- coding: utf-8 -*-
|
# coding: utf-8
|
||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
import re
|
import re
|
||||||
|
@ -69,10 +69,9 @@ class MSNIE(InfoExtractor):
|
|||||||
if not format_url:
|
if not format_url:
|
||||||
continue
|
continue
|
||||||
ext = determine_ext(format_url)
|
ext = determine_ext(format_url)
|
||||||
# .ism is not yet supported (see
|
|
||||||
# https://github.com/rg3/youtube-dl/issues/8118)
|
|
||||||
if ext == 'ism':
|
if ext == 'ism':
|
||||||
continue
|
formats.extend(self._extract_ism_formats(
|
||||||
|
format_url + '/Manifest', display_id, 'mss', fatal=False))
|
||||||
if 'm3u8' in format_url:
|
if 'm3u8' in format_url:
|
||||||
# m3u8_native should not be used here until
|
# m3u8_native should not be used here until
|
||||||
# https://github.com/rg3/youtube-dl/issues/9913 is fixed
|
# https://github.com/rg3/youtube-dl/issues/9913 is fixed
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
# encoding: utf-8
|
# coding: utf-8
|
||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
# encoding: utf-8
|
# coding: utf-8
|
||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
import re
|
import re
|
||||||
|
@ -4,6 +4,7 @@ import re
|
|||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
from .adobepass import AdobePassIE
|
from .adobepass import AdobePassIE
|
||||||
|
from .theplatform import ThePlatformIE
|
||||||
from ..utils import (
|
from ..utils import (
|
||||||
smuggle_url,
|
smuggle_url,
|
||||||
url_basename,
|
url_basename,
|
||||||
@ -65,7 +66,7 @@ class NationalGeographicVideoIE(InfoExtractor):
|
|||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
class NationalGeographicIE(AdobePassIE):
|
class NationalGeographicIE(ThePlatformIE, AdobePassIE):
|
||||||
IE_NAME = 'natgeo'
|
IE_NAME = 'natgeo'
|
||||||
_VALID_URL = r'https?://channel\.nationalgeographic\.com/(?:wild/)?[^/]+/(?:videos|episodes)/(?P<id>[^/?]+)'
|
_VALID_URL = r'https?://channel\.nationalgeographic\.com/(?:wild/)?[^/]+/(?:videos|episodes)/(?P<id>[^/?]+)'
|
||||||
|
|
||||||
@ -110,25 +111,39 @@ class NationalGeographicIE(AdobePassIE):
|
|||||||
release_url = self._search_regex(
|
release_url = self._search_regex(
|
||||||
r'video_auth_playlist_url\s*=\s*"([^"]+)"',
|
r'video_auth_playlist_url\s*=\s*"([^"]+)"',
|
||||||
webpage, 'release url')
|
webpage, 'release url')
|
||||||
|
theplatform_path = self._search_regex(r'https?://link.theplatform.com/s/([^?]+)', release_url, 'theplatform path')
|
||||||
|
video_id = theplatform_path.split('/')[-1]
|
||||||
query = {
|
query = {
|
||||||
'mbr': 'true',
|
'mbr': 'true',
|
||||||
'switch': 'http',
|
|
||||||
}
|
}
|
||||||
is_auth = self._search_regex(r'video_is_auth\s*=\s*"([^"]+)"', webpage, 'is auth', fatal=False)
|
is_auth = self._search_regex(r'video_is_auth\s*=\s*"([^"]+)"', webpage, 'is auth', fatal=False)
|
||||||
if is_auth == 'auth':
|
if is_auth == 'auth':
|
||||||
auth_resource_id = self._search_regex(
|
auth_resource_id = self._search_regex(
|
||||||
r"video_auth_resourceId\s*=\s*'([^']+)'",
|
r"video_auth_resourceId\s*=\s*'([^']+)'",
|
||||||
webpage, 'auth resource id')
|
webpage, 'auth resource id')
|
||||||
query['auth'] = self._extract_mvpd_auth(url, display_id, 'natgeo', auth_resource_id)
|
query['auth'] = self._extract_mvpd_auth(url, video_id, 'natgeo', auth_resource_id)
|
||||||
|
|
||||||
return {
|
formats = []
|
||||||
'_type': 'url_transparent',
|
subtitles = {}
|
||||||
'ie_key': 'ThePlatform',
|
for key, value in (('switch', 'http'), ('manifest', 'm3u')):
|
||||||
'url': smuggle_url(
|
tp_query = query.copy()
|
||||||
update_url_query(release_url, query),
|
tp_query.update({
|
||||||
{'force_smil_url': True}),
|
key: value,
|
||||||
|
})
|
||||||
|
tp_formats, tp_subtitles = self._extract_theplatform_smil(
|
||||||
|
update_url_query(release_url, tp_query), video_id, 'Downloading %s SMIL data' % value)
|
||||||
|
formats.extend(tp_formats)
|
||||||
|
subtitles = self._merge_subtitles(subtitles, tp_subtitles)
|
||||||
|
self._sort_formats(formats)
|
||||||
|
|
||||||
|
info = self._extract_theplatform_metadata(theplatform_path, display_id)
|
||||||
|
info.update({
|
||||||
|
'id': video_id,
|
||||||
|
'formats': formats,
|
||||||
|
'subtitles': subtitles,
|
||||||
'display_id': display_id,
|
'display_id': display_id,
|
||||||
}
|
})
|
||||||
|
return info
|
||||||
|
|
||||||
|
|
||||||
class NationalGeographicEpisodeGuideIE(InfoExtractor):
|
class NationalGeographicEpisodeGuideIE(InfoExtractor):
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
# encoding: utf-8
|
# coding: utf-8
|
||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
import re
|
import re
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
# encoding: utf-8
|
# coding: utf-8
|
||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
import re
|
import re
|
||||||
|
@ -93,7 +93,7 @@ class NextMediaActionNewsIE(NextMediaIE):
|
|||||||
|
|
||||||
class AppleDailyIE(NextMediaIE):
|
class AppleDailyIE(NextMediaIE):
|
||||||
IE_DESC = '臺灣蘋果日報'
|
IE_DESC = '臺灣蘋果日報'
|
||||||
_VALID_URL = r'https?://(www|ent)\.appledaily\.com\.tw/(?:animation|appledaily|enews|realtimenews)/[^/]+/[^/]+/(?P<date>\d+)/(?P<id>\d+)(/.*)?'
|
_VALID_URL = r'https?://(www|ent)\.appledaily\.com\.tw/(?:animation|appledaily|enews|realtimenews|actionnews)/[^/]+/[^/]+/(?P<date>\d+)/(?P<id>\d+)(/.*)?'
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
'url': 'http://ent.appledaily.com.tw/enews/article/entertainment/20150128/36354694',
|
'url': 'http://ent.appledaily.com.tw/enews/article/entertainment/20150128/36354694',
|
||||||
'md5': 'a843ab23d150977cc55ef94f1e2c1e4d',
|
'md5': 'a843ab23d150977cc55ef94f1e2c1e4d',
|
||||||
@ -154,6 +154,9 @@ class AppleDailyIE(NextMediaIE):
|
|||||||
'description': 'md5:7b859991a6a4fedbdf3dd3b66545c748',
|
'description': 'md5:7b859991a6a4fedbdf3dd3b66545c748',
|
||||||
'upload_date': '20140417',
|
'upload_date': '20140417',
|
||||||
},
|
},
|
||||||
|
}, {
|
||||||
|
'url': 'http://www.appledaily.com.tw/actionnews/appledaily/7/20161003/960588/',
|
||||||
|
'only_matching': True,
|
||||||
}]
|
}]
|
||||||
|
|
||||||
_URL_PATTERN = r'\{url: \'(.+)\'\}'
|
_URL_PATTERN = r'\{url: \'(.+)\'\}'
|
||||||
|
@ -245,7 +245,11 @@ class NHLVideocenterCategoryIE(NHLBaseInfoExtractor):
|
|||||||
|
|
||||||
class NHLIE(InfoExtractor):
|
class NHLIE(InfoExtractor):
|
||||||
IE_NAME = 'nhl.com'
|
IE_NAME = 'nhl.com'
|
||||||
_VALID_URL = r'https?://(?:www\.)?nhl\.com/([^/]+/)*c-(?P<id>\d+)'
|
_VALID_URL = r'https?://(?:www\.)?(?P<site>nhl|wch2016)\.com/(?:[^/]+/)*c-(?P<id>\d+)'
|
||||||
|
_SITES_MAP = {
|
||||||
|
'nhl': 'nhl',
|
||||||
|
'wch2016': 'wch',
|
||||||
|
}
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
# type=video
|
# type=video
|
||||||
'url': 'https://www.nhl.com/video/anisimov-cleans-up-mess/t-277752844/c-43663503',
|
'url': 'https://www.nhl.com/video/anisimov-cleans-up-mess/t-277752844/c-43663503',
|
||||||
@ -270,13 +274,32 @@ class NHLIE(InfoExtractor):
|
|||||||
'upload_date': '20160204',
|
'upload_date': '20160204',
|
||||||
'timestamp': 1454544904,
|
'timestamp': 1454544904,
|
||||||
},
|
},
|
||||||
|
}, {
|
||||||
|
# Some m3u8 URLs are invalid (https://github.com/rg3/youtube-dl/issues/10713)
|
||||||
|
'url': 'https://www.nhl.com/predators/video/poile-laviolette-on-subban-trade/t-277437416/c-44315003',
|
||||||
|
'md5': '50b2bb47f405121484dda3ccbea25459',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '44315003',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': 'Poile, Laviolette on Subban trade',
|
||||||
|
'description': 'General manager David Poile and head coach Peter Laviolette share their thoughts on acquiring P.K. Subban from Montreal (06/29/16)',
|
||||||
|
'timestamp': 1467242866,
|
||||||
|
'upload_date': '20160629',
|
||||||
|
},
|
||||||
|
}, {
|
||||||
|
'url': 'https://www.wch2016.com/video/caneur-best-of-game-2-micd-up/t-281230378/c-44983703',
|
||||||
|
'only_matching': True,
|
||||||
|
}, {
|
||||||
|
'url': 'https://www.wch2016.com/news/3-stars-team-europe-vs-team-canada/c-282195068',
|
||||||
|
'only_matching': True,
|
||||||
}]
|
}]
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
tmp_id = self._match_id(url)
|
mobj = re.match(self._VALID_URL, url)
|
||||||
|
tmp_id, site = mobj.group('id'), mobj.group('site')
|
||||||
video_data = self._download_json(
|
video_data = self._download_json(
|
||||||
'https://nhl.bamcontent.com/nhl/id/v1/%s/details/web-v1.json' % tmp_id,
|
'https://nhl.bamcontent.com/%s/id/v1/%s/details/web-v1.json'
|
||||||
tmp_id)
|
% (self._SITES_MAP[site], tmp_id), tmp_id)
|
||||||
if video_data.get('type') == 'article':
|
if video_data.get('type') == 'article':
|
||||||
video_data = video_data['media']
|
video_data = video_data['media']
|
||||||
|
|
||||||
@ -290,9 +313,11 @@ class NHLIE(InfoExtractor):
|
|||||||
continue
|
continue
|
||||||
ext = determine_ext(playback_url)
|
ext = determine_ext(playback_url)
|
||||||
if ext == 'm3u8':
|
if ext == 'm3u8':
|
||||||
formats.extend(self._extract_m3u8_formats(
|
m3u8_formats = self._extract_m3u8_formats(
|
||||||
playback_url, video_id, 'mp4', 'm3u8_native',
|
playback_url, video_id, 'mp4', 'm3u8_native',
|
||||||
m3u8_id=playback.get('name', 'hls'), fatal=False))
|
m3u8_id=playback.get('name', 'hls'), fatal=False)
|
||||||
|
self._check_formats(m3u8_formats, video_id)
|
||||||
|
formats.extend(m3u8_formats)
|
||||||
else:
|
else:
|
||||||
height = int_or_none(playback.get('height'))
|
height = int_or_none(playback.get('height'))
|
||||||
formats.append({
|
formats.append({
|
||||||
|
@ -1,6 +1,8 @@
|
|||||||
# coding: utf-8
|
# coding: utf-8
|
||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
|
import re
|
||||||
|
|
||||||
from .mtv import MTVServicesInfoExtractor
|
from .mtv import MTVServicesInfoExtractor
|
||||||
from ..utils import update_url_query
|
from ..utils import update_url_query
|
||||||
|
|
||||||
@ -69,7 +71,7 @@ class NickIE(MTVServicesInfoExtractor):
|
|||||||
|
|
||||||
class NickDeIE(MTVServicesInfoExtractor):
|
class NickDeIE(MTVServicesInfoExtractor):
|
||||||
IE_NAME = 'nick.de'
|
IE_NAME = 'nick.de'
|
||||||
_VALID_URL = r'https?://(?:www\.)?(?:nick\.de|nickelodeon\.nl)/(?:playlist|shows)/(?:[^/]+/)*(?P<id>[^/?#&]+)'
|
_VALID_URL = r'https?://(?:www\.)?(?P<host>nick\.de|nickelodeon\.(?:nl|at))/(?:playlist|shows)/(?:[^/]+/)*(?P<id>[^/?#&]+)'
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
'url': 'http://www.nick.de/playlist/3773-top-videos/videos/episode/17306-zu-wasser-und-zu-land-rauchende-erdnusse',
|
'url': 'http://www.nick.de/playlist/3773-top-videos/videos/episode/17306-zu-wasser-und-zu-land-rauchende-erdnusse',
|
||||||
'only_matching': True,
|
'only_matching': True,
|
||||||
@ -79,15 +81,43 @@ class NickDeIE(MTVServicesInfoExtractor):
|
|||||||
}, {
|
}, {
|
||||||
'url': 'http://www.nickelodeon.nl/shows/474-spongebob/videos/17403-een-kijkje-in-de-keuken-met-sandy-van-binnenuit',
|
'url': 'http://www.nickelodeon.nl/shows/474-spongebob/videos/17403-een-kijkje-in-de-keuken-met-sandy-van-binnenuit',
|
||||||
'only_matching': True,
|
'only_matching': True,
|
||||||
|
}, {
|
||||||
|
'url': 'http://www.nickelodeon.at/playlist/3773-top-videos/videos/episode/77993-das-letzte-gefecht',
|
||||||
|
'only_matching': True,
|
||||||
}]
|
}]
|
||||||
|
|
||||||
|
def _extract_mrss_url(self, webpage, host):
|
||||||
|
return update_url_query(self._search_regex(
|
||||||
|
r'data-mrss=(["\'])(?P<url>http.+?)\1', webpage, 'mrss url', group='url'),
|
||||||
|
{'siteKey': host})
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
video_id = self._match_id(url)
|
mobj = re.match(self._VALID_URL, url)
|
||||||
|
video_id = mobj.group('id')
|
||||||
|
host = mobj.group('host')
|
||||||
|
|
||||||
webpage = self._download_webpage(url, video_id)
|
webpage = self._download_webpage(url, video_id)
|
||||||
|
|
||||||
mrss_url = update_url_query(self._search_regex(
|
mrss_url = self._extract_mrss_url(webpage, host)
|
||||||
r'data-mrss=(["\'])(?P<url>http.+?)\1', webpage, 'mrss url', group='url'),
|
|
||||||
{'siteKey': 'nick.de'})
|
|
||||||
|
|
||||||
return self._get_videos_info_from_url(mrss_url, video_id)
|
return self._get_videos_info_from_url(mrss_url, video_id)
|
||||||
|
|
||||||
|
|
||||||
|
class NickNightIE(NickDeIE):
|
||||||
|
IE_NAME = 'nicknight'
|
||||||
|
_VALID_URL = r'https?://(?:www\.)(?P<host>nicknight\.(?:de|at|tv))/(?:playlist|shows)/(?:[^/]+/)*(?P<id>[^/?#&]+)'
|
||||||
|
_TESTS = [{
|
||||||
|
'url': 'http://www.nicknight.at/shows/977-awkward/videos/85987-nimmer-beste-freunde',
|
||||||
|
'only_matching': True,
|
||||||
|
}, {
|
||||||
|
'url': 'http://www.nicknight.at/shows/977-awkward',
|
||||||
|
'only_matching': True,
|
||||||
|
}, {
|
||||||
|
'url': 'http://www.nicknight.at/shows/1900-faking-it',
|
||||||
|
'only_matching': True,
|
||||||
|
}]
|
||||||
|
|
||||||
|
def _extract_mrss_url(self, webpage, *args):
|
||||||
|
return self._search_regex(
|
||||||
|
r'mrss\s*:\s*(["\'])(?P<url>http.+?)\1', webpage,
|
||||||
|
'mrss url', group='url')
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
# encoding: utf-8
|
# coding: utf-8
|
||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
import re
|
import re
|
||||||
|
62
youtube_dl/extractor/nobelprize.py
Normal file
62
youtube_dl/extractor/nobelprize.py
Normal file
@ -0,0 +1,62 @@
|
|||||||
|
# coding: utf-8
|
||||||
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
|
from .common import InfoExtractor
|
||||||
|
from ..utils import (
|
||||||
|
js_to_json,
|
||||||
|
mimetype2ext,
|
||||||
|
determine_ext,
|
||||||
|
update_url_query,
|
||||||
|
get_element_by_attribute,
|
||||||
|
int_or_none,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
class NobelPrizeIE(InfoExtractor):
|
||||||
|
_VALID_URL = r'https?://(?:www\.)?nobelprize\.org/mediaplayer.*?\bid=(?P<id>\d+)'
|
||||||
|
_TEST = {
|
||||||
|
'url': 'http://www.nobelprize.org/mediaplayer/?id=2636',
|
||||||
|
'md5': '04c81e5714bb36cc4e2232fee1d8157f',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '2636',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': 'Announcement of the 2016 Nobel Prize in Physics',
|
||||||
|
'description': 'md5:05beba57f4f5a4bbd4cf2ef28fcff739',
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
def _real_extract(self, url):
|
||||||
|
video_id = self._match_id(url)
|
||||||
|
webpage = self._download_webpage(url, video_id)
|
||||||
|
media = self._parse_json(self._search_regex(
|
||||||
|
r'(?s)var\s*config\s*=\s*({.+?});', webpage,
|
||||||
|
'config'), video_id, js_to_json)['media']
|
||||||
|
title = media['title']
|
||||||
|
|
||||||
|
formats = []
|
||||||
|
for source in media.get('source', []):
|
||||||
|
source_src = source.get('src')
|
||||||
|
if not source_src:
|
||||||
|
continue
|
||||||
|
ext = mimetype2ext(source.get('type')) or determine_ext(source_src)
|
||||||
|
if ext == 'm3u8':
|
||||||
|
formats.extend(self._extract_m3u8_formats(
|
||||||
|
source_src, video_id, 'mp4', 'm3u8_native',
|
||||||
|
m3u8_id='hls', fatal=False))
|
||||||
|
elif ext == 'f4m':
|
||||||
|
formats.extend(self._extract_f4m_formats(
|
||||||
|
update_url_query(source_src, {'hdcore': '3.7.0'}),
|
||||||
|
video_id, f4m_id='hds', fatal=False))
|
||||||
|
else:
|
||||||
|
formats.append({
|
||||||
|
'url': source_src,
|
||||||
|
})
|
||||||
|
self._sort_formats(formats)
|
||||||
|
|
||||||
|
return {
|
||||||
|
'id': video_id,
|
||||||
|
'title': title,
|
||||||
|
'description': get_element_by_attribute('itemprop', 'description', webpage),
|
||||||
|
'duration': int_or_none(media.get('duration')),
|
||||||
|
'formats': formats,
|
||||||
|
}
|
@ -1,4 +1,4 @@
|
|||||||
# encoding: utf-8
|
# coding: utf-8
|
||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
import re
|
import re
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
# encoding: utf-8
|
# coding: utf-8
|
||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
# encoding: utf-8
|
# coding: utf-8
|
||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
import re
|
import re
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
# encoding: utf-8
|
# coding: utf-8
|
||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
from .brightcove import (
|
from .brightcove import (
|
||||||
|
@ -3,6 +3,7 @@ from __future__ import unicode_literals
|
|||||||
import re
|
import re
|
||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
|
from ..compat import compat_HTTPError
|
||||||
from ..utils import (
|
from ..utils import (
|
||||||
fix_xml_ampersands,
|
fix_xml_ampersands,
|
||||||
orderedSet,
|
orderedSet,
|
||||||
@ -10,6 +11,7 @@ from ..utils import (
|
|||||||
qualities,
|
qualities,
|
||||||
strip_jsonp,
|
strip_jsonp,
|
||||||
unified_strdate,
|
unified_strdate,
|
||||||
|
ExtractorError,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
@ -181,9 +183,16 @@ class NPOIE(NPOBaseIE):
|
|||||||
continue
|
continue
|
||||||
streams = format_info.get('streams')
|
streams = format_info.get('streams')
|
||||||
if streams:
|
if streams:
|
||||||
video_info = self._download_json(
|
try:
|
||||||
streams[0] + '&type=json',
|
video_info = self._download_json(
|
||||||
video_id, 'Downloading %s stream JSON' % format_id)
|
streams[0] + '&type=json',
|
||||||
|
video_id, 'Downloading %s stream JSON' % format_id)
|
||||||
|
except ExtractorError as ee:
|
||||||
|
if isinstance(ee.cause, compat_HTTPError) and ee.cause.code == 404:
|
||||||
|
error = (self._parse_json(ee.cause.read().decode(), video_id, fatal=False) or {}).get('errorstring')
|
||||||
|
if error:
|
||||||
|
raise ExtractorError(error, expected=True)
|
||||||
|
raise
|
||||||
else:
|
else:
|
||||||
video_info = format_info
|
video_info = format_info
|
||||||
video_url = video_info.get('url')
|
video_url = video_info.get('url')
|
||||||
@ -459,8 +468,9 @@ class NPOPlaylistBaseIE(NPOIE):
|
|||||||
|
|
||||||
class VPROIE(NPOPlaylistBaseIE):
|
class VPROIE(NPOPlaylistBaseIE):
|
||||||
IE_NAME = 'vpro'
|
IE_NAME = 'vpro'
|
||||||
_VALID_URL = r'https?://(?:www\.)?(?:tegenlicht\.)?vpro\.nl/(?:[^/]+/){2,}(?P<id>[^/]+)\.html'
|
_VALID_URL = r'https?://(?:www\.)?(?:(?:tegenlicht\.)?vpro|2doc)\.nl/(?:[^/]+/)*(?P<id>[^/]+)\.html'
|
||||||
_PLAYLIST_TITLE_RE = r'<h1[^>]+class=["\'].*?\bmedia-platform-title\b.*?["\'][^>]*>([^<]+)'
|
_PLAYLIST_TITLE_RE = (r'<h1[^>]+class=["\'].*?\bmedia-platform-title\b.*?["\'][^>]*>([^<]+)',
|
||||||
|
r'<h5[^>]+class=["\'].*?\bmedia-platform-subtitle\b.*?["\'][^>]*>([^<]+)')
|
||||||
_PLAYLIST_ENTRY_RE = r'data-media-id="([^"]+)"'
|
_PLAYLIST_ENTRY_RE = r'data-media-id="([^"]+)"'
|
||||||
|
|
||||||
_TESTS = [
|
_TESTS = [
|
||||||
@ -492,6 +502,27 @@ class VPROIE(NPOPlaylistBaseIE):
|
|||||||
'title': 'education education',
|
'title': 'education education',
|
||||||
},
|
},
|
||||||
'playlist_count': 2,
|
'playlist_count': 2,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
'url': 'http://www.2doc.nl/documentaires/series/2doc/2015/oktober/de-tegenprestatie.html',
|
||||||
|
'info_dict': {
|
||||||
|
'id': 'de-tegenprestatie',
|
||||||
|
'title': 'De Tegenprestatie',
|
||||||
|
},
|
||||||
|
'playlist_count': 2,
|
||||||
|
}, {
|
||||||
|
'url': 'http://www.2doc.nl/speel~VARA_101375237~mh17-het-verdriet-van-nederland~.html',
|
||||||
|
'info_dict': {
|
||||||
|
'id': 'VARA_101375237',
|
||||||
|
'ext': 'm4v',
|
||||||
|
'title': 'MH17: Het verdriet van Nederland',
|
||||||
|
'description': 'md5:09e1a37c1fdb144621e22479691a9f18',
|
||||||
|
'upload_date': '20150716',
|
||||||
|
},
|
||||||
|
'params': {
|
||||||
|
# Skip because of m3u8 download
|
||||||
|
'skip_download': True
|
||||||
|
},
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
|
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
# encoding: utf-8
|
# coding: utf-8
|
||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
import re
|
import re
|
||||||
@ -113,7 +113,17 @@ class NRKBaseIE(InfoExtractor):
|
|||||||
|
|
||||||
|
|
||||||
class NRKIE(NRKBaseIE):
|
class NRKIE(NRKBaseIE):
|
||||||
_VALID_URL = r'(?:nrk:|https?://(?:www\.)?nrk\.no/video/PS\*)(?P<id>\d+)'
|
_VALID_URL = r'''(?x)
|
||||||
|
(?:
|
||||||
|
nrk:|
|
||||||
|
https?://
|
||||||
|
(?:
|
||||||
|
(?:www\.)?nrk\.no/video/PS\*|
|
||||||
|
v8-psapi\.nrk\.no/mediaelement/
|
||||||
|
)
|
||||||
|
)
|
||||||
|
(?P<id>[^/?#&]+)
|
||||||
|
'''
|
||||||
_API_HOST = 'v8.psapi.nrk.no'
|
_API_HOST = 'v8.psapi.nrk.no'
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
# video
|
# video
|
||||||
@ -137,6 +147,12 @@ class NRKIE(NRKBaseIE):
|
|||||||
'description': 'md5:a621f5cc1bd75c8d5104cb048c6b8568',
|
'description': 'md5:a621f5cc1bd75c8d5104cb048c6b8568',
|
||||||
'duration': 20,
|
'duration': 20,
|
||||||
}
|
}
|
||||||
|
}, {
|
||||||
|
'url': 'nrk:ecc1b952-96dc-4a98-81b9-5296dc7a98d9',
|
||||||
|
'only_matching': True,
|
||||||
|
}, {
|
||||||
|
'url': 'https://v8-psapi.nrk.no/mediaelement/ecc1b952-96dc-4a98-81b9-5296dc7a98d9',
|
||||||
|
'only_matching': True,
|
||||||
}]
|
}]
|
||||||
|
|
||||||
|
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
# encoding: utf-8
|
# coding: utf-8
|
||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
# encoding: utf-8
|
# coding: utf-8
|
||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
|
@ -1,3 +1,4 @@
|
|||||||
|
# coding: utf-8
|
||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
import hmac
|
import hmac
|
||||||
@ -6,11 +7,13 @@ import base64
|
|||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
from ..utils import (
|
from ..utils import (
|
||||||
|
determine_ext,
|
||||||
float_or_none,
|
float_or_none,
|
||||||
int_or_none,
|
int_or_none,
|
||||||
parse_iso8601,
|
js_to_json,
|
||||||
mimetype2ext,
|
mimetype2ext,
|
||||||
determine_ext,
|
parse_iso8601,
|
||||||
|
remove_start,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
@ -138,16 +141,83 @@ class NYTimesArticleIE(NYTimesBaseIE):
|
|||||||
'upload_date': '20150414',
|
'upload_date': '20150414',
|
||||||
'uploader': 'Matthew Williams',
|
'uploader': 'Matthew Williams',
|
||||||
}
|
}
|
||||||
|
}, {
|
||||||
|
'url': 'http://www.nytimes.com/2016/10/14/podcasts/revelations-from-the-final-weeks.html',
|
||||||
|
'md5': 'e0d52040cafb07662acf3c9132db3575',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '100000004709062',
|
||||||
|
'title': 'The Run-Up: ‘He Was Like an Octopus’',
|
||||||
|
'ext': 'mp3',
|
||||||
|
'description': 'md5:fb5c6b93b12efc51649b4847fe066ee4',
|
||||||
|
'series': 'The Run-Up',
|
||||||
|
'episode': '‘He Was Like an Octopus’',
|
||||||
|
'episode_number': 20,
|
||||||
|
'duration': 2130,
|
||||||
|
}
|
||||||
|
}, {
|
||||||
|
'url': 'http://www.nytimes.com/2016/10/16/books/review/inside-the-new-york-times-book-review-the-rise-of-hitler.html',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '100000004709479',
|
||||||
|
'title': 'The Rise of Hitler',
|
||||||
|
'ext': 'mp3',
|
||||||
|
'description': 'md5:bce877fd9e3444990cb141875fab0028',
|
||||||
|
'creator': 'Pamela Paul',
|
||||||
|
'duration': 3475,
|
||||||
|
},
|
||||||
|
'params': {
|
||||||
|
'skip_download': True,
|
||||||
|
},
|
||||||
}, {
|
}, {
|
||||||
'url': 'http://www.nytimes.com/news/minute/2014/03/17/times-minute-whats-next-in-crimea/?_php=true&_type=blogs&_php=true&_type=blogs&_r=1',
|
'url': 'http://www.nytimes.com/news/minute/2014/03/17/times-minute-whats-next-in-crimea/?_php=true&_type=blogs&_php=true&_type=blogs&_r=1',
|
||||||
'only_matching': True,
|
'only_matching': True,
|
||||||
}]
|
}]
|
||||||
|
|
||||||
|
def _extract_podcast_from_json(self, json, page_id, webpage):
|
||||||
|
podcast_audio = self._parse_json(
|
||||||
|
json, page_id, transform_source=js_to_json)
|
||||||
|
|
||||||
|
audio_data = podcast_audio['data']
|
||||||
|
track = audio_data['track']
|
||||||
|
|
||||||
|
episode_title = track['title']
|
||||||
|
video_url = track['source']
|
||||||
|
|
||||||
|
description = track.get('description') or self._html_search_meta(
|
||||||
|
['og:description', 'twitter:description'], webpage)
|
||||||
|
|
||||||
|
podcast_title = audio_data.get('podcast', {}).get('title')
|
||||||
|
title = ('%s: %s' % (podcast_title, episode_title)
|
||||||
|
if podcast_title else episode_title)
|
||||||
|
|
||||||
|
episode = audio_data.get('podcast', {}).get('episode') or ''
|
||||||
|
episode_number = int_or_none(self._search_regex(
|
||||||
|
r'[Ee]pisode\s+(\d+)', episode, 'episode number', default=None))
|
||||||
|
|
||||||
|
return {
|
||||||
|
'id': remove_start(podcast_audio.get('target'), 'FT') or page_id,
|
||||||
|
'url': video_url,
|
||||||
|
'title': title,
|
||||||
|
'description': description,
|
||||||
|
'creator': track.get('credit'),
|
||||||
|
'series': podcast_title,
|
||||||
|
'episode': episode_title,
|
||||||
|
'episode_number': episode_number,
|
||||||
|
'duration': int_or_none(track.get('duration')),
|
||||||
|
}
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
video_id = self._match_id(url)
|
page_id = self._match_id(url)
|
||||||
|
|
||||||
webpage = self._download_webpage(url, video_id)
|
webpage = self._download_webpage(url, page_id)
|
||||||
|
|
||||||
video_id = self._html_search_regex(r'data-videoid="(\d+)"', webpage, 'video id')
|
video_id = self._search_regex(
|
||||||
|
r'data-videoid=["\'](\d+)', webpage, 'video id',
|
||||||
|
default=None, fatal=False)
|
||||||
|
if video_id is not None:
|
||||||
|
return self._extract_video_from_id(video_id)
|
||||||
|
|
||||||
return self._extract_video_from_id(video_id)
|
podcast_data = self._search_regex(
|
||||||
|
(r'NYTD\.FlexTypes\.push\s*\(\s*({.+?})\s*\)\s*;\s*</script',
|
||||||
|
r'NYTD\.FlexTypes\.push\s*\(\s*({.+})\s*\)\s*;'),
|
||||||
|
webpage, 'podcast data')
|
||||||
|
return self._extract_podcast_from_json(podcast_data, page_id, webpage)
|
||||||
|
36
youtube_dl/extractor/nzz.py
Normal file
36
youtube_dl/extractor/nzz.py
Normal file
@ -0,0 +1,36 @@
|
|||||||
|
# coding: utf-8
|
||||||
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
|
import re
|
||||||
|
|
||||||
|
from .common import InfoExtractor
|
||||||
|
from ..utils import (
|
||||||
|
extract_attributes,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
class NZZIE(InfoExtractor):
|
||||||
|
_VALID_URL = r'https?://(?:www\.)?nzz\.ch/(?:[^/]+/)*[^/?#]+-ld\.(?P<id>\d+)'
|
||||||
|
_TEST = {
|
||||||
|
'url': 'http://www.nzz.ch/zuerich/gymizyte/gymizyte-schreiben-schueler-heute-noch-diktate-ld.9153',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '9153',
|
||||||
|
},
|
||||||
|
'playlist_mincount': 6,
|
||||||
|
}
|
||||||
|
|
||||||
|
def _real_extract(self, url):
|
||||||
|
page_id = self._match_id(url)
|
||||||
|
webpage = self._download_webpage(url, page_id)
|
||||||
|
|
||||||
|
entries = []
|
||||||
|
for player_element in re.findall(r'(<[^>]+class="kalturaPlayer"[^>]*>)', webpage):
|
||||||
|
player_params = extract_attributes(player_element)
|
||||||
|
if player_params.get('data-type') not in ('kaltura_singleArticle',):
|
||||||
|
self.report_warning('Unsupported player type')
|
||||||
|
continue
|
||||||
|
entry_id = player_params['data-id']
|
||||||
|
entries.append(self.url_result(
|
||||||
|
'kaltura:1750922:' + entry_id, 'Kaltura', entry_id))
|
||||||
|
|
||||||
|
return self.playlist_result(entries, page_id)
|
@ -1,4 +1,4 @@
|
|||||||
# encoding: utf-8
|
# coding: utf-8
|
||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
|
@ -56,8 +56,8 @@ class OnetBaseIE(InfoExtractor):
|
|||||||
continue
|
continue
|
||||||
ext = determine_ext(video_url)
|
ext = determine_ext(video_url)
|
||||||
if format_id == 'ism':
|
if format_id == 'ism':
|
||||||
# TODO: Support Microsoft Smooth Streaming
|
formats.extend(self._extract_ism_formats(
|
||||||
continue
|
video_url, video_id, 'mss', fatal=False))
|
||||||
elif ext == 'mpd':
|
elif ext == 'mpd':
|
||||||
formats.extend(self._extract_mpd_formats(
|
formats.extend(self._extract_mpd_formats(
|
||||||
video_url, video_id, mpd_id='dash', fatal=False))
|
video_url, video_id, mpd_id='dash', fatal=False))
|
||||||
|
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user