Compare commits
175 Commits
2014.09.24
...
2014.10.13
Author | SHA1 | Date | |
---|---|---|---|
9b36dcbd65 | |||
2aefb886fa | |||
72961c2a8a | |||
4c1ce987b0 | |||
8a2300a597 | |||
1cc887cbf0 | |||
203fb43f36 | |||
4d7b03f1f2 | |||
72ebb5e4b4 | |||
8450c15c25 | |||
b88b45e46c | |||
2417dc1715 | |||
23d83ad4d5 | |||
772ece3571 | |||
2c9f31188b | |||
d18be55533 | |||
ac20fc047a | |||
b4c3c8c172 | |||
3357110a4c | |||
e29fdedb45 | |||
4828703f14 | |||
afe08e0d4a | |||
071420e136 | |||
f4cf848d1d | |||
b7b2ca6e2b | |||
1409704afa | |||
c8e390c2b0 | |||
823f1e015a | |||
3c06d3715e | |||
762958d5af | |||
53d9009bdb | |||
1b725173a5 | |||
0ca41c3d9c | |||
fc6861b175 | |||
b097b5f246 | |||
385009fc44 | |||
ced659bb4d | |||
842cca7d56 | |||
b3826f6c8d | |||
7bc8780c57 | |||
c59c3c84ed | |||
24f7fb5e1e | |||
3b700f8d43 | |||
31d06400ec | |||
642b76ac15 | |||
4c4de296d4 | |||
b10609d98c | |||
3ae165aa10 | |||
e4b85e35d0 | |||
bb0c206f59 | |||
b81f484b60 | |||
5e69192ef7 | |||
e9be9a6acd | |||
f47754f061 | |||
d838b1bd4a | |||
fe506288bd | |||
d397c0b3dd | |||
146c80e256 | |||
f78c01f68b | |||
8489578df4 | |||
10606050bc | |||
d9bf465200 | |||
01384d6e4b | |||
08d5230945 | |||
852f8641e8 | |||
18937a50a4 | |||
e4d6cca0c1 | |||
d5feab9aaa | |||
9e77c60c9a | |||
1414df5ce2 | |||
e80f40e5ca | |||
d3c9af84fc | |||
59d206ca2d | |||
e7b6d12254 | |||
410f3e73ab | |||
07e764439a | |||
f8fb4a7ca8 | |||
e497a7f2ca | |||
a3b6be104d | |||
b7bb0df21e | |||
4dc19c0982 | |||
58ea7ec81e | |||
c0f64ac689 | |||
7a08ad7d59 | |||
2d29ac4f23 | |||
a7a747d687 | |||
fdb4d278bf | |||
59c03a9bfb | |||
e7db973328 | |||
99b67fecc5 | |||
89294b5f50 | |||
72d53356f6 | |||
9e1e67fc15 | |||
1e60e5546e | |||
457749a703 | |||
937f935db0 | |||
80bcefcd77 | |||
8c23945c72 | |||
989b4b2b86 | |||
2a7b4681c6 | |||
8157ae3904 | |||
e50e2fcd4d | |||
6be451f422 | |||
5e4f06197f | |||
761e1645e0 | |||
8ff14175e2 | |||
dbe3043cd6 | |||
a8eb5a8e61 | |||
6043f1df4e | |||
12548cd933 | |||
2593039522 | |||
35d3e63d24 | |||
27aede9074 | |||
f5b7e6a842 | |||
a1f934b171 | |||
a43ee88c6f | |||
e2dce53781 | |||
1770ed9e86 | |||
457ac58cc7 | |||
9c44d2429b | |||
d2e32f7df5 | |||
67077b182b | |||
5f4c318844 | |||
dfee83234b | |||
7f5c0c4a19 | |||
4bc77c8417 | |||
22dd3fad86 | |||
d6e6a42256 | |||
76e7d1e74b | |||
38c4d41b74 | |||
f0b8e3607d | |||
51ee08c4bb | |||
c841789772 | |||
c121a75b36 | |||
5a8b77551d | |||
0217aee154 | |||
b14f3a4c1d | |||
92f7963f6e | |||
88fbe4c2cc | |||
394599f422 | |||
ed9266db90 | |||
f4b1c7adb8 | |||
c95eeb7b80 | |||
5e43e3803c | |||
a89435a7a8 | |||
a0a90b3ba1 | |||
c664182323 | |||
6be1cd4ddb | |||
ee0d90707a | |||
f776d8f608 | |||
b3ac3a51ac | |||
0b75c2a88b | |||
7b7518124e | |||
68b0973046 | |||
3a203b8bfa | |||
70752ccefd | |||
0155549d6c | |||
b66745288e | |||
2a1325fdde | |||
2f9e8776df | |||
497339fa0e | |||
8e6f8051f0 | |||
11b3ce8509 | |||
6a5af6acb9 | |||
9a0d98bb40 | |||
fbd3162e49 | |||
54e9a4af95 | |||
8a32b82e46 | |||
fec02bcc90 | |||
c6e90caaa6 | |||
4bbf157794 | |||
6b08cdf626 | |||
b686fc18da | |||
746c67d72f | |||
5aa38e75b2 |
1
.gitignore
vendored
1
.gitignore
vendored
@ -30,3 +30,4 @@ updates_key.pem
|
|||||||
*.swp
|
*.swp
|
||||||
test/testdata
|
test/testdata
|
||||||
.tox
|
.tox
|
||||||
|
youtube-dl.zsh
|
||||||
|
20
Makefile
20
Makefile
@ -1,7 +1,7 @@
|
|||||||
all: youtube-dl README.md README.txt youtube-dl.1 youtube-dl.bash-completion youtube-dl.fish
|
all: youtube-dl README.md README.txt youtube-dl.1 youtube-dl.bash-completion youtube-dl.zsh youtube-dl.fish
|
||||||
|
|
||||||
clean:
|
clean:
|
||||||
rm -rf youtube-dl.1.temp.md youtube-dl.1 youtube-dl.bash-completion README.txt MANIFEST build/ dist/ .coverage cover/ youtube-dl.tar.gz youtube-dl.fish
|
rm -rf youtube-dl.1.temp.md youtube-dl.1 youtube-dl.bash-completion README.txt MANIFEST build/ dist/ .coverage cover/ youtube-dl.tar.gz youtube-dl.zsh youtube-dl.fish
|
||||||
|
|
||||||
cleanall: clean
|
cleanall: clean
|
||||||
rm -f youtube-dl youtube-dl.exe
|
rm -f youtube-dl youtube-dl.exe
|
||||||
@ -9,6 +9,7 @@ cleanall: clean
|
|||||||
PREFIX ?= /usr/local
|
PREFIX ?= /usr/local
|
||||||
BINDIR ?= $(PREFIX)/bin
|
BINDIR ?= $(PREFIX)/bin
|
||||||
MANDIR ?= $(PREFIX)/man
|
MANDIR ?= $(PREFIX)/man
|
||||||
|
SHAREDIR ?= $(PREFIX)/share
|
||||||
PYTHON ?= /usr/bin/env python
|
PYTHON ?= /usr/bin/env python
|
||||||
|
|
||||||
# set SYSCONFDIR to /etc if PREFIX=/usr or PREFIX=/usr/local
|
# set SYSCONFDIR to /etc if PREFIX=/usr or PREFIX=/usr/local
|
||||||
@ -22,13 +23,15 @@ else
|
|||||||
endif
|
endif
|
||||||
endif
|
endif
|
||||||
|
|
||||||
install: youtube-dl youtube-dl.1 youtube-dl.bash-completion
|
install: youtube-dl youtube-dl.1 youtube-dl.bash-completion youtube-dl.zsh youtube-dl.fish
|
||||||
install -d $(DESTDIR)$(BINDIR)
|
install -d $(DESTDIR)$(BINDIR)
|
||||||
install -m 755 youtube-dl $(DESTDIR)$(BINDIR)
|
install -m 755 youtube-dl $(DESTDIR)$(BINDIR)
|
||||||
install -d $(DESTDIR)$(MANDIR)/man1
|
install -d $(DESTDIR)$(MANDIR)/man1
|
||||||
install -m 644 youtube-dl.1 $(DESTDIR)$(MANDIR)/man1
|
install -m 644 youtube-dl.1 $(DESTDIR)$(MANDIR)/man1
|
||||||
install -d $(DESTDIR)$(SYSCONFDIR)/bash_completion.d
|
install -d $(DESTDIR)$(SYSCONFDIR)/bash_completion.d
|
||||||
install -m 644 youtube-dl.bash-completion $(DESTDIR)$(SYSCONFDIR)/bash_completion.d/youtube-dl
|
install -m 644 youtube-dl.bash-completion $(DESTDIR)$(SYSCONFDIR)/bash_completion.d/youtube-dl
|
||||||
|
install -d $(DESTDIR)$(SHAREDIR)/zsh/site-functions
|
||||||
|
install -m 644 youtube-dl.zsh $(DESTDIR)$(SHAREDIR)/zsh/site-functions/_youtube-dl
|
||||||
install -d $(DESTDIR)$(SYSCONFDIR)/fish/completions
|
install -d $(DESTDIR)$(SYSCONFDIR)/fish/completions
|
||||||
install -m 644 youtube-dl.fish $(DESTDIR)$(SYSCONFDIR)/fish/completions/youtube-dl.fish
|
install -m 644 youtube-dl.fish $(DESTDIR)$(SYSCONFDIR)/fish/completions/youtube-dl.fish
|
||||||
|
|
||||||
@ -38,7 +41,7 @@ test:
|
|||||||
|
|
||||||
tar: youtube-dl.tar.gz
|
tar: youtube-dl.tar.gz
|
||||||
|
|
||||||
.PHONY: all clean install test tar bash-completion pypi-files fish-completion
|
.PHONY: all clean install test tar bash-completion pypi-files zsh-completion fish-completion
|
||||||
|
|
||||||
pypi-files: youtube-dl.bash-completion README.txt youtube-dl.1 youtube-dl.fish
|
pypi-files: youtube-dl.bash-completion README.txt youtube-dl.1 youtube-dl.fish
|
||||||
|
|
||||||
@ -66,12 +69,17 @@ youtube-dl.bash-completion: youtube_dl/*.py youtube_dl/*/*.py devscripts/bash-co
|
|||||||
|
|
||||||
bash-completion: youtube-dl.bash-completion
|
bash-completion: youtube-dl.bash-completion
|
||||||
|
|
||||||
|
youtube-dl.zsh: youtube_dl/*.py youtube_dl/*/*.py devscripts/zsh-completion.in
|
||||||
|
python devscripts/zsh-completion.py
|
||||||
|
|
||||||
|
zsh-completion: youtube-dl.zsh
|
||||||
|
|
||||||
youtube-dl.fish: youtube_dl/*.py youtube_dl/*/*.py devscripts/fish-completion.in
|
youtube-dl.fish: youtube_dl/*.py youtube_dl/*/*.py devscripts/fish-completion.in
|
||||||
python devscripts/fish-completion.py
|
python devscripts/fish-completion.py
|
||||||
|
|
||||||
fish-completion: youtube-dl.fish
|
fish-completion: youtube-dl.fish
|
||||||
|
|
||||||
youtube-dl.tar.gz: youtube-dl README.md README.txt youtube-dl.1 youtube-dl.bash-completion youtube-dl.fish
|
youtube-dl.tar.gz: youtube-dl README.md README.txt youtube-dl.1 youtube-dl.bash-completion youtube-dl.zsh youtube-dl.fish
|
||||||
@tar -czf youtube-dl.tar.gz --transform "s|^|youtube-dl/|" --owner 0 --group 0 \
|
@tar -czf youtube-dl.tar.gz --transform "s|^|youtube-dl/|" --owner 0 --group 0 \
|
||||||
--exclude '*.DS_Store' \
|
--exclude '*.DS_Store' \
|
||||||
--exclude '*.kate-swp' \
|
--exclude '*.kate-swp' \
|
||||||
@ -86,5 +94,5 @@ youtube-dl.tar.gz: youtube-dl README.md README.txt youtube-dl.1 youtube-dl.bash-
|
|||||||
bin devscripts test youtube_dl docs \
|
bin devscripts test youtube_dl docs \
|
||||||
LICENSE README.md README.txt \
|
LICENSE README.md README.txt \
|
||||||
Makefile MANIFEST.in youtube-dl.1 youtube-dl.bash-completion \
|
Makefile MANIFEST.in youtube-dl.1 youtube-dl.bash-completion \
|
||||||
youtube-dl.fish setup.py \
|
youtube-dl.zsh youtube-dl.fish setup.py \
|
||||||
youtube-dl
|
youtube-dl
|
||||||
|
43
README.md
43
README.md
@ -99,8 +99,6 @@ which means you can modify it, redistribute it or use it however you like.
|
|||||||
downloaded videos in it.
|
downloaded videos in it.
|
||||||
--include-ads Download advertisements as well
|
--include-ads Download advertisements as well
|
||||||
(experimental)
|
(experimental)
|
||||||
--youtube-include-dash-manifest Try to download the DASH manifest on
|
|
||||||
YouTube videos (experimental)
|
|
||||||
|
|
||||||
## Download Options:
|
## Download Options:
|
||||||
-r, --rate-limit LIMIT maximum download rate in bytes per second
|
-r, --rate-limit LIMIT maximum download rate in bytes per second
|
||||||
@ -158,7 +156,8 @@ which means you can modify it, redistribute it or use it however you like.
|
|||||||
downloads if possible.
|
downloads if possible.
|
||||||
--no-continue do not resume partially downloaded files
|
--no-continue do not resume partially downloaded files
|
||||||
(restart from beginning)
|
(restart from beginning)
|
||||||
--no-part do not use .part files
|
--no-part do not use .part files - write directly
|
||||||
|
into output file
|
||||||
--no-mtime do not use the Last-modified header to set
|
--no-mtime do not use the Last-modified header to set
|
||||||
the file modification time
|
the file modification time
|
||||||
--write-description write video description to a .description
|
--write-description write video description to a .description
|
||||||
@ -216,7 +215,7 @@ which means you can modify it, redistribute it or use it however you like.
|
|||||||
information about the video. (Currently
|
information about the video. (Currently
|
||||||
supported only for YouTube)
|
supported only for YouTube)
|
||||||
--user-agent UA specify a custom user agent
|
--user-agent UA specify a custom user agent
|
||||||
--referer REF specify a custom referer, use if the video
|
--referer URL specify a custom referer, use if the video
|
||||||
access is restricted to one domain
|
access is restricted to one domain
|
||||||
--add-header FIELD:VALUE specify a custom HTTP header and its value,
|
--add-header FIELD:VALUE specify a custom HTTP header and its value,
|
||||||
separated by a colon ':'. You can use this
|
separated by a colon ':'. You can use this
|
||||||
@ -241,6 +240,8 @@ which means you can modify it, redistribute it or use it however you like.
|
|||||||
one is requested
|
one is requested
|
||||||
--max-quality FORMAT highest quality format to download
|
--max-quality FORMAT highest quality format to download
|
||||||
-F, --list-formats list all available formats
|
-F, --list-formats list all available formats
|
||||||
|
--youtube-skip-dash-manifest Do not download the DASH manifest on
|
||||||
|
YouTube videos
|
||||||
|
|
||||||
## Subtitle Options:
|
## Subtitle Options:
|
||||||
--write-sub write subtitle file
|
--write-sub write subtitle file
|
||||||
@ -256,7 +257,7 @@ which means you can modify it, redistribute it or use it however you like.
|
|||||||
language tags like 'en,pt'
|
language tags like 'en,pt'
|
||||||
|
|
||||||
## Authentication Options:
|
## Authentication Options:
|
||||||
-u, --username USERNAME account username
|
-u, --username USERNAME login with this account ID
|
||||||
-p, --password PASSWORD account password
|
-p, --password PASSWORD account password
|
||||||
-2, --twofactor TWOFACTOR two-factor auth code
|
-2, --twofactor TWOFACTOR two-factor auth code
|
||||||
-n, --netrc use .netrc authentication data
|
-n, --netrc use .netrc authentication data
|
||||||
@ -267,7 +268,7 @@ which means you can modify it, redistribute it or use it however you like.
|
|||||||
(requires ffmpeg or avconv and ffprobe or
|
(requires ffmpeg or avconv and ffprobe or
|
||||||
avprobe)
|
avprobe)
|
||||||
--audio-format FORMAT "best", "aac", "vorbis", "mp3", "m4a",
|
--audio-format FORMAT "best", "aac", "vorbis", "mp3", "m4a",
|
||||||
"opus", or "wav"; best by default
|
"opus", or "wav"; "best" by default
|
||||||
--audio-quality QUALITY ffmpeg/avconv audio quality specification,
|
--audio-quality QUALITY ffmpeg/avconv audio quality specification,
|
||||||
insert a value between 0 (better) and 9
|
insert a value between 0 (better) and 9
|
||||||
(worse) for VBR or a specific bitrate like
|
(worse) for VBR or a specific bitrate like
|
||||||
@ -348,21 +349,34 @@ $ youtube-dl --dateafter 20000101 --datebefore 20091231
|
|||||||
|
|
||||||
# FAQ
|
# FAQ
|
||||||
|
|
||||||
### I'm getting an error `Unable to extract OpenGraph title` on YouTube playlists
|
### How do I update youtube-dl?
|
||||||
|
|
||||||
YouTube changed their playlist format in March 2014 and later on, so you'll need at least youtube-dl 2014.07.25 to download all YouTube videos.
|
If you've followed [our manual installation instructions](http://rg3.github.io/youtube-dl/download.html), you can simply run `youtube-dl -U` (or, on Linux, `sudo youtube-dl -U`).
|
||||||
|
|
||||||
If you have installed youtube-dl with a package manager, pip, setup.py or a tarball, please use that to update. Note that Ubuntu packages do not seem to get updated anymore. Since we are not affiliated with Ubuntu, there is little we can do. Feel free to report bugs to the Ubuntu packaging guys - all they have to do is update the package to a somewhat recent version.
|
If you have used pip, a simple `sudo pip install -U youtube-dl` is sufficient to update.
|
||||||
|
|
||||||
Alternatively, uninstall the youtube-dl package and follow [our manual installation instructions](http://rg3.github.io/youtube-dl/download.html). In a pinch, this should do if you used `apt-get` before to install youtube-dl:
|
If you have installed youtube-dl using a package manager like *apt-get* or *yum*, use the standard system update mechanism to update. Note that distribution packages are often outdated. As a rule of thumb, youtube-dl releases at least once a month, and often weekly or even daily. Simply go to http://yt-dl.org/ to find out the current version. Unfortunately, there is nothing we youtube-dl developers can do if your distributions serves a really outdated version. You can (and should) complain to your distribution in their bugtracker or support forum.
|
||||||
|
|
||||||
|
As a last resort, you can also uninstall the version installed by your package manager and follow our manual installation instructions. For that, remove the distribution's package, with a line like
|
||||||
|
|
||||||
|
sudo apt-get remove -y youtube-dl
|
||||||
|
|
||||||
|
Afterwards, simply follow [our manual installation instructions](http://rg3.github.io/youtube-dl/download.html):
|
||||||
|
|
||||||
```
|
```
|
||||||
sudo apt-get remove -y youtube-dl
|
|
||||||
sudo wget https://yt-dl.org/latest/youtube-dl -O /usr/local/bin/youtube-dl
|
sudo wget https://yt-dl.org/latest/youtube-dl -O /usr/local/bin/youtube-dl
|
||||||
sudo chmod a+x /usr/local/bin/youtube-dl
|
sudo chmod a+x /usr/local/bin/youtube-dl
|
||||||
hash -r
|
hash -r
|
||||||
```
|
```
|
||||||
|
|
||||||
|
Again, from then on you'll be able to update with `sudo youtube-dl -U`.
|
||||||
|
|
||||||
|
### I'm getting an error `Unable to extract OpenGraph title` on YouTube playlists
|
||||||
|
|
||||||
|
YouTube changed their playlist format in March 2014 and later on, so you'll need at least youtube-dl 2014.07.25 to download all YouTube videos.
|
||||||
|
|
||||||
|
If you have installed youtube-dl with a package manager, pip, setup.py or a tarball, please use that to update. Note that Ubuntu packages do not seem to get updated anymore. Since we are not affiliated with Ubuntu, there is little we can do. Feel free to report bugs to the Ubuntu packaging guys - all they have to do is update the package to a somewhat recent version. See above for a way to update.
|
||||||
|
|
||||||
### Do I always have to pass in `--max-quality FORMAT`, or `-citw`?
|
### Do I always have to pass in `--max-quality FORMAT`, or `-citw`?
|
||||||
|
|
||||||
By default, youtube-dl intends to have the best options (incidentally, if you have a convincing case that these should be different, [please file an issue where you explain that](https://yt-dl.org/bug)). Therefore, it is unnecessary and sometimes harmful to copy long option strings from webpages. In particular, `--max-quality` *limits* the video quality (so if you want the best quality, do NOT pass it in), and the only option out of `-citw` that is regularly useful is `-i`.
|
By default, youtube-dl intends to have the best options (incidentally, if you have a convincing case that these should be different, [please file an issue where you explain that](https://yt-dl.org/bug)). Therefore, it is unnecessary and sometimes harmful to copy long option strings from webpages. In particular, `--max-quality` *limits* the video quality (so if you want the best quality, do NOT pass it in), and the only option out of `-citw` that is regularly useful is `-i`.
|
||||||
@ -442,8 +456,6 @@ If you want to add support for a new site, you can follow this quick list (assum
|
|||||||
# coding: utf-8
|
# coding: utf-8
|
||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
import re
|
|
||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
|
|
||||||
|
|
||||||
@ -451,7 +463,7 @@ If you want to add support for a new site, you can follow this quick list (assum
|
|||||||
_VALID_URL = r'https?://(?:www\.)?yourextractor\.com/watch/(?P<id>[0-9]+)'
|
_VALID_URL = r'https?://(?:www\.)?yourextractor\.com/watch/(?P<id>[0-9]+)'
|
||||||
_TEST = {
|
_TEST = {
|
||||||
'url': 'http://yourextractor.com/watch/42',
|
'url': 'http://yourextractor.com/watch/42',
|
||||||
'md5': 'TODO: md5 sum of the first 10KiB of the video file',
|
'md5': 'TODO: md5 sum of the first 10241 bytes of the video file (use --test)',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '42',
|
'id': '42',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
@ -466,8 +478,7 @@ If you want to add support for a new site, you can follow this quick list (assum
|
|||||||
}
|
}
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
mobj = re.match(self._VALID_URL, url)
|
video_id = self._match_id(url)
|
||||||
video_id = mobj.group('id')
|
|
||||||
|
|
||||||
# TODO more code goes here, for example ...
|
# TODO more code goes here, for example ...
|
||||||
webpage = self._download_webpage(url, video_id)
|
webpage = self._download_webpage(url, video_id)
|
||||||
|
28
devscripts/zsh-completion.in
Normal file
28
devscripts/zsh-completion.in
Normal file
@ -0,0 +1,28 @@
|
|||||||
|
#compdef youtube-dl
|
||||||
|
|
||||||
|
__youtube_dl() {
|
||||||
|
local curcontext="$curcontext" fileopts diropts cur prev
|
||||||
|
typeset -A opt_args
|
||||||
|
fileopts="{{fileopts}}"
|
||||||
|
diropts="{{diropts}}"
|
||||||
|
cur=$words[CURRENT]
|
||||||
|
case $cur in
|
||||||
|
:)
|
||||||
|
_arguments '*: :(::ytfavorites ::ytrecommended ::ytsubscriptions ::ytwatchlater ::ythistory)'
|
||||||
|
;;
|
||||||
|
*)
|
||||||
|
prev=$words[CURRENT-1]
|
||||||
|
if [[ ${prev} =~ ${fileopts} ]]; then
|
||||||
|
_path_files
|
||||||
|
elif [[ ${prev} =~ ${diropts} ]]; then
|
||||||
|
_path_files -/
|
||||||
|
elif [[ ${prev} == "--recode-video" ]]; then
|
||||||
|
_arguments '*: :(mp4 flv ogg webm mkv)'
|
||||||
|
else
|
||||||
|
_arguments '*: :({{flags}})'
|
||||||
|
fi
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
}
|
||||||
|
|
||||||
|
__youtube_dl
|
46
devscripts/zsh-completion.py
Executable file
46
devscripts/zsh-completion.py
Executable file
@ -0,0 +1,46 @@
|
|||||||
|
#!/usr/bin/env python
|
||||||
|
import os
|
||||||
|
from os.path import dirname as dirn
|
||||||
|
import sys
|
||||||
|
|
||||||
|
sys.path.append(dirn(dirn((os.path.abspath(__file__)))))
|
||||||
|
import youtube_dl
|
||||||
|
|
||||||
|
ZSH_COMPLETION_FILE = "youtube-dl.zsh"
|
||||||
|
ZSH_COMPLETION_TEMPLATE = "devscripts/zsh-completion.in"
|
||||||
|
|
||||||
|
|
||||||
|
def build_completion(opt_parser):
|
||||||
|
opts = [opt for group in opt_parser.option_groups
|
||||||
|
for opt in group.option_list]
|
||||||
|
opts_file = [opt for opt in opts if opt.metavar == "FILE"]
|
||||||
|
opts_dir = [opt for opt in opts if opt.metavar == "DIR"]
|
||||||
|
|
||||||
|
fileopts = []
|
||||||
|
for opt in opts_file:
|
||||||
|
if opt._short_opts:
|
||||||
|
fileopts.extend(opt._short_opts)
|
||||||
|
if opt._long_opts:
|
||||||
|
fileopts.extend(opt._long_opts)
|
||||||
|
|
||||||
|
diropts = []
|
||||||
|
for opt in opts_dir:
|
||||||
|
if opt._short_opts:
|
||||||
|
diropts.extend(opt._short_opts)
|
||||||
|
if opt._long_opts:
|
||||||
|
diropts.extend(opt._long_opts)
|
||||||
|
|
||||||
|
flags = [opt.get_opt_string() for opt in opts]
|
||||||
|
|
||||||
|
with open(ZSH_COMPLETION_TEMPLATE) as f:
|
||||||
|
template = f.read()
|
||||||
|
|
||||||
|
template = template.replace("{{fileopts}}", "|".join(fileopts))
|
||||||
|
template = template.replace("{{diropts}}", "|".join(diropts))
|
||||||
|
template = template.replace("{{flags}}", " ".join(flags))
|
||||||
|
|
||||||
|
with open(ZSH_COMPLETION_FILE, "w") as f:
|
||||||
|
f.write(template)
|
||||||
|
|
||||||
|
parser = youtube_dl.parseOpts()[0]
|
||||||
|
build_completion(parser)
|
@ -1,3 +1,5 @@
|
|||||||
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
import errno
|
import errno
|
||||||
import io
|
import io
|
||||||
import hashlib
|
import hashlib
|
||||||
@ -12,6 +14,7 @@ from youtube_dl import YoutubeDL
|
|||||||
from youtube_dl.utils import (
|
from youtube_dl.utils import (
|
||||||
compat_str,
|
compat_str,
|
||||||
preferredencoding,
|
preferredencoding,
|
||||||
|
write_string,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
@ -40,10 +43,10 @@ def report_warning(message):
|
|||||||
If stderr is a tty file the 'WARNING:' will be colored
|
If stderr is a tty file the 'WARNING:' will be colored
|
||||||
'''
|
'''
|
||||||
if sys.stderr.isatty() and os.name != 'nt':
|
if sys.stderr.isatty() and os.name != 'nt':
|
||||||
_msg_header = u'\033[0;33mWARNING:\033[0m'
|
_msg_header = '\033[0;33mWARNING:\033[0m'
|
||||||
else:
|
else:
|
||||||
_msg_header = u'WARNING:'
|
_msg_header = 'WARNING:'
|
||||||
output = u'%s %s\n' % (_msg_header, message)
|
output = '%s %s\n' % (_msg_header, message)
|
||||||
if 'b' in getattr(sys.stderr, 'mode', '') or sys.version_info[0] < 3:
|
if 'b' in getattr(sys.stderr, 'mode', '') or sys.version_info[0] < 3:
|
||||||
output = output.encode(preferredencoding())
|
output = output.encode(preferredencoding())
|
||||||
sys.stderr.write(output)
|
sys.stderr.write(output)
|
||||||
@ -103,22 +106,22 @@ def expect_info_dict(self, expected_dict, got_dict):
|
|||||||
|
|
||||||
self.assertTrue(
|
self.assertTrue(
|
||||||
isinstance(got, compat_str),
|
isinstance(got, compat_str),
|
||||||
u'Expected a %s object, but got %s for field %s' % (
|
'Expected a %s object, but got %s for field %s' % (
|
||||||
compat_str.__name__, type(got).__name__, info_field))
|
compat_str.__name__, type(got).__name__, info_field))
|
||||||
self.assertTrue(
|
self.assertTrue(
|
||||||
match_rex.match(got),
|
match_rex.match(got),
|
||||||
u'field %s (value: %r) should match %r' % (info_field, got, match_str))
|
'field %s (value: %r) should match %r' % (info_field, got, match_str))
|
||||||
elif isinstance(expected, type):
|
elif isinstance(expected, type):
|
||||||
got = got_dict.get(info_field)
|
got = got_dict.get(info_field)
|
||||||
self.assertTrue(isinstance(got, expected),
|
self.assertTrue(isinstance(got, expected),
|
||||||
u'Expected type %r for field %s, but got value %r of type %r' % (expected, info_field, got, type(got)))
|
'Expected type %r for field %s, but got value %r of type %r' % (expected, info_field, got, type(got)))
|
||||||
else:
|
else:
|
||||||
if isinstance(expected, compat_str) and expected.startswith('md5:'):
|
if isinstance(expected, compat_str) and expected.startswith('md5:'):
|
||||||
got = 'md5:' + md5(got_dict.get(info_field))
|
got = 'md5:' + md5(got_dict.get(info_field))
|
||||||
else:
|
else:
|
||||||
got = got_dict.get(info_field)
|
got = got_dict.get(info_field)
|
||||||
self.assertEqual(expected, got,
|
self.assertEqual(expected, got,
|
||||||
u'invalid value for field %s, expected %r, got %r' % (info_field, expected, got))
|
'invalid value for field %s, expected %r, got %r' % (info_field, expected, got))
|
||||||
|
|
||||||
# Check for the presence of mandatory fields
|
# Check for the presence of mandatory fields
|
||||||
if got_dict.get('_type') != 'playlist':
|
if got_dict.get('_type') != 'playlist':
|
||||||
@ -126,7 +129,7 @@ def expect_info_dict(self, expected_dict, got_dict):
|
|||||||
self.assertTrue(got_dict.get(key), 'Missing mandatory field %s' % key)
|
self.assertTrue(got_dict.get(key), 'Missing mandatory field %s' % key)
|
||||||
# Check for mandatory fields that are automatically set by YoutubeDL
|
# Check for mandatory fields that are automatically set by YoutubeDL
|
||||||
for key in ['webpage_url', 'extractor', 'extractor_key']:
|
for key in ['webpage_url', 'extractor', 'extractor_key']:
|
||||||
self.assertTrue(got_dict.get(key), u'Missing field: %s' % key)
|
self.assertTrue(got_dict.get(key), 'Missing field: %s' % key)
|
||||||
|
|
||||||
# Are checkable fields missing from the test case definition?
|
# Are checkable fields missing from the test case definition?
|
||||||
test_info_dict = dict((key, value if not isinstance(value, compat_str) or len(value) < 250 else 'md5:' + md5(value))
|
test_info_dict = dict((key, value if not isinstance(value, compat_str) or len(value) < 250 else 'md5:' + md5(value))
|
||||||
@ -134,7 +137,15 @@ def expect_info_dict(self, expected_dict, got_dict):
|
|||||||
if value and key in ('title', 'description', 'uploader', 'upload_date', 'timestamp', 'uploader_id', 'location'))
|
if value and key in ('title', 'description', 'uploader', 'upload_date', 'timestamp', 'uploader_id', 'location'))
|
||||||
missing_keys = set(test_info_dict.keys()) - set(expected_dict.keys())
|
missing_keys = set(test_info_dict.keys()) - set(expected_dict.keys())
|
||||||
if missing_keys:
|
if missing_keys:
|
||||||
sys.stderr.write(u'\n"info_dict": ' + json.dumps(test_info_dict, ensure_ascii=False, indent=4) + u'\n')
|
def _repr(v):
|
||||||
|
if isinstance(v, compat_str):
|
||||||
|
return "'%s'" % v.replace('\\', '\\\\').replace("'", "\\'")
|
||||||
|
else:
|
||||||
|
return repr(v)
|
||||||
|
info_dict_str = ''.join(
|
||||||
|
' %s: %s,\n' % (_repr(k), _repr(v))
|
||||||
|
for k, v in test_info_dict.items())
|
||||||
|
write_string('\n"info_dict": {' + info_dict_str + '}\n', out=sys.stderr)
|
||||||
self.assertFalse(
|
self.assertFalse(
|
||||||
missing_keys,
|
missing_keys,
|
||||||
'Missing keys in test definition: %s' % (
|
'Missing keys in test definition: %s' % (
|
||||||
|
@ -139,7 +139,9 @@ def generator(test_case):
|
|||||||
|
|
||||||
if is_playlist:
|
if is_playlist:
|
||||||
self.assertEqual(res_dict['_type'], 'playlist')
|
self.assertEqual(res_dict['_type'], 'playlist')
|
||||||
|
self.assertTrue('entries' in res_dict)
|
||||||
expect_info_dict(self, test_case.get('info_dict', {}), res_dict)
|
expect_info_dict(self, test_case.get('info_dict', {}), res_dict)
|
||||||
|
|
||||||
if 'playlist_mincount' in test_case:
|
if 'playlist_mincount' in test_case:
|
||||||
assertGreaterEqual(
|
assertGreaterEqual(
|
||||||
self,
|
self,
|
||||||
@ -188,7 +190,7 @@ def generator(test_case):
|
|||||||
expect_info_dict(self, tc.get('info_dict', {}), info_dict)
|
expect_info_dict(self, tc.get('info_dict', {}), info_dict)
|
||||||
finally:
|
finally:
|
||||||
try_rm_tcs_files()
|
try_rm_tcs_files()
|
||||||
if is_playlist and res_dict is not None:
|
if is_playlist and res_dict is not None and res_dict.get('entries'):
|
||||||
# Remove all other files that may have been extracted if the
|
# Remove all other files that may have been extracted if the
|
||||||
# extractor returns full results even with extract_flat
|
# extractor returns full results even with extract_flat
|
||||||
res_tcs = [{'info_dict': e} for e in res_dict['entries']]
|
res_tcs = [{'info_dict': e} for e in res_dict['entries']]
|
||||||
|
@ -15,6 +15,7 @@ from youtube_dl.extractor import (
|
|||||||
DailymotionIE,
|
DailymotionIE,
|
||||||
TEDIE,
|
TEDIE,
|
||||||
VimeoIE,
|
VimeoIE,
|
||||||
|
WallaIE,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
@ -279,5 +280,32 @@ class TestVimeoSubtitles(BaseTestSubtitles):
|
|||||||
self.assertTrue(subtitles.get(lang) is not None, u'Subtitles for \'%s\' not extracted' % lang)
|
self.assertTrue(subtitles.get(lang) is not None, u'Subtitles for \'%s\' not extracted' % lang)
|
||||||
|
|
||||||
|
|
||||||
|
class TestWallaSubtitles(BaseTestSubtitles):
|
||||||
|
url = 'http://vod.walla.co.il/movie/2705958/the-yes-men'
|
||||||
|
IE = WallaIE
|
||||||
|
|
||||||
|
def test_list_subtitles(self):
|
||||||
|
self.DL.expect_warning(u'Automatic Captions not supported by this server')
|
||||||
|
self.DL.params['listsubtitles'] = True
|
||||||
|
info_dict = self.getInfoDict()
|
||||||
|
self.assertEqual(info_dict, None)
|
||||||
|
|
||||||
|
def test_allsubtitles(self):
|
||||||
|
self.DL.expect_warning(u'Automatic Captions not supported by this server')
|
||||||
|
self.DL.params['writesubtitles'] = True
|
||||||
|
self.DL.params['allsubtitles'] = True
|
||||||
|
subtitles = self.getSubtitles()
|
||||||
|
self.assertEqual(set(subtitles.keys()), set(['heb']))
|
||||||
|
self.assertEqual(md5(subtitles['heb']), 'e758c5d7cb982f6bef14f377ec7a3920')
|
||||||
|
|
||||||
|
def test_nosubtitles(self):
|
||||||
|
self.DL.expect_warning(u'video doesn\'t have subtitles')
|
||||||
|
self.url = 'http://vod.walla.co.il/movie/2642630/one-direction-all-for-one'
|
||||||
|
self.DL.params['writesubtitles'] = True
|
||||||
|
self.DL.params['allsubtitles'] = True
|
||||||
|
subtitles = self.getSubtitles()
|
||||||
|
self.assertEqual(len(subtitles), 0)
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
|
||||||
unittest.main()
|
unittest.main()
|
||||||
|
@ -22,7 +22,8 @@ from youtube_dl.utils import (
|
|||||||
fix_xml_ampersands,
|
fix_xml_ampersands,
|
||||||
get_meta_content,
|
get_meta_content,
|
||||||
orderedSet,
|
orderedSet,
|
||||||
PagedList,
|
OnDemandPagedList,
|
||||||
|
InAdvancePagedList,
|
||||||
parse_duration,
|
parse_duration,
|
||||||
read_batch_urls,
|
read_batch_urls,
|
||||||
sanitize_filename,
|
sanitize_filename,
|
||||||
@ -43,6 +44,7 @@ from youtube_dl.utils import (
|
|||||||
limit_length,
|
limit_length,
|
||||||
escape_rfc3986,
|
escape_rfc3986,
|
||||||
escape_url,
|
escape_url,
|
||||||
|
js_to_json,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
@ -137,6 +139,7 @@ class TestUtil(unittest.TestCase):
|
|||||||
self.assertEqual(unified_strdate('Dec 14, 2012'), '20121214')
|
self.assertEqual(unified_strdate('Dec 14, 2012'), '20121214')
|
||||||
self.assertEqual(unified_strdate('2012/10/11 01:56:38 +0000'), '20121011')
|
self.assertEqual(unified_strdate('2012/10/11 01:56:38 +0000'), '20121011')
|
||||||
self.assertEqual(unified_strdate('1968-12-10'), '19681210')
|
self.assertEqual(unified_strdate('1968-12-10'), '19681210')
|
||||||
|
self.assertEqual(unified_strdate('28/01/2014 21:00:00 +0100'), '20140128')
|
||||||
|
|
||||||
def test_find_xpath_attr(self):
|
def test_find_xpath_attr(self):
|
||||||
testxml = '''<root>
|
testxml = '''<root>
|
||||||
@ -246,10 +249,14 @@ class TestUtil(unittest.TestCase):
|
|||||||
for i in range(firstid, upto):
|
for i in range(firstid, upto):
|
||||||
yield i
|
yield i
|
||||||
|
|
||||||
pl = PagedList(get_page, pagesize)
|
pl = OnDemandPagedList(get_page, pagesize)
|
||||||
got = pl.getslice(*sliceargs)
|
got = pl.getslice(*sliceargs)
|
||||||
self.assertEqual(got, expected)
|
self.assertEqual(got, expected)
|
||||||
|
|
||||||
|
iapl = InAdvancePagedList(get_page, size // pagesize + 1, pagesize)
|
||||||
|
got = iapl.getslice(*sliceargs)
|
||||||
|
self.assertEqual(got, expected)
|
||||||
|
|
||||||
testPL(5, 2, (), [0, 1, 2, 3, 4])
|
testPL(5, 2, (), [0, 1, 2, 3, 4])
|
||||||
testPL(5, 2, (1,), [1, 2, 3, 4])
|
testPL(5, 2, (1,), [1, 2, 3, 4])
|
||||||
testPL(5, 2, (2,), [2, 3, 4])
|
testPL(5, 2, (2,), [2, 3, 4])
|
||||||
@ -325,5 +332,28 @@ class TestUtil(unittest.TestCase):
|
|||||||
)
|
)
|
||||||
self.assertEqual(escape_url('http://vimeo.com/56015672#at=0'), 'http://vimeo.com/56015672#at=0')
|
self.assertEqual(escape_url('http://vimeo.com/56015672#at=0'), 'http://vimeo.com/56015672#at=0')
|
||||||
|
|
||||||
|
def test_js_to_json_realworld(self):
|
||||||
|
inp = '''{
|
||||||
|
'clip':{'provider':'pseudo'}
|
||||||
|
}'''
|
||||||
|
self.assertEqual(js_to_json(inp), '''{
|
||||||
|
"clip":{"provider":"pseudo"}
|
||||||
|
}''')
|
||||||
|
json.loads(js_to_json(inp))
|
||||||
|
|
||||||
|
inp = '''{
|
||||||
|
'playlist':[{'controls':{'all':null}}]
|
||||||
|
}'''
|
||||||
|
self.assertEqual(js_to_json(inp), '''{
|
||||||
|
"playlist":[{"controls":{"all":null}}]
|
||||||
|
}''')
|
||||||
|
|
||||||
|
def test_js_to_json_edgecases(self):
|
||||||
|
on = js_to_json("{abc_def:'1\\'\\\\2\\\\\\'3\"4'}")
|
||||||
|
self.assertEqual(json.loads(on), {"abc_def": "1'\\2\\'3\"4"})
|
||||||
|
|
||||||
|
on = js_to_json('{"abc": true}')
|
||||||
|
self.assertEqual(json.loads(on), {'abc': True})
|
||||||
|
|
||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
|
||||||
unittest.main()
|
unittest.main()
|
||||||
|
@ -47,18 +47,6 @@ _TESTS = [
|
|||||||
'2ACFC7A61CA478CD21425E5A57EBD73DDC78E22A.2094302436B2D377D14A3BBA23022D023B8BC25AA',
|
'2ACFC7A61CA478CD21425E5A57EBD73DDC78E22A.2094302436B2D377D14A3BBA23022D023B8BC25AA',
|
||||||
'A52CB8B320D22032ABB3A41D773D2B6342034902.A22E87CDD37DBE75A5E52412DC874AC16A7CFCA2',
|
'A52CB8B320D22032ABB3A41D773D2B6342034902.A22E87CDD37DBE75A5E52412DC874AC16A7CFCA2',
|
||||||
),
|
),
|
||||||
(
|
|
||||||
'http://s.ytimg.com/yts/swfbin/player-vfl5vIhK2/watch_as3.swf',
|
|
||||||
'swf',
|
|
||||||
86,
|
|
||||||
'O1I3456789abcde0ghijklmnopqrstuvwxyzABCDEFGHfJKLMN2PQRSTUVWXY\\!"#$%&\'()*+,-./:;<=>?'
|
|
||||||
),
|
|
||||||
(
|
|
||||||
'http://s.ytimg.com/yts/swfbin/player-vflmDyk47/watch_as3.swf',
|
|
||||||
'swf',
|
|
||||||
'F375F75BF2AFDAAF2666E43868D46816F83F13E81C46.3725A8218E446A0DECD33F79DC282994D6AA92C92C9',
|
|
||||||
'9C29AA6D499282CD97F33DCED0A644E8128A5273.64C18E31F38361864D86834E6662FAADFA2FB57F'
|
|
||||||
),
|
|
||||||
(
|
(
|
||||||
'https://s.ytimg.com/yts/jsbin/html5player-en_US-vflBb0OQx.js',
|
'https://s.ytimg.com/yts/jsbin/html5player-en_US-vflBb0OQx.js',
|
||||||
'js',
|
'js',
|
||||||
|
@ -228,11 +228,11 @@ class YoutubeDL(object):
|
|||||||
|
|
||||||
if (sys.version_info >= (3,) and sys.platform != 'win32' and
|
if (sys.version_info >= (3,) and sys.platform != 'win32' and
|
||||||
sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968']
|
sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968']
|
||||||
and not params['restrictfilenames']):
|
and not params.get('restrictfilenames', False)):
|
||||||
# On Python 3, the Unicode filesystem API will throw errors (#1474)
|
# On Python 3, the Unicode filesystem API will throw errors (#1474)
|
||||||
self.report_warning(
|
self.report_warning(
|
||||||
'Assuming --restrict-filenames since file system encoding '
|
'Assuming --restrict-filenames since file system encoding '
|
||||||
'cannot encode all charactes. '
|
'cannot encode all characters. '
|
||||||
'Set the LC_ALL environment variable to fix this.')
|
'Set the LC_ALL environment variable to fix this.')
|
||||||
self.params['restrictfilenames'] = True
|
self.params['restrictfilenames'] = True
|
||||||
|
|
||||||
@ -1250,12 +1250,13 @@ class YoutubeDL(object):
|
|||||||
# urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991)
|
# urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991)
|
||||||
# To work around aforementioned issue we will replace request's original URL with
|
# To work around aforementioned issue we will replace request's original URL with
|
||||||
# percent-encoded one
|
# percent-encoded one
|
||||||
url = req if isinstance(req, compat_str) else req.get_full_url()
|
req_is_string = isinstance(req, basestring if sys.version_info < (3, 0) else compat_str)
|
||||||
|
url = req if req_is_string else req.get_full_url()
|
||||||
url_escaped = escape_url(url)
|
url_escaped = escape_url(url)
|
||||||
|
|
||||||
# Substitute URL if any change after escaping
|
# Substitute URL if any change after escaping
|
||||||
if url != url_escaped:
|
if url != url_escaped:
|
||||||
if isinstance(req, compat_str):
|
if req_is_string:
|
||||||
req = url_escaped
|
req = url_escaped
|
||||||
else:
|
else:
|
||||||
req = compat_urllib_request.Request(
|
req = compat_urllib_request.Request(
|
||||||
|
@ -78,6 +78,7 @@ __authors__ = (
|
|||||||
'Hari Padmanaban',
|
'Hari Padmanaban',
|
||||||
'Carlos Ramos',
|
'Carlos Ramos',
|
||||||
'5moufl',
|
'5moufl',
|
||||||
|
'lenaten',
|
||||||
)
|
)
|
||||||
|
|
||||||
__license__ = 'Public Domain'
|
__license__ = 'Public Domain'
|
||||||
|
@ -42,6 +42,7 @@ class FileDownloader(object):
|
|||||||
Subclasses of this one must re-define the real_download method.
|
Subclasses of this one must re-define the real_download method.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
|
_TEST_FILE_SIZE = 10241
|
||||||
params = None
|
params = None
|
||||||
|
|
||||||
def __init__(self, ydl, params):
|
def __init__(self, ydl, params):
|
||||||
|
@ -7,6 +7,7 @@ import subprocess
|
|||||||
from .common import FileDownloader
|
from .common import FileDownloader
|
||||||
from ..utils import (
|
from ..utils import (
|
||||||
compat_urlparse,
|
compat_urlparse,
|
||||||
|
compat_urllib_request,
|
||||||
check_executable,
|
check_executable,
|
||||||
encodeFilename,
|
encodeFilename,
|
||||||
)
|
)
|
||||||
@ -71,15 +72,26 @@ class NativeHlsFD(FileDownloader):
|
|||||||
else compat_urlparse.urljoin(url, line))
|
else compat_urlparse.urljoin(url, line))
|
||||||
segment_urls.append(segment_url)
|
segment_urls.append(segment_url)
|
||||||
|
|
||||||
|
is_test = self.params.get('test', False)
|
||||||
|
remaining_bytes = self._TEST_FILE_SIZE if is_test else None
|
||||||
byte_counter = 0
|
byte_counter = 0
|
||||||
with open(tmpfilename, 'wb') as outf:
|
with open(tmpfilename, 'wb') as outf:
|
||||||
for i, segurl in enumerate(segment_urls):
|
for i, segurl in enumerate(segment_urls):
|
||||||
segment = self.ydl.urlopen(segurl).read()
|
|
||||||
outf.write(segment)
|
|
||||||
byte_counter += len(segment)
|
|
||||||
self.to_screen(
|
self.to_screen(
|
||||||
'[hlsnative] %s: Downloading segment %d / %d' %
|
'[hlsnative] %s: Downloading segment %d / %d' %
|
||||||
(info_dict['id'], i + 1, len(segment_urls)))
|
(info_dict['id'], i + 1, len(segment_urls)))
|
||||||
|
seg_req = compat_urllib_request.Request(segurl)
|
||||||
|
if remaining_bytes is not None:
|
||||||
|
seg_req.add_header('Range', 'bytes=0-%d' % (remaining_bytes - 1))
|
||||||
|
|
||||||
|
segment = self.ydl.urlopen(seg_req).read()
|
||||||
|
if remaining_bytes is not None:
|
||||||
|
segment = segment[:remaining_bytes]
|
||||||
|
remaining_bytes -= len(segment)
|
||||||
|
outf.write(segment)
|
||||||
|
byte_counter += len(segment)
|
||||||
|
if remaining_bytes is not None and remaining_bytes <= 0:
|
||||||
|
break
|
||||||
|
|
||||||
self._hook_progress({
|
self._hook_progress({
|
||||||
'downloaded_bytes': byte_counter,
|
'downloaded_bytes': byte_counter,
|
||||||
|
@ -14,8 +14,6 @@ from ..utils import (
|
|||||||
|
|
||||||
|
|
||||||
class HttpFD(FileDownloader):
|
class HttpFD(FileDownloader):
|
||||||
_TEST_FILE_SIZE = 10241
|
|
||||||
|
|
||||||
def real_download(self, filename, info_dict):
|
def real_download(self, filename, info_dict):
|
||||||
url = info_dict['url']
|
url = info_dict['url']
|
||||||
tmpfilename = self.temp_name(filename)
|
tmpfilename = self.temp_name(filename)
|
||||||
|
@ -134,13 +134,16 @@ from .gamestar import GameStarIE
|
|||||||
from .gametrailers import GametrailersIE
|
from .gametrailers import GametrailersIE
|
||||||
from .gdcvault import GDCVaultIE
|
from .gdcvault import GDCVaultIE
|
||||||
from .generic import GenericIE
|
from .generic import GenericIE
|
||||||
|
from .globo import GloboIE
|
||||||
from .godtube import GodTubeIE
|
from .godtube import GodTubeIE
|
||||||
|
from .golem import GolemIE
|
||||||
from .googleplus import GooglePlusIE
|
from .googleplus import GooglePlusIE
|
||||||
from .googlesearch import GoogleSearchIE
|
from .googlesearch import GoogleSearchIE
|
||||||
from .gorillavid import GorillaVidIE
|
from .gorillavid import GorillaVidIE
|
||||||
from .goshgay import GoshgayIE
|
from .goshgay import GoshgayIE
|
||||||
from .grooveshark import GroovesharkIE
|
from .grooveshark import GroovesharkIE
|
||||||
from .hark import HarkIE
|
from .hark import HarkIE
|
||||||
|
from .heise import HeiseIE
|
||||||
from .helsinki import HelsinkiIE
|
from .helsinki import HelsinkiIE
|
||||||
from .hentaistigma import HentaiStigmaIE
|
from .hentaistigma import HentaiStigmaIE
|
||||||
from .hornbunny import HornBunnyIE
|
from .hornbunny import HornBunnyIE
|
||||||
@ -188,6 +191,7 @@ from .livestream import (
|
|||||||
LivestreamOriginalIE,
|
LivestreamOriginalIE,
|
||||||
LivestreamShortenerIE,
|
LivestreamShortenerIE,
|
||||||
)
|
)
|
||||||
|
from .lrt import LRTIE
|
||||||
from .lynda import (
|
from .lynda import (
|
||||||
LyndaIE,
|
LyndaIE,
|
||||||
LyndaCourseIE
|
LyndaCourseIE
|
||||||
@ -261,6 +265,7 @@ from .nrk import (
|
|||||||
from .ntv import NTVIE
|
from .ntv import NTVIE
|
||||||
from .nytimes import NYTimesIE
|
from .nytimes import NYTimesIE
|
||||||
from .nuvid import NuvidIE
|
from .nuvid import NuvidIE
|
||||||
|
from .oktoberfesttv import OktoberfestTVIE
|
||||||
from .ooyala import OoyalaIE
|
from .ooyala import OoyalaIE
|
||||||
from .orf import (
|
from .orf import (
|
||||||
ORFTVthekIE,
|
ORFTVthekIE,
|
||||||
@ -271,6 +276,8 @@ from .parliamentliveuk import ParliamentLiveUKIE
|
|||||||
from .patreon import PatreonIE
|
from .patreon import PatreonIE
|
||||||
from .pbs import PBSIE
|
from .pbs import PBSIE
|
||||||
from .photobucket import PhotobucketIE
|
from .photobucket import PhotobucketIE
|
||||||
|
from .planetaplay import PlanetaPlayIE
|
||||||
|
from .played import PlayedIE
|
||||||
from .playfm import PlayFMIE
|
from .playfm import PlayFMIE
|
||||||
from .playvid import PlayvidIE
|
from .playvid import PlayvidIE
|
||||||
from .podomatic import PodomaticIE
|
from .podomatic import PodomaticIE
|
||||||
@ -339,6 +346,8 @@ from .spankwire import SpankwireIE
|
|||||||
from .spiegel import SpiegelIE, SpiegelArticleIE
|
from .spiegel import SpiegelIE, SpiegelArticleIE
|
||||||
from .spiegeltv import SpiegeltvIE
|
from .spiegeltv import SpiegeltvIE
|
||||||
from .spike import SpikeIE
|
from .spike import SpikeIE
|
||||||
|
from .sport5 import Sport5IE
|
||||||
|
from .sportbox import SportBoxIE
|
||||||
from .sportdeutschland import SportDeutschlandIE
|
from .sportdeutschland import SportDeutschlandIE
|
||||||
from .stanfordoc import StanfordOpenClassroomIE
|
from .stanfordoc import StanfordOpenClassroomIE
|
||||||
from .steam import SteamIE
|
from .steam import SteamIE
|
||||||
@ -349,6 +358,7 @@ from .swrmediathek import SWRMediathekIE
|
|||||||
from .syfy import SyfyIE
|
from .syfy import SyfyIE
|
||||||
from .sztvhu import SztvHuIE
|
from .sztvhu import SztvHuIE
|
||||||
from .tagesschau import TagesschauIE
|
from .tagesschau import TagesschauIE
|
||||||
|
from .tapely import TapelyIE
|
||||||
from .teachertube import (
|
from .teachertube import (
|
||||||
TeacherTubeIE,
|
TeacherTubeIE,
|
||||||
TeacherTubeUserIE,
|
TeacherTubeUserIE,
|
||||||
@ -361,12 +371,17 @@ from .telemb import TeleMBIE
|
|||||||
from .tenplay import TenPlayIE
|
from .tenplay import TenPlayIE
|
||||||
from .testurl import TestURLIE
|
from .testurl import TestURLIE
|
||||||
from .tf1 import TF1IE
|
from .tf1 import TF1IE
|
||||||
|
from .theonion import TheOnionIE
|
||||||
from .theplatform import ThePlatformIE
|
from .theplatform import ThePlatformIE
|
||||||
|
from .thesixtyone import TheSixtyOneIE
|
||||||
from .thisav import ThisAVIE
|
from .thisav import ThisAVIE
|
||||||
from .tinypic import TinyPicIE
|
from .tinypic import TinyPicIE
|
||||||
from .tlc import TlcIE, TlcDeIE
|
from .tlc import TlcIE, TlcDeIE
|
||||||
from .tnaflix import TNAFlixIE
|
from .tnaflix import TNAFlixIE
|
||||||
from .thvideo import THVideoIE
|
from .thvideo import (
|
||||||
|
THVideoIE,
|
||||||
|
THVideoPlaylistIE
|
||||||
|
)
|
||||||
from .toutv import TouTvIE
|
from .toutv import TouTvIE
|
||||||
from .toypics import ToypicsUserIE, ToypicsIE
|
from .toypics import ToypicsUserIE, ToypicsIE
|
||||||
from .traileraddict import TrailerAddictIE
|
from .traileraddict import TrailerAddictIE
|
||||||
@ -407,11 +422,12 @@ from .videoweed import VideoWeedIE
|
|||||||
from .vidme import VidmeIE
|
from .vidme import VidmeIE
|
||||||
from .vimeo import (
|
from .vimeo import (
|
||||||
VimeoIE,
|
VimeoIE,
|
||||||
VimeoChannelIE,
|
|
||||||
VimeoUserIE,
|
|
||||||
VimeoAlbumIE,
|
VimeoAlbumIE,
|
||||||
|
VimeoChannelIE,
|
||||||
VimeoGroupsIE,
|
VimeoGroupsIE,
|
||||||
|
VimeoLikesIE,
|
||||||
VimeoReviewIE,
|
VimeoReviewIE,
|
||||||
|
VimeoUserIE,
|
||||||
VimeoWatchLaterIE,
|
VimeoWatchLaterIE,
|
||||||
)
|
)
|
||||||
from .vimple import VimpleIE
|
from .vimple import VimpleIE
|
||||||
@ -426,6 +442,7 @@ from .vporn import VpornIE
|
|||||||
from .vube import VubeIE
|
from .vube import VubeIE
|
||||||
from .vuclip import VuClipIE
|
from .vuclip import VuClipIE
|
||||||
from .vulture import VultureIE
|
from .vulture import VultureIE
|
||||||
|
from .walla import WallaIE
|
||||||
from .washingtonpost import WashingtonPostIE
|
from .washingtonpost import WashingtonPostIE
|
||||||
from .wat import WatIE
|
from .wat import WatIE
|
||||||
from .wayofthemaster import WayOfTheMasterIE
|
from .wayofthemaster import WayOfTheMasterIE
|
||||||
@ -447,9 +464,9 @@ from .xvideos import XVideosIE
|
|||||||
from .xtube import XTubeUserIE, XTubeIE
|
from .xtube import XTubeUserIE, XTubeIE
|
||||||
from .yahoo import (
|
from .yahoo import (
|
||||||
YahooIE,
|
YahooIE,
|
||||||
YahooNewsIE,
|
|
||||||
YahooSearchIE,
|
YahooSearchIE,
|
||||||
)
|
)
|
||||||
|
from .ynet import YnetIE
|
||||||
from .youjizz import YouJizzIE
|
from .youjizz import YouJizzIE
|
||||||
from .youku import YoukuIE
|
from .youku import YoukuIE
|
||||||
from .youporn import YouPornIE
|
from .youporn import YouPornIE
|
||||||
|
@ -22,8 +22,7 @@ class ABCIE(InfoExtractor):
|
|||||||
}
|
}
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
mobj = re.match(self._VALID_URL, url)
|
video_id = self._match_id(url)
|
||||||
video_id = mobj.group('id')
|
|
||||||
webpage = self._download_webpage(url, video_id)
|
webpage = self._download_webpage(url, video_id)
|
||||||
|
|
||||||
urls_info_json = self._search_regex(
|
urls_info_json = self._search_regex(
|
||||||
|
@ -35,7 +35,7 @@ class AnySexIE(InfoExtractor):
|
|||||||
|
|
||||||
title = self._html_search_regex(r'<title>(.*?)</title>', webpage, 'title')
|
title = self._html_search_regex(r'<title>(.*?)</title>', webpage, 'title')
|
||||||
description = self._html_search_regex(
|
description = self._html_search_regex(
|
||||||
r'<div class="description">([^<]+)</div>', webpage, 'description', fatal=False)
|
r'<div class="description"[^>]*>([^<]+)</div>', webpage, 'description', fatal=False)
|
||||||
thumbnail = self._html_search_regex(
|
thumbnail = self._html_search_regex(
|
||||||
r'preview_url\s*:\s*\'(.*?)\'', webpage, 'thumbnail', fatal=False)
|
r'preview_url\s*:\s*\'(.*?)\'', webpage, 'thumbnail', fatal=False)
|
||||||
|
|
||||||
@ -43,7 +43,7 @@ class AnySexIE(InfoExtractor):
|
|||||||
r'<a href="http://anysex\.com/categories/[^"]+" title="[^"]*">([^<]+)</a>', webpage)
|
r'<a href="http://anysex\.com/categories/[^"]+" title="[^"]*">([^<]+)</a>', webpage)
|
||||||
|
|
||||||
duration = parse_duration(self._search_regex(
|
duration = parse_duration(self._search_regex(
|
||||||
r'<b>Duration:</b> (\d+:\d+)', webpage, 'duration', fatal=False))
|
r'<b>Duration:</b> (?:<q itemprop="duration">)?(\d+:\d+)', webpage, 'duration', fatal=False))
|
||||||
view_count = int_or_none(self._html_search_regex(
|
view_count = int_or_none(self._html_search_regex(
|
||||||
r'<b>Views:</b> (\d+)', webpage, 'view count', fatal=False))
|
r'<b>Views:</b> (\d+)', webpage, 'view count', fatal=False))
|
||||||
|
|
||||||
|
@ -8,8 +8,6 @@ from ..utils import (
|
|||||||
determine_ext,
|
determine_ext,
|
||||||
ExtractorError,
|
ExtractorError,
|
||||||
qualities,
|
qualities,
|
||||||
compat_urllib_parse_urlparse,
|
|
||||||
compat_urllib_parse,
|
|
||||||
int_or_none,
|
int_or_none,
|
||||||
parse_duration,
|
parse_duration,
|
||||||
unified_strdate,
|
unified_strdate,
|
||||||
|
@ -86,11 +86,15 @@ class ArteTVPlus7IE(InfoExtractor):
|
|||||||
info = self._download_json(json_url, video_id)
|
info = self._download_json(json_url, video_id)
|
||||||
player_info = info['videoJsonPlayer']
|
player_info = info['videoJsonPlayer']
|
||||||
|
|
||||||
|
upload_date_str = player_info.get('shootingDate')
|
||||||
|
if not upload_date_str:
|
||||||
|
upload_date_str = player_info.get('VDA', '').split(' ')[0]
|
||||||
|
|
||||||
info_dict = {
|
info_dict = {
|
||||||
'id': player_info['VID'],
|
'id': player_info['VID'],
|
||||||
'title': player_info['VTI'],
|
'title': player_info['VTI'],
|
||||||
'description': player_info.get('VDE'),
|
'description': player_info.get('VDE'),
|
||||||
'upload_date': unified_strdate(player_info.get('VDA', '').split(' ')[0]),
|
'upload_date': unified_strdate(upload_date_str),
|
||||||
'thumbnail': player_info.get('programImage') or player_info.get('VTU', {}).get('IUR'),
|
'thumbnail': player_info.get('programImage') or player_info.get('VTU', {}).get('IUR'),
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -15,13 +15,23 @@ class BandcampIE(InfoExtractor):
|
|||||||
_VALID_URL = r'https?://.*?\.bandcamp\.com/track/(?P<title>.*)'
|
_VALID_URL = r'https?://.*?\.bandcamp\.com/track/(?P<title>.*)'
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
'url': 'http://youtube-dl.bandcamp.com/track/youtube-dl-test-song',
|
'url': 'http://youtube-dl.bandcamp.com/track/youtube-dl-test-song',
|
||||||
'file': '1812978515.mp3',
|
|
||||||
'md5': 'c557841d5e50261777a6585648adf439',
|
'md5': 'c557841d5e50261777a6585648adf439',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
"title": "youtube-dl \"'/\\\u00e4\u21ad - youtube-dl test song \"'/\\\u00e4\u21ad",
|
'id': '1812978515',
|
||||||
"duration": 9.8485,
|
'ext': 'mp3',
|
||||||
|
'title': "youtube-dl \"'/\\\u00e4\u21ad - youtube-dl test song \"'/\\\u00e4\u21ad",
|
||||||
|
'duration': 9.8485,
|
||||||
},
|
},
|
||||||
'_skip': 'There is a limit of 200 free downloads / month for the test song'
|
'_skip': 'There is a limit of 200 free downloads / month for the test song'
|
||||||
|
}, {
|
||||||
|
'url': 'http://benprunty.bandcamp.com/track/lanius-battle',
|
||||||
|
'md5': '2b68e5851514c20efdff2afc5603b8b4',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '2650410135',
|
||||||
|
'ext': 'mp3',
|
||||||
|
'title': 'Lanius (Battle)',
|
||||||
|
'uploader': 'Ben Prunty Music',
|
||||||
|
},
|
||||||
}]
|
}]
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
@ -59,9 +69,9 @@ class BandcampIE(InfoExtractor):
|
|||||||
raise ExtractorError('No free songs found')
|
raise ExtractorError('No free songs found')
|
||||||
|
|
||||||
download_link = m_download.group(1)
|
download_link = m_download.group(1)
|
||||||
video_id = re.search(
|
video_id = self._search_regex(
|
||||||
r'var TralbumData = {(.*?)id: (?P<id>\d*?)$',
|
r'var TralbumData = {.*?id: (?P<id>\d+),?$',
|
||||||
webpage, re.MULTILINE | re.DOTALL).group('id')
|
webpage, 'video id', flags=re.MULTILINE | re.DOTALL)
|
||||||
|
|
||||||
download_webpage = self._download_webpage(download_link, video_id, 'Downloading free downloads page')
|
download_webpage = self._download_webpage(download_link, video_id, 'Downloading free downloads page')
|
||||||
# We get the dictionary of the track from some javascript code
|
# We get the dictionary of the track from some javascript code
|
||||||
|
@ -26,6 +26,8 @@ class BRIE(InfoExtractor):
|
|||||||
'title': 'Wenn das Traditions-Theater wackelt',
|
'title': 'Wenn das Traditions-Theater wackelt',
|
||||||
'description': 'Heimatsound-Festival 2014: Wenn das Traditions-Theater wackelt',
|
'description': 'Heimatsound-Festival 2014: Wenn das Traditions-Theater wackelt',
|
||||||
'duration': 34,
|
'duration': 34,
|
||||||
|
'uploader': 'BR',
|
||||||
|
'upload_date': '20140802',
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@ -66,8 +68,7 @@ class BRIE(InfoExtractor):
|
|||||||
]
|
]
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
mobj = re.match(self._VALID_URL, url)
|
display_id = self._match_id(url)
|
||||||
display_id = mobj.group('id')
|
|
||||||
page = self._download_webpage(url, display_id)
|
page = self._download_webpage(url, display_id)
|
||||||
xml_url = self._search_regex(
|
xml_url = self._search_regex(
|
||||||
r"return BRavFramework\.register\(BRavFramework\('avPlayer_(?:[a-f0-9-]{36})'\)\.setup\({dataURL:'(/(?:[a-z0-9\-]+/)+[a-z0-9/~_.-]+)'}\)\);", page, 'XMLURL')
|
r"return BRavFramework\.register\(BRavFramework\('avPlayer_(?:[a-f0-9-]{36})'\)\.setup\({dataURL:'(/(?:[a-z0-9\-]+/)+[a-z0-9/~_.-]+)'}\)\);", page, 'XMLURL')
|
||||||
|
@ -4,37 +4,61 @@ import re
|
|||||||
import json
|
import json
|
||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
|
from ..utils import (
|
||||||
|
int_or_none,
|
||||||
|
parse_age_limit,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
class BreakIE(InfoExtractor):
|
class BreakIE(InfoExtractor):
|
||||||
_VALID_URL = r'http://(?:www\.)?break\.com/video/([^/]+)'
|
_VALID_URL = r'http://(?:www\.)?break\.com/video/(?:[^/]+/)*.+-(?P<id>\d+)'
|
||||||
_TEST = {
|
_TESTS = [{
|
||||||
'url': 'http://www.break.com/video/when-girls-act-like-guys-2468056',
|
'url': 'http://www.break.com/video/when-girls-act-like-guys-2468056',
|
||||||
'md5': 'a3513fb1547fba4fb6cfac1bffc6c46b',
|
'md5': '33aa4ff477ecd124d18d7b5d23b87ce5',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '2468056',
|
'id': '2468056',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'title': 'When Girls Act Like D-Bags',
|
'title': 'When Girls Act Like D-Bags',
|
||||||
}
|
}
|
||||||
}
|
}, {
|
||||||
|
'url': 'http://www.break.com/video/ugc/baby-flex-2773063',
|
||||||
|
'only_matching': True,
|
||||||
|
}]
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
mobj = re.match(self._VALID_URL, url)
|
video_id = self._match_id(url)
|
||||||
video_id = mobj.group(1).split("-")[-1]
|
webpage = self._download_webpage(
|
||||||
embed_url = 'http://www.break.com/embed/%s' % video_id
|
'http://www.break.com/embed/%s' % video_id, video_id)
|
||||||
webpage = self._download_webpage(embed_url, video_id)
|
info = json.loads(self._search_regex(
|
||||||
info_json = self._search_regex(r'var embedVars = ({.*})\s*?</script>',
|
r'var embedVars = ({.*})\s*?</script>',
|
||||||
webpage, 'info json', flags=re.DOTALL)
|
webpage, 'info json', flags=re.DOTALL))
|
||||||
info = json.loads(info_json)
|
|
||||||
video_url = info['videoUri']
|
|
||||||
youtube_id = info.get('youtubeId')
|
youtube_id = info.get('youtubeId')
|
||||||
if youtube_id:
|
if youtube_id:
|
||||||
return self.url_result(youtube_id, 'Youtube')
|
return self.url_result(youtube_id, 'Youtube')
|
||||||
|
|
||||||
final_url = video_url + '?' + info['AuthToken']
|
formats = [{
|
||||||
|
'url': media['uri'] + '?' + info['AuthToken'],
|
||||||
|
'tbr': media['bitRate'],
|
||||||
|
'width': media['width'],
|
||||||
|
'height': media['height'],
|
||||||
|
} for media in info['media']]
|
||||||
|
|
||||||
|
if not formats:
|
||||||
|
formats.append({
|
||||||
|
'url': info['videoUri']
|
||||||
|
})
|
||||||
|
|
||||||
|
self._sort_formats(formats)
|
||||||
|
|
||||||
|
duration = int_or_none(info.get('videoLengthInSeconds'))
|
||||||
|
age_limit = parse_age_limit(info.get('audienceRating'))
|
||||||
|
|
||||||
return {
|
return {
|
||||||
'id': video_id,
|
'id': video_id,
|
||||||
'url': final_url,
|
|
||||||
'title': info['contentName'],
|
'title': info['contentName'],
|
||||||
'thumbnail': info['thumbUri'],
|
'thumbnail': info['thumbUri'],
|
||||||
|
'duration': duration,
|
||||||
|
'age_limit': age_limit,
|
||||||
|
'formats': formats,
|
||||||
}
|
}
|
||||||
|
@ -35,7 +35,6 @@ class CliphunterIE(InfoExtractor):
|
|||||||
'title': 'Fun Jynx Maze solo',
|
'title': 'Fun Jynx Maze solo',
|
||||||
'thumbnail': 're:^https?://.*\.jpg$',
|
'thumbnail': 're:^https?://.*\.jpg$',
|
||||||
'age_limit': 18,
|
'age_limit': 18,
|
||||||
'duration': 1317,
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -86,14 +85,11 @@ class CliphunterIE(InfoExtractor):
|
|||||||
thumbnail = self._search_regex(
|
thumbnail = self._search_regex(
|
||||||
r"var\s+mov_thumb\s*=\s*'([^']+)';",
|
r"var\s+mov_thumb\s*=\s*'([^']+)';",
|
||||||
webpage, 'thumbnail', fatal=False)
|
webpage, 'thumbnail', fatal=False)
|
||||||
duration = int_or_none(self._search_regex(
|
|
||||||
r'pl_dur\s*=\s*([0-9]+)', webpage, 'duration', fatal=False))
|
|
||||||
|
|
||||||
return {
|
return {
|
||||||
'id': video_id,
|
'id': video_id,
|
||||||
'title': video_title,
|
'title': video_title,
|
||||||
'formats': formats,
|
'formats': formats,
|
||||||
'duration': duration,
|
|
||||||
'age_limit': self._rta_search(webpage),
|
'age_limit': self._rta_search(webpage),
|
||||||
'thumbnail': thumbnail,
|
'thumbnail': thumbnail,
|
||||||
}
|
}
|
||||||
|
@ -1,6 +1,7 @@
|
|||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
import base64
|
import base64
|
||||||
|
import datetime
|
||||||
import hashlib
|
import hashlib
|
||||||
import json
|
import json
|
||||||
import netrc
|
import netrc
|
||||||
@ -21,6 +22,7 @@ from ..utils import (
|
|||||||
clean_html,
|
clean_html,
|
||||||
compiled_regex_type,
|
compiled_regex_type,
|
||||||
ExtractorError,
|
ExtractorError,
|
||||||
|
float_or_none,
|
||||||
int_or_none,
|
int_or_none,
|
||||||
RegexNotFoundError,
|
RegexNotFoundError,
|
||||||
sanitize_filename,
|
sanitize_filename,
|
||||||
@ -136,6 +138,8 @@ class InfoExtractor(object):
|
|||||||
|
|
||||||
Unless mentioned otherwise, the fields should be Unicode strings.
|
Unless mentioned otherwise, the fields should be Unicode strings.
|
||||||
|
|
||||||
|
Unless mentioned otherwise, None is equivalent to absence of information.
|
||||||
|
|
||||||
Subclasses of this one should re-define the _real_initialize() and
|
Subclasses of this one should re-define the _real_initialize() and
|
||||||
_real_extract() methods and define a _VALID_URL regexp.
|
_real_extract() methods and define a _VALID_URL regexp.
|
||||||
Probably, they should also be added to the list of extractors.
|
Probably, they should also be added to the list of extractors.
|
||||||
@ -164,6 +168,14 @@ class InfoExtractor(object):
|
|||||||
cls._VALID_URL_RE = re.compile(cls._VALID_URL)
|
cls._VALID_URL_RE = re.compile(cls._VALID_URL)
|
||||||
return cls._VALID_URL_RE.match(url) is not None
|
return cls._VALID_URL_RE.match(url) is not None
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def _match_id(cls, url):
|
||||||
|
if '_VALID_URL_RE' not in cls.__dict__:
|
||||||
|
cls._VALID_URL_RE = re.compile(cls._VALID_URL)
|
||||||
|
m = cls._VALID_URL_RE.match(url)
|
||||||
|
assert m
|
||||||
|
return m.group('id')
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def working(cls):
|
def working(cls):
|
||||||
"""Getter method for _WORKING."""
|
"""Getter method for _WORKING."""
|
||||||
@ -324,7 +336,11 @@ class InfoExtractor(object):
|
|||||||
try:
|
try:
|
||||||
return json.loads(json_string)
|
return json.loads(json_string)
|
||||||
except ValueError as ve:
|
except ValueError as ve:
|
||||||
raise ExtractorError('Failed to download JSON', cause=ve)
|
errmsg = '%s: Failed to parse JSON ' % video_id
|
||||||
|
if fatal:
|
||||||
|
raise ExtractorError(errmsg, cause=ve)
|
||||||
|
else:
|
||||||
|
self.report_warning(errmsg + str(ve))
|
||||||
|
|
||||||
def report_warning(self, msg, video_id=None):
|
def report_warning(self, msg, video_id=None):
|
||||||
idstr = '' if video_id is None else '%s: ' % video_id
|
idstr = '' if video_id is None else '%s: ' % video_id
|
||||||
@ -705,6 +721,34 @@ class InfoExtractor(object):
|
|||||||
self._sort_formats(formats)
|
self._sort_formats(formats)
|
||||||
return formats
|
return formats
|
||||||
|
|
||||||
|
def _live_title(self, name):
|
||||||
|
""" Generate the title for a live video """
|
||||||
|
now = datetime.datetime.now()
|
||||||
|
now_str = now.strftime("%Y-%m-%d %H:%M")
|
||||||
|
return name + ' ' + now_str
|
||||||
|
|
||||||
|
def _int(self, v, name, fatal=False, **kwargs):
|
||||||
|
res = int_or_none(v, **kwargs)
|
||||||
|
if 'get_attr' in kwargs:
|
||||||
|
print(getattr(v, kwargs['get_attr']))
|
||||||
|
if res is None:
|
||||||
|
msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
|
||||||
|
if fatal:
|
||||||
|
raise ExtractorError(msg)
|
||||||
|
else:
|
||||||
|
self._downloader.report_warning(msg)
|
||||||
|
return res
|
||||||
|
|
||||||
|
def _float(self, v, name, fatal=False, **kwargs):
|
||||||
|
res = float_or_none(v, **kwargs)
|
||||||
|
if res is None:
|
||||||
|
msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
|
||||||
|
if fatal:
|
||||||
|
raise ExtractorError(msg)
|
||||||
|
else:
|
||||||
|
self._downloader.report_warning(msg)
|
||||||
|
return res
|
||||||
|
|
||||||
|
|
||||||
class SearchInfoExtractor(InfoExtractor):
|
class SearchInfoExtractor(InfoExtractor):
|
||||||
"""
|
"""
|
||||||
|
@ -9,7 +9,7 @@ import xml.etree.ElementTree
|
|||||||
|
|
||||||
from hashlib import sha1
|
from hashlib import sha1
|
||||||
from math import pow, sqrt, floor
|
from math import pow, sqrt, floor
|
||||||
from .common import InfoExtractor
|
from .subtitles import SubtitlesInfoExtractor
|
||||||
from ..utils import (
|
from ..utils import (
|
||||||
ExtractorError,
|
ExtractorError,
|
||||||
compat_urllib_parse,
|
compat_urllib_parse,
|
||||||
@ -26,7 +26,7 @@ from ..aes import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
class CrunchyrollIE(InfoExtractor):
|
class CrunchyrollIE(SubtitlesInfoExtractor):
|
||||||
_VALID_URL = r'https?://(?:(?P<prefix>www|m)\.)?(?P<url>crunchyroll\.com/(?:[^/]*/[^/?&]*?|media/\?id=)(?P<video_id>[0-9]+))(?:[/?&]|$)'
|
_VALID_URL = r'https?://(?:(?P<prefix>www|m)\.)?(?P<url>crunchyroll\.com/(?:[^/]*/[^/?&]*?|media/\?id=)(?P<video_id>[0-9]+))(?:[/?&]|$)'
|
||||||
_TEST = {
|
_TEST = {
|
||||||
'url': 'http://www.crunchyroll.com/wanna-be-the-strongest-in-the-world/episode-1-an-idol-wrestler-is-born-645513',
|
'url': 'http://www.crunchyroll.com/wanna-be-the-strongest-in-the-world/episode-1-an-idol-wrestler-is-born-645513',
|
||||||
@ -271,6 +271,10 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
|
|||||||
else:
|
else:
|
||||||
subtitles[lang_code] = self._convert_subtitles_to_srt(subtitle)
|
subtitles[lang_code] = self._convert_subtitles_to_srt(subtitle)
|
||||||
|
|
||||||
|
if self._downloader.params.get('listsubtitles', False):
|
||||||
|
self._list_available_subtitles(video_id, subtitles)
|
||||||
|
return
|
||||||
|
|
||||||
return {
|
return {
|
||||||
'id': video_id,
|
'id': video_id,
|
||||||
'title': video_title,
|
'title': video_title,
|
||||||
|
@ -82,11 +82,7 @@ class DailymotionIE(DailymotionBaseInfoExtractor, SubtitlesInfoExtractor):
|
|||||||
]
|
]
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
# Extract id and simplified title from URL
|
video_id = self._match_id(url)
|
||||||
mobj = re.match(self._VALID_URL, url)
|
|
||||||
|
|
||||||
video_id = mobj.group('id')
|
|
||||||
|
|
||||||
url = 'http://www.dailymotion.com/video/%s' % video_id
|
url = 'http://www.dailymotion.com/video/%s' % video_id
|
||||||
|
|
||||||
# Retrieve video webpage to extract further information
|
# Retrieve video webpage to extract further information
|
||||||
@ -147,18 +143,23 @@ class DailymotionIE(DailymotionBaseInfoExtractor, SubtitlesInfoExtractor):
|
|||||||
self._list_available_subtitles(video_id, webpage)
|
self._list_available_subtitles(video_id, webpage)
|
||||||
return
|
return
|
||||||
|
|
||||||
view_count = self._search_regex(
|
view_count = str_to_int(self._search_regex(
|
||||||
r'video_views_count[^>]+>\s+([\d\.,]+)', webpage, 'view count', fatal=False)
|
r'video_views_count[^>]+>\s+([\d\.,]+)',
|
||||||
if view_count is not None:
|
webpage, 'view count', fatal=False))
|
||||||
view_count = str_to_int(view_count)
|
|
||||||
|
title = self._og_search_title(webpage, default=None)
|
||||||
|
if title is None:
|
||||||
|
title = self._html_search_regex(
|
||||||
|
r'(?s)<span\s+id="video_title"[^>]*>(.*?)</span>', webpage,
|
||||||
|
'title')
|
||||||
|
|
||||||
return {
|
return {
|
||||||
'id': video_id,
|
'id': video_id,
|
||||||
'formats': formats,
|
'formats': formats,
|
||||||
'uploader': info['owner.screenname'],
|
'uploader': info['owner.screenname'],
|
||||||
'upload_date': video_upload_date,
|
'upload_date': video_upload_date,
|
||||||
'title': self._og_search_title(webpage),
|
'title': title,
|
||||||
'subtitles': video_subtitles,
|
'subtitles': video_subtitles,
|
||||||
'thumbnail': info['thumbnail_url'],
|
'thumbnail': info['thumbnail_url'],
|
||||||
'age_limit': age_limit,
|
'age_limit': age_limit,
|
||||||
'view_count': view_count,
|
'view_count': view_count,
|
||||||
|
@ -29,9 +29,8 @@ class DropboxIE(InfoExtractor):
|
|||||||
video_id = mobj.group('id')
|
video_id = mobj.group('id')
|
||||||
fn = compat_urllib_parse_unquote(url_basename(url))
|
fn = compat_urllib_parse_unquote(url_basename(url))
|
||||||
title = os.path.splitext(fn)[0]
|
title = os.path.splitext(fn)[0]
|
||||||
video_url = (
|
video_url = re.sub(r'[?&]dl=0', '', url)
|
||||||
re.sub(r'[?&]dl=0', '', url) +
|
video_url += ('?' if '?' not in video_url else '&') + 'dl=1'
|
||||||
('?' if '?' in url else '&') + 'dl=1')
|
|
||||||
|
|
||||||
return {
|
return {
|
||||||
'id': video_id,
|
'id': video_id,
|
||||||
|
@ -1,4 +1,6 @@
|
|||||||
# encoding: utf-8
|
# encoding: utf-8
|
||||||
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
import re
|
import re
|
||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
@ -7,20 +9,20 @@ from ..utils import ExtractorError
|
|||||||
|
|
||||||
|
|
||||||
class EitbIE(InfoExtractor):
|
class EitbIE(InfoExtractor):
|
||||||
IE_NAME = u'eitb.tv'
|
IE_NAME = 'eitb.tv'
|
||||||
_VALID_URL = r'https?://www\.eitb\.tv/(eu/bideoa|es/video)/[^/]+/(?P<playlist_id>\d+)/(?P<chapter_id>\d+)'
|
_VALID_URL = r'https?://www\.eitb\.tv/(eu/bideoa|es/video)/[^/]+/(?P<playlist_id>\d+)/(?P<chapter_id>\d+)'
|
||||||
|
|
||||||
_TEST = {
|
_TEST = {
|
||||||
u'add_ie': ['Brightcove'],
|
'add_ie': ['Brightcove'],
|
||||||
u'url': u'http://www.eitb.tv/es/video/60-minutos-60-minutos-2013-2014/2677100210001/2743577154001/lasa-y-zabala-30-anos/',
|
'url': 'http://www.eitb.tv/es/video/60-minutos-60-minutos-2013-2014/2677100210001/2743577154001/lasa-y-zabala-30-anos/',
|
||||||
u'md5': u'edf4436247185adee3ea18ce64c47998',
|
'md5': 'edf4436247185adee3ea18ce64c47998',
|
||||||
u'info_dict': {
|
'info_dict': {
|
||||||
u'id': u'2743577154001',
|
'id': '2743577154001',
|
||||||
u'ext': u'mp4',
|
'ext': 'mp4',
|
||||||
u'title': u'60 minutos (Lasa y Zabala, 30 años)',
|
'title': '60 minutos (Lasa y Zabala, 30 años)',
|
||||||
# All videos from eitb has this description in the brightcove info
|
# All videos from eitb has this description in the brightcove info
|
||||||
u'description': u'.',
|
'description': '.',
|
||||||
u'uploader': u'Euskal Telebista',
|
'uploader': 'Euskal Telebista',
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -30,7 +32,7 @@ class EitbIE(InfoExtractor):
|
|||||||
webpage = self._download_webpage(url, chapter_id)
|
webpage = self._download_webpage(url, chapter_id)
|
||||||
bc_url = BrightcoveIE._extract_brightcove_url(webpage)
|
bc_url = BrightcoveIE._extract_brightcove_url(webpage)
|
||||||
if bc_url is None:
|
if bc_url is None:
|
||||||
raise ExtractorError(u'Could not extract the Brightcove url')
|
raise ExtractorError('Could not extract the Brightcove url')
|
||||||
# The BrightcoveExperience object doesn't contain the video id, we set
|
# The BrightcoveExperience object doesn't contain the video id, we set
|
||||||
# it manually
|
# it manually
|
||||||
bc_url += '&%40videoPlayer={0}'.format(chapter_id)
|
bc_url += '&%40videoPlayer={0}'.format(chapter_id)
|
||||||
|
@ -14,11 +14,11 @@ class EpornerIE(InfoExtractor):
|
|||||||
_VALID_URL = r'https?://(?:www\.)?eporner\.com/hd-porn/(?P<id>\d+)/(?P<display_id>[\w-]+)'
|
_VALID_URL = r'https?://(?:www\.)?eporner\.com/hd-porn/(?P<id>\d+)/(?P<display_id>[\w-]+)'
|
||||||
_TEST = {
|
_TEST = {
|
||||||
'url': 'http://www.eporner.com/hd-porn/95008/Infamous-Tiffany-Teen-Strip-Tease-Video/',
|
'url': 'http://www.eporner.com/hd-porn/95008/Infamous-Tiffany-Teen-Strip-Tease-Video/',
|
||||||
'md5': '3b427ae4b9d60619106de3185c2987cd',
|
'md5': '39d486f046212d8e1b911c52ab4691f8',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '95008',
|
'id': '95008',
|
||||||
'display_id': 'Infamous-Tiffany-Teen-Strip-Tease-Video',
|
'display_id': 'Infamous-Tiffany-Teen-Strip-Tease-Video',
|
||||||
'ext': 'flv',
|
'ext': 'mp4',
|
||||||
'title': 'Infamous Tiffany Teen Strip Tease Video',
|
'title': 'Infamous Tiffany Teen Strip Tease Video',
|
||||||
'duration': 194,
|
'duration': 194,
|
||||||
'view_count': int,
|
'view_count': int,
|
||||||
|
@ -7,6 +7,7 @@ from ..utils import (
|
|||||||
compat_urllib_parse_urlparse,
|
compat_urllib_parse_urlparse,
|
||||||
compat_urllib_request,
|
compat_urllib_request,
|
||||||
compat_urllib_parse,
|
compat_urllib_parse,
|
||||||
|
str_to_int,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
@ -20,6 +21,7 @@ class ExtremeTubeIE(InfoExtractor):
|
|||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'title': 'Music Video 14 british euro brit european cumshots swallow',
|
'title': 'Music Video 14 british euro brit european cumshots swallow',
|
||||||
'uploader': 'unknown',
|
'uploader': 'unknown',
|
||||||
|
'view_count': int,
|
||||||
'age_limit': 18,
|
'age_limit': 18,
|
||||||
}
|
}
|
||||||
}, {
|
}, {
|
||||||
@ -39,8 +41,12 @@ class ExtremeTubeIE(InfoExtractor):
|
|||||||
video_title = self._html_search_regex(
|
video_title = self._html_search_regex(
|
||||||
r'<h1 [^>]*?title="([^"]+)"[^>]*>', webpage, 'title')
|
r'<h1 [^>]*?title="([^"]+)"[^>]*>', webpage, 'title')
|
||||||
uploader = self._html_search_regex(
|
uploader = self._html_search_regex(
|
||||||
r'>Posted by:(?=<)(?:\s|<[^>]*>)*(.+?)\|', webpage, 'uploader',
|
r'Uploaded by:\s*</strong>\s*(.+?)\s*</div>',
|
||||||
fatal=False)
|
webpage, 'uploader', fatal=False)
|
||||||
|
view_count = str_to_int(self._html_search_regex(
|
||||||
|
r'Views:\s*</strong>\s*<span>([\d,\.]+)</span>',
|
||||||
|
webpage, 'view count', fatal=False))
|
||||||
|
|
||||||
video_url = compat_urllib_parse.unquote(self._html_search_regex(
|
video_url = compat_urllib_parse.unquote(self._html_search_regex(
|
||||||
r'video_url=(.+?)&', webpage, 'video_url'))
|
r'video_url=(.+?)&', webpage, 'video_url'))
|
||||||
path = compat_urllib_parse_urlparse(video_url).path
|
path = compat_urllib_parse_urlparse(video_url).path
|
||||||
@ -51,6 +57,7 @@ class ExtremeTubeIE(InfoExtractor):
|
|||||||
'id': video_id,
|
'id': video_id,
|
||||||
'title': video_title,
|
'title': video_title,
|
||||||
'uploader': uploader,
|
'uploader': uploader,
|
||||||
|
'view_count': view_count,
|
||||||
'url': video_url,
|
'url': video_url,
|
||||||
'format': format,
|
'format': format,
|
||||||
'format_id': format,
|
'format_id': format,
|
||||||
|
@ -35,7 +35,7 @@ class FacebookIE(InfoExtractor):
|
|||||||
'id': '637842556329505',
|
'id': '637842556329505',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'duration': 38,
|
'duration': 38,
|
||||||
'title': 'Did you know Kei Nishikori is the first Asian man to ever reach a Grand Slam fin...',
|
'title': 're:Did you know Kei Nishikori is the first Asian man to ever reach a Grand Slam',
|
||||||
}
|
}
|
||||||
}, {
|
}, {
|
||||||
'note': 'Video without discernible title',
|
'note': 'Video without discernible title',
|
||||||
|
@ -21,7 +21,7 @@ class FunnyOrDieIE(InfoExtractor):
|
|||||||
},
|
},
|
||||||
}, {
|
}, {
|
||||||
'url': 'http://www.funnyordie.com/embed/e402820827',
|
'url': 'http://www.funnyordie.com/embed/e402820827',
|
||||||
'md5': 'ff4d83318f89776ed0250634cfaa8d36',
|
'md5': '29f4c5e5a61ca39dfd7e8348a75d0aad',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': 'e402820827',
|
'id': 'e402820827',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
|
@ -155,7 +155,6 @@ class GenericIE(InfoExtractor):
|
|||||||
# funnyordie embed
|
# funnyordie embed
|
||||||
{
|
{
|
||||||
'url': 'http://www.theguardian.com/world/2014/mar/11/obama-zach-galifianakis-between-two-ferns',
|
'url': 'http://www.theguardian.com/world/2014/mar/11/obama-zach-galifianakis-between-two-ferns',
|
||||||
'md5': '7cf780be104d40fea7bae52eed4a470e',
|
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '18e820ec3f',
|
'id': '18e820ec3f',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
@ -180,13 +179,13 @@ class GenericIE(InfoExtractor):
|
|||||||
# Embedded TED video
|
# Embedded TED video
|
||||||
{
|
{
|
||||||
'url': 'http://en.support.wordpress.com/videos/ted-talks/',
|
'url': 'http://en.support.wordpress.com/videos/ted-talks/',
|
||||||
'md5': 'deeeabcc1085eb2ba205474e7235a3d5',
|
'md5': '65fdff94098e4a607385a60c5177c638',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '981',
|
'id': '1969',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'title': 'My web playroom',
|
'title': 'Hidden miracles of the natural world',
|
||||||
'uploader': 'Ze Frank',
|
'uploader': 'Louie Schwartzberg',
|
||||||
'description': 'md5:ddb2a40ecd6b6a147e400e535874947b',
|
'description': 'md5:8145d19d320ff3e52f28401f4c4283b9',
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
# Embeded Ustream video
|
# Embeded Ustream video
|
||||||
@ -226,21 +225,6 @@ class GenericIE(InfoExtractor):
|
|||||||
'skip_download': 'Requires rtmpdump'
|
'skip_download': 'Requires rtmpdump'
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
# smotri embed
|
|
||||||
{
|
|
||||||
'url': 'http://rbctv.rbc.ru/archive/news/562949990879132.shtml',
|
|
||||||
'md5': 'ec40048448e9284c9a1de77bb188108b',
|
|
||||||
'info_dict': {
|
|
||||||
'id': 'v27008541fad',
|
|
||||||
'ext': 'mp4',
|
|
||||||
'title': 'Крым и Севастополь вошли в состав России',
|
|
||||||
'description': 'md5:fae01b61f68984c7bd2fa741e11c3175',
|
|
||||||
'duration': 900,
|
|
||||||
'upload_date': '20140318',
|
|
||||||
'uploader': 'rbctv_2012_4',
|
|
||||||
'uploader_id': 'rbctv_2012_4',
|
|
||||||
},
|
|
||||||
},
|
|
||||||
# Condé Nast embed
|
# Condé Nast embed
|
||||||
{
|
{
|
||||||
'url': 'http://www.wired.com/2014/04/honda-asimo/',
|
'url': 'http://www.wired.com/2014/04/honda-asimo/',
|
||||||
@ -295,13 +279,13 @@ class GenericIE(InfoExtractor):
|
|||||||
{
|
{
|
||||||
'url': 'https://play.google.com/store/apps/details?id=com.gameloft.android.ANMP.GloftA8HM',
|
'url': 'https://play.google.com/store/apps/details?id=com.gameloft.android.ANMP.GloftA8HM',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': 'jpSGZsgga_I',
|
'id': '4vAffPZIT44',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'title': 'Asphalt 8: Airborne - Launch Trailer',
|
'title': 'Asphalt 8: Airborne - Update - Welcome to Dubai!',
|
||||||
'uploader': 'Gameloft',
|
'uploader': 'Gameloft',
|
||||||
'uploader_id': 'gameloft',
|
'uploader_id': 'gameloft',
|
||||||
'upload_date': '20130821',
|
'upload_date': '20140828',
|
||||||
'description': 'md5:87bd95f13d8be3e7da87a5f2c443106a',
|
'description': 'md5:c80da9ed3d83ae6d1876c834de03e1c4',
|
||||||
},
|
},
|
||||||
'params': {
|
'params': {
|
||||||
'skip_download': True,
|
'skip_download': True,
|
||||||
@ -382,14 +366,21 @@ class GenericIE(InfoExtractor):
|
|||||||
'thumbnail': 're:^https?://.*\.jpg$',
|
'thumbnail': 're:^https?://.*\.jpg$',
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
# Wistia embed
|
||||||
|
{
|
||||||
|
'url': 'http://education-portal.com/academy/lesson/north-american-exploration-failed-colonies-of-spain-france-england.html#lesson',
|
||||||
|
'md5': '8788b683c777a5cf25621eaf286d0c23',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '1cfaf6b7ea',
|
||||||
|
'ext': 'mov',
|
||||||
|
'title': 'md5:51364a8d3d009997ba99656004b5e20d',
|
||||||
|
'duration': 643.0,
|
||||||
|
'filesize': 182808282,
|
||||||
|
'uploader': 'education-portal.com',
|
||||||
|
},
|
||||||
|
},
|
||||||
]
|
]
|
||||||
|
|
||||||
def report_download_webpage(self, video_id):
|
|
||||||
"""Report webpage download."""
|
|
||||||
if not self._downloader.params.get('test', False):
|
|
||||||
self._downloader.report_warning('Falling back on generic information extractor.')
|
|
||||||
super(GenericIE, self).report_download_webpage(video_id)
|
|
||||||
|
|
||||||
def report_following_redirect(self, new_url):
|
def report_following_redirect(self, new_url):
|
||||||
"""Report information extraction."""
|
"""Report information extraction."""
|
||||||
self._downloader.to_screen('[redirect] Following redirect to %s' % new_url)
|
self._downloader.to_screen('[redirect] Following redirect to %s' % new_url)
|
||||||
@ -489,6 +480,7 @@ class GenericIE(InfoExtractor):
|
|||||||
|
|
||||||
url, smuggled_data = unsmuggle_url(url)
|
url, smuggled_data = unsmuggle_url(url)
|
||||||
force_videoid = None
|
force_videoid = None
|
||||||
|
is_intentional = smuggled_data and smuggled_data.get('to_generic')
|
||||||
if smuggled_data and 'force_videoid' in smuggled_data:
|
if smuggled_data and 'force_videoid' in smuggled_data:
|
||||||
force_videoid = smuggled_data['force_videoid']
|
force_videoid = smuggled_data['force_videoid']
|
||||||
video_id = force_videoid
|
video_id = force_videoid
|
||||||
@ -531,6 +523,9 @@ class GenericIE(InfoExtractor):
|
|||||||
'upload_date': upload_date,
|
'upload_date': upload_date,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if not self._downloader.params.get('test', False) and not is_intentional:
|
||||||
|
self._downloader.report_warning('Falling back on generic information extractor.')
|
||||||
|
|
||||||
try:
|
try:
|
||||||
webpage = self._download_webpage(url, video_id)
|
webpage = self._download_webpage(url, video_id)
|
||||||
except ValueError:
|
except ValueError:
|
||||||
@ -631,7 +626,7 @@ class GenericIE(InfoExtractor):
|
|||||||
)
|
)
|
||||||
(["\'])
|
(["\'])
|
||||||
(?P<url>(?:https?:)?//(?:www\.)?youtube(?:-nocookie)?\.com/
|
(?P<url>(?:https?:)?//(?:www\.)?youtube(?:-nocookie)?\.com/
|
||||||
(?:embed|v)/.+?)
|
(?:embed|v|p)/.+?)
|
||||||
\1''', webpage)
|
\1''', webpage)
|
||||||
if matches:
|
if matches:
|
||||||
return _playlist_from_matches(
|
return _playlist_from_matches(
|
||||||
@ -644,6 +639,16 @@ class GenericIE(InfoExtractor):
|
|||||||
return _playlist_from_matches(
|
return _playlist_from_matches(
|
||||||
matches, lambda m: unescapeHTML(m[1]))
|
matches, lambda m: unescapeHTML(m[1]))
|
||||||
|
|
||||||
|
# Look for embedded Dailymotion playlist player (#3822)
|
||||||
|
m = re.search(
|
||||||
|
r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?dailymotion\.[a-z]{2,3}/widget/jukebox\?.+?)\1', webpage)
|
||||||
|
if m:
|
||||||
|
playlists = re.findall(
|
||||||
|
r'list\[\]=/playlist/([^/]+)/', unescapeHTML(m.group('url')))
|
||||||
|
if playlists:
|
||||||
|
return _playlist_from_matches(
|
||||||
|
playlists, lambda p: '//dailymotion.com/playlist/%s' % p)
|
||||||
|
|
||||||
# Look for embedded Wistia player
|
# Look for embedded Wistia player
|
||||||
match = re.search(
|
match = re.search(
|
||||||
r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:fast\.)?wistia\.net/embed/iframe/.+?)\1', webpage)
|
r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:fast\.)?wistia\.net/embed/iframe/.+?)\1', webpage)
|
||||||
@ -656,6 +661,16 @@ class GenericIE(InfoExtractor):
|
|||||||
'title': video_title,
|
'title': video_title,
|
||||||
'id': video_id,
|
'id': video_id,
|
||||||
}
|
}
|
||||||
|
match = re.search(r'(?:id=["\']wistia_|data-wistiaid=["\']|Wistia\.embed\(["\'])(?P<id>[^"\']+)', webpage)
|
||||||
|
if match:
|
||||||
|
return {
|
||||||
|
'_type': 'url_transparent',
|
||||||
|
'url': 'http://fast.wistia.net/embed/iframe/{0:}'.format(match.group('id')),
|
||||||
|
'ie_key': 'Wistia',
|
||||||
|
'uploader': video_uploader,
|
||||||
|
'title': video_title,
|
||||||
|
'id': match.group('id')
|
||||||
|
}
|
||||||
|
|
||||||
# Look for embedded blip.tv player
|
# Look for embedded blip.tv player
|
||||||
mobj = re.search(r'<meta\s[^>]*https?://api\.blip\.tv/\w+/redirect/\w+/(\d+)', webpage)
|
mobj = re.search(r'<meta\s[^>]*https?://api\.blip\.tv/\w+/redirect/\w+/(\d+)', webpage)
|
||||||
@ -832,47 +847,51 @@ class GenericIE(InfoExtractor):
|
|||||||
if mobj is not None:
|
if mobj is not None:
|
||||||
return self.url_result(mobj.group('url'), 'MLB')
|
return self.url_result(mobj.group('url'), 'MLB')
|
||||||
|
|
||||||
|
def check_video(vurl):
|
||||||
|
vpath = compat_urlparse.urlparse(vurl).path
|
||||||
|
vext = determine_ext(vpath)
|
||||||
|
return '.' in vpath and vext not in ('swf', 'png', 'jpg', 'srt', 'sbv', 'sub', 'vtt', 'ttml')
|
||||||
|
|
||||||
|
def filter_video(urls):
|
||||||
|
return list(filter(check_video, urls))
|
||||||
|
|
||||||
# Start with something easy: JW Player in SWFObject
|
# Start with something easy: JW Player in SWFObject
|
||||||
found = re.findall(r'flashvars: [\'"](?:.*&)?file=(http[^\'"&]*)', webpage)
|
found = filter_video(re.findall(r'flashvars: [\'"](?:.*&)?file=(http[^\'"&]*)', webpage))
|
||||||
if not found:
|
if not found:
|
||||||
# Look for gorilla-vid style embedding
|
# Look for gorilla-vid style embedding
|
||||||
found = re.findall(r'''(?sx)
|
found = filter_video(re.findall(r'''(?sx)
|
||||||
(?:
|
(?:
|
||||||
jw_plugins|
|
jw_plugins|
|
||||||
JWPlayerOptions|
|
JWPlayerOptions|
|
||||||
jwplayer\s*\(\s*["'][^'"]+["']\s*\)\s*\.setup
|
jwplayer\s*\(\s*["'][^'"]+["']\s*\)\s*\.setup
|
||||||
)
|
)
|
||||||
.*?file\s*:\s*["\'](.*?)["\']''', webpage)
|
.*?file\s*:\s*["\'](.*?)["\']''', webpage))
|
||||||
if not found:
|
if not found:
|
||||||
# Broaden the search a little bit
|
# Broaden the search a little bit
|
||||||
found = re.findall(r'[^A-Za-z0-9]?(?:file|source)=(http[^\'"&]*)', webpage)
|
found = filter_video(re.findall(r'[^A-Za-z0-9]?(?:file|source)=(http[^\'"&]*)', webpage))
|
||||||
if not found:
|
if not found:
|
||||||
# Broaden the findall a little bit: JWPlayer JS loader
|
# Broaden the findall a little bit: JWPlayer JS loader
|
||||||
found = re.findall(r'[^A-Za-z0-9]?file["\']?:\s*["\'](http(?![^\'"]+\.[0-9]+[\'"])[^\'"]+)["\']', webpage)
|
found = filter_video(re.findall(
|
||||||
|
r'[^A-Za-z0-9]?file["\']?:\s*["\'](http(?![^\'"]+\.[0-9]+[\'"])[^\'"]+)["\']', webpage))
|
||||||
if not found:
|
if not found:
|
||||||
# Flow player
|
# Flow player
|
||||||
found = re.findall(r'''(?xs)
|
found = filter_video(re.findall(r'''(?xs)
|
||||||
flowplayer\("[^"]+",\s*
|
flowplayer\("[^"]+",\s*
|
||||||
\{[^}]+?\}\s*,
|
\{[^}]+?\}\s*,
|
||||||
\s*{[^}]+? ["']?clip["']?\s*:\s*\{\s*
|
\s*{[^}]+? ["']?clip["']?\s*:\s*\{\s*
|
||||||
["']?url["']?\s*:\s*["']([^"']+)["']
|
["']?url["']?\s*:\s*["']([^"']+)["']
|
||||||
''', webpage)
|
''', webpage))
|
||||||
if not found:
|
if not found:
|
||||||
# Try to find twitter cards info
|
# Try to find twitter cards info
|
||||||
found = re.findall(r'<meta (?:property|name)="twitter:player:stream" (?:content|value)="(.+?)"', webpage)
|
found = filter_video(re.findall(
|
||||||
|
r'<meta (?:property|name)="twitter:player:stream" (?:content|value)="(.+?)"', webpage))
|
||||||
if not found:
|
if not found:
|
||||||
# We look for Open Graph info:
|
# We look for Open Graph info:
|
||||||
# We have to match any number spaces between elements, some sites try to align them (eg.: statigr.am)
|
# We have to match any number spaces between elements, some sites try to align them (eg.: statigr.am)
|
||||||
m_video_type = re.findall(r'<meta.*?property="og:video:type".*?content="video/(.*?)"', webpage)
|
m_video_type = re.findall(r'<meta.*?property="og:video:type".*?content="video/(.*?)"', webpage)
|
||||||
# We only look in og:video if the MIME type is a video, don't try if it's a Flash player:
|
# We only look in og:video if the MIME type is a video, don't try if it's a Flash player:
|
||||||
if m_video_type is not None:
|
if m_video_type is not None:
|
||||||
def check_video(vurl):
|
found = filter_video(re.findall(r'<meta.*?property="og:video".*?content="(.*?)"', webpage))
|
||||||
vpath = compat_urlparse.urlparse(vurl).path
|
|
||||||
vext = determine_ext(vpath)
|
|
||||||
return '.' in vpath and vext not in ('swf', 'png', 'jpg')
|
|
||||||
found = list(filter(
|
|
||||||
check_video,
|
|
||||||
re.findall(r'<meta.*?property="og:video".*?content="(.*?)"', webpage)))
|
|
||||||
if not found:
|
if not found:
|
||||||
# HTML5 video
|
# HTML5 video
|
||||||
found = re.findall(r'(?s)<video[^<]*(?:>.*?<source[^>]+)? src="([^"]+)"', webpage)
|
found = re.findall(r'(?s)<video[^<]*(?:>.*?<source[^>]+)? src="([^"]+)"', webpage)
|
||||||
|
398
youtube_dl/extractor/globo.py
Normal file
398
youtube_dl/extractor/globo.py
Normal file
@ -0,0 +1,398 @@
|
|||||||
|
# coding: utf-8
|
||||||
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
|
import random
|
||||||
|
import math
|
||||||
|
|
||||||
|
from .common import InfoExtractor
|
||||||
|
from ..utils import (
|
||||||
|
ExtractorError,
|
||||||
|
float_or_none,
|
||||||
|
compat_str,
|
||||||
|
compat_chr,
|
||||||
|
compat_ord,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
class GloboIE(InfoExtractor):
|
||||||
|
_VALID_URL = 'https?://.+?\.globo\.com/(?P<id>.+)'
|
||||||
|
|
||||||
|
_API_URL_TEMPLATE = 'http://api.globovideos.com/videos/%s/playlist'
|
||||||
|
_SECURITY_URL_TEMPLATE = 'http://security.video.globo.com/videos/%s/hash?player=flash&version=2.9.9.50&resource_id=%s'
|
||||||
|
|
||||||
|
_VIDEOID_REGEXES = [
|
||||||
|
r'\bdata-video-id="(\d+)"',
|
||||||
|
r'\bdata-player-videosids="(\d+)"',
|
||||||
|
r'<div[^>]+\bid="(\d+)"',
|
||||||
|
]
|
||||||
|
|
||||||
|
_RESIGN_EXPIRATION = 86400
|
||||||
|
|
||||||
|
_TESTS = [
|
||||||
|
{
|
||||||
|
'url': 'http://globotv.globo.com/sportv/futebol-nacional/v/os-gols-de-atletico-mg-3-x-2-santos-pela-24a-rodada-do-brasileirao/3654973/',
|
||||||
|
'md5': '03ebf41cb7ade43581608b7d9b71fab0',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '3654973',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': 'Os gols de Atlético-MG 3 x 2 Santos pela 24ª rodada do Brasileirão',
|
||||||
|
'duration': 251.585,
|
||||||
|
'uploader': 'SporTV',
|
||||||
|
'uploader_id': 698,
|
||||||
|
'like_count': int,
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
'url': 'http://g1.globo.com/carros/autoesporte/videos/t/exclusivos-do-g1/v/mercedes-benz-gla-passa-por-teste-de-colisao-na-europa/3607726/',
|
||||||
|
'md5': 'b3ccc801f75cd04a914d51dadb83a78d',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '3607726',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': 'Mercedes-Benz GLA passa por teste de colisão na Europa',
|
||||||
|
'duration': 103.204,
|
||||||
|
'uploader': 'Globo.com',
|
||||||
|
'uploader_id': 265,
|
||||||
|
'like_count': int,
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
'url': 'http://g1.globo.com/jornal-nacional/noticia/2014/09/novidade-na-fiscalizacao-de-bagagem-pela-receita-provoca-discussoes.html',
|
||||||
|
'md5': '307fdeae4390ccfe6ba1aa198cf6e72b',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '3652183',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': 'Receita Federal explica como vai fiscalizar bagagens de quem retorna ao Brasil de avião',
|
||||||
|
'duration': 110.711,
|
||||||
|
'uploader': 'Rede Globo',
|
||||||
|
'uploader_id': 196,
|
||||||
|
'like_count': int,
|
||||||
|
}
|
||||||
|
},
|
||||||
|
]
|
||||||
|
|
||||||
|
class MD5():
|
||||||
|
HEX_FORMAT_LOWERCASE = 0
|
||||||
|
HEX_FORMAT_UPPERCASE = 1
|
||||||
|
BASE64_PAD_CHARACTER_DEFAULT_COMPLIANCE = ''
|
||||||
|
BASE64_PAD_CHARACTER_RFC_COMPLIANCE = '='
|
||||||
|
PADDING = '=0xFF01DD'
|
||||||
|
hexcase = 0
|
||||||
|
b64pad = ''
|
||||||
|
|
||||||
|
def __init__(self):
|
||||||
|
pass
|
||||||
|
|
||||||
|
class JSArray(list):
|
||||||
|
def __getitem__(self, y):
|
||||||
|
try:
|
||||||
|
return list.__getitem__(self, y)
|
||||||
|
except IndexError:
|
||||||
|
return 0
|
||||||
|
|
||||||
|
def __setitem__(self, i, y):
|
||||||
|
try:
|
||||||
|
return list.__setitem__(self, i, y)
|
||||||
|
except IndexError:
|
||||||
|
self.extend([0] * (i - len(self) + 1))
|
||||||
|
self[-1] = y
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def hex_md5(cls, param1):
|
||||||
|
return cls.rstr2hex(cls.rstr_md5(cls.str2rstr_utf8(param1)))
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def b64_md5(cls, param1, param2=None):
|
||||||
|
return cls.rstr2b64(cls.rstr_md5(cls.str2rstr_utf8(param1, param2)))
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def any_md5(cls, param1, param2):
|
||||||
|
return cls.rstr2any(cls.rstr_md5(cls.str2rstr_utf8(param1)), param2)
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def rstr_md5(cls, param1):
|
||||||
|
return cls.binl2rstr(cls.binl_md5(cls.rstr2binl(param1), len(param1) * 8))
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def rstr2hex(cls, param1):
|
||||||
|
_loc_2 = '0123456789ABCDEF' if cls.hexcase else '0123456789abcdef'
|
||||||
|
_loc_3 = ''
|
||||||
|
for _loc_5 in range(0, len(param1)):
|
||||||
|
_loc_4 = compat_ord(param1[_loc_5])
|
||||||
|
_loc_3 += _loc_2[_loc_4 >> 4 & 15] + _loc_2[_loc_4 & 15]
|
||||||
|
return _loc_3
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def rstr2b64(cls, param1):
|
||||||
|
_loc_2 = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_'
|
||||||
|
_loc_3 = ''
|
||||||
|
_loc_4 = len(param1)
|
||||||
|
for _loc_5 in range(0, _loc_4, 3):
|
||||||
|
_loc_6_1 = compat_ord(param1[_loc_5]) << 16
|
||||||
|
_loc_6_2 = compat_ord(param1[_loc_5 + 1]) << 8 if _loc_5 + 1 < _loc_4 else 0
|
||||||
|
_loc_6_3 = compat_ord(param1[_loc_5 + 2]) if _loc_5 + 2 < _loc_4 else 0
|
||||||
|
_loc_6 = _loc_6_1 | _loc_6_2 | _loc_6_3
|
||||||
|
for _loc_7 in range(0, 4):
|
||||||
|
if _loc_5 * 8 + _loc_7 * 6 > len(param1) * 8:
|
||||||
|
_loc_3 += cls.b64pad
|
||||||
|
else:
|
||||||
|
_loc_3 += _loc_2[_loc_6 >> 6 * (3 - _loc_7) & 63]
|
||||||
|
return _loc_3
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def rstr2any(param1, param2):
|
||||||
|
_loc_3 = len(param2)
|
||||||
|
_loc_4 = []
|
||||||
|
_loc_9 = [0] * ((len(param1) >> 2) + 1)
|
||||||
|
for _loc_5 in range(0, len(_loc_9)):
|
||||||
|
_loc_9[_loc_5] = compat_ord(param1[_loc_5 * 2]) << 8 | compat_ord(param1[_loc_5 * 2 + 1])
|
||||||
|
|
||||||
|
while len(_loc_9) > 0:
|
||||||
|
_loc_8 = []
|
||||||
|
_loc_7 = 0
|
||||||
|
for _loc_5 in range(0, len(_loc_9)):
|
||||||
|
_loc_7 = (_loc_7 << 16) + _loc_9[_loc_5]
|
||||||
|
_loc_6 = math.floor(_loc_7 / _loc_3)
|
||||||
|
_loc_7 -= _loc_6 * _loc_3
|
||||||
|
if len(_loc_8) > 0 or _loc_6 > 0:
|
||||||
|
_loc_8[len(_loc_8)] = _loc_6
|
||||||
|
|
||||||
|
_loc_4[len(_loc_4)] = _loc_7
|
||||||
|
_loc_9 = _loc_8
|
||||||
|
|
||||||
|
_loc_10 = ''
|
||||||
|
_loc_5 = len(_loc_4) - 1
|
||||||
|
while _loc_5 >= 0:
|
||||||
|
_loc_10 += param2[_loc_4[_loc_5]]
|
||||||
|
_loc_5 -= 1
|
||||||
|
|
||||||
|
return _loc_10
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def str2rstr_utf8(cls, param1, param2=None):
|
||||||
|
_loc_3 = ''
|
||||||
|
_loc_4 = -1
|
||||||
|
if not param2:
|
||||||
|
param2 = cls.PADDING
|
||||||
|
param1 = param1 + param2[1:9]
|
||||||
|
while True:
|
||||||
|
_loc_4 += 1
|
||||||
|
if _loc_4 >= len(param1):
|
||||||
|
break
|
||||||
|
_loc_5 = compat_ord(param1[_loc_4])
|
||||||
|
_loc_6 = compat_ord(param1[_loc_4 + 1]) if _loc_4 + 1 < len(param1) else 0
|
||||||
|
if 55296 <= _loc_5 <= 56319 and 56320 <= _loc_6 <= 57343:
|
||||||
|
_loc_5 = 65536 + ((_loc_5 & 1023) << 10) + (_loc_6 & 1023)
|
||||||
|
_loc_4 += 1
|
||||||
|
if _loc_5 <= 127:
|
||||||
|
_loc_3 += compat_chr(_loc_5)
|
||||||
|
continue
|
||||||
|
if _loc_5 <= 2047:
|
||||||
|
_loc_3 += compat_chr(192 | _loc_5 >> 6 & 31) + compat_chr(128 | _loc_5 & 63)
|
||||||
|
continue
|
||||||
|
if _loc_5 <= 65535:
|
||||||
|
_loc_3 += compat_chr(224 | _loc_5 >> 12 & 15) + compat_chr(128 | _loc_5 >> 6 & 63) + compat_chr(
|
||||||
|
128 | _loc_5 & 63)
|
||||||
|
continue
|
||||||
|
if _loc_5 <= 2097151:
|
||||||
|
_loc_3 += compat_chr(240 | _loc_5 >> 18 & 7) + compat_chr(128 | _loc_5 >> 12 & 63) + compat_chr(
|
||||||
|
128 | _loc_5 >> 6 & 63) + compat_chr(128 | _loc_5 & 63)
|
||||||
|
return _loc_3
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def rstr2binl(param1):
|
||||||
|
_loc_2 = [0] * ((len(param1) >> 2) + 1)
|
||||||
|
for _loc_3 in range(0, len(_loc_2)):
|
||||||
|
_loc_2[_loc_3] = 0
|
||||||
|
for _loc_3 in range(0, len(param1) * 8, 8):
|
||||||
|
_loc_2[_loc_3 >> 5] |= (compat_ord(param1[_loc_3 // 8]) & 255) << _loc_3 % 32
|
||||||
|
return _loc_2
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def binl2rstr(param1):
|
||||||
|
_loc_2 = ''
|
||||||
|
for _loc_3 in range(0, len(param1) * 32, 8):
|
||||||
|
_loc_2 += compat_chr(param1[_loc_3 >> 5] >> _loc_3 % 32 & 255)
|
||||||
|
return _loc_2
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def binl_md5(cls, param1, param2):
    """MD5 compression over a list of little-endian 32-bit words.

    param1 is the message as 32-bit words, param2 its length in
    bits.  Returns the four state words ``[a, b, c, d]``.  This is
    the RFC 1321 algorithm as ported from JavaScript: padding (a
    single 1-bit, then the bit length in the 15th word of the final
    block) is applied in place, and all arithmetic goes through the
    JS-overflow-emulating helpers (safe_add / md5_ff / ...).
    """
    param1 = cls.JSArray(param1)
    # Append the '1' padding bit and store the 64-bit bit-length
    param1[param2 >> 5] |= 128 << param2 % 32
    param1[(param2 + 64 >> 9 << 4) + 14] = param2

    # (round mixer, per-step word index, shift cycle, additive constants)
    rounds = [
        (cls.md5_ff,
         [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
         (7, 12, 17, 22),
         [-680876936, -389564586, 606105819, -1044525330,
          -176418897, 1200080426, -1473231341, -45705983,
          1770035416, -1958414417, -42063, -1990404162,
          1804603682, -40341101, -1502002290, 1236535329]),
        (cls.md5_gg,
         [1, 6, 11, 0, 5, 10, 15, 4, 9, 14, 3, 8, 13, 2, 7, 12],
         (5, 9, 14, 20),
         [-165796510, -1069501632, 643717713, -373897302,
          -701558691, 38016083, -660478335, -405537848,
          568446438, -1019803690, -187363961, 1163531501,
          -1444681467, -51403784, 1735328473, -1926607734]),
        (cls.md5_hh,
         [5, 8, 11, 14, 1, 4, 7, 10, 13, 0, 3, 6, 9, 12, 15, 2],
         (4, 11, 16, 23),
         [-378558, -2022574463, 1839030562, -35309556,
          -1530992060, 1272893353, -155497632, -1094730640,
          681279174, -358537222, -722521979, 76029189,
          -640364487, -421815835, 530742520, -995338651]),
        (cls.md5_ii,
         [0, 7, 14, 5, 12, 3, 10, 1, 8, 15, 6, 13, 4, 11, 2, 9],
         (6, 10, 15, 21),
         [-198630844, 1126891415, -1416354905, -57434055,
          1700485571, -1894986606, -1051523, -2054922799,
          1873313359, -30611744, -1560198380, 1309151649,
          -145523070, -1120210379, 718787259, -343485551]),
    ]

    a, b, c, d = 1732584193, -271733879, -1732584194, 271733878
    for block_start in range(0, len(param1), 16):
        aa, bb, cc, dd = a, b, c, d
        for mixer, word_idx, shifts, consts in rounds:
            for step in range(16):
                a = mixer(a, b, c, d,
                          param1[block_start + word_idx[step]],
                          shifts[step % 4], consts[step])
                # Rotating the names reproduces the reference
                # a/d/c/b assignment order of each group of four
                a, b, c, d = d, a, b, c
        a = cls.safe_add(a, aa)
        b = cls.safe_add(b, bb)
        c = cls.safe_add(c, cc)
        d = cls.safe_add(d, dd)
    return [a, b, c, d]
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def md5_cmn(cls, param1, param2, param3, param4, param5, param6):
    """Common MD5 step: add, rotate left by param5, add param3.

    All additions go through safe_add to emulate 32-bit overflow.
    """
    total = cls.safe_add(
        cls.safe_add(param2, param1), cls.safe_add(param4, param6))
    return cls.safe_add(cls.bit_rol(total, param5), param3)
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def md5_ff(cls, param1, param2, param3, param4, param5, param6, param7):
    """MD5 round-1 mixer: F(b, c, d) = (b & c) | (~b & d)."""
    f_val = param2 & param3 | ~param2 & param4
    return cls.md5_cmn(f_val, param1, param2, param5, param6, param7)
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def md5_gg(cls, param1, param2, param3, param4, param5, param6, param7):
    """MD5 round-2 mixer: G(b, c, d) = (b & d) | (c & ~d)."""
    g_val = param2 & param4 | param3 & ~param4
    return cls.md5_cmn(g_val, param1, param2, param5, param6, param7)
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def md5_hh(cls, param1, param2, param3, param4, param5, param6, param7):
    """MD5 round-3 mixer: H(b, c, d) = b ^ c ^ d."""
    h_val = param2 ^ param3 ^ param4
    return cls.md5_cmn(h_val, param1, param2, param5, param6, param7)
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def md5_ii(cls, param1, param2, param3, param4, param5, param6, param7):
    """MD5 round-4 mixer: I(b, c, d) = c ^ (b | ~d)."""
    i_val = param3 ^ (param2 | ~param4)
    return cls.md5_cmn(i_val, param1, param2, param5, param6, param7)
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def safe_add(cls, param1, param2):
    """Add two integers with JavaScript-style 32-bit wraparound.

    The low and high 16-bit halves are summed separately and the
    carry propagated, mirroring the canonical JS md5.js safe_add;
    the high half is pushed back through lshift so the sign of the
    result matches a 32-bit signed addition.
    """
    low = (param1 & 65535) + (param2 & 65535)
    high = (param1 >> 16) + (param2 >> 16) + (low >> 16)
    return cls.lshift(high, 16) | low & 65535
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def bit_rol(cls, param1, param2):
    """Rotate the 32-bit value param1 left by param2 bits.

    The wrapped-around high bits are recovered from the masked
    value; the shifted part goes through lshift to keep JS signed
    32-bit semantics.
    """
    wrapped = (param1 & 0xFFFFFFFF) >> (32 - param2)
    return cls.lshift(param1, param2) | wrapped
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def lshift(value, count):
    """Left-shift *value* by *count* bits, JavaScript style.

    The input is first truncated to 32 bits; any result whose
    magnitude exceeds 0x7FFFFFFF is converted to its 32-bit
    two's-complement (negative) interpretation, matching what the
    original JS port produced.
    """
    shifted = (0xFFFFFFFF & value) << count
    if shifted > 0x7FFFFFFF:
        return -(~(shifted - 1) & 0xFFFFFFFF)
    return shifted
|
||||||
|
|
||||||
|
def _real_extract(self, url):
    """Extract metadata and signed format URLs for a Globo video.

    The API hands out a "security hash" per resource which must be
    re-signed (with an extended expiration and random padding, via
    the ported JS MD5) before the resource URL is accepted.
    """
    display_id = self._match_id(url)

    webpage = self._download_webpage(url, display_id)
    video_id = self._search_regex(self._VIDEOID_REGEXES, webpage, 'video id')

    video = self._download_json(
        self._API_URL_TEMPLATE % video_id, video_id)['videos'][0]

    # Pull the simple metadata up front so schema problems surface
    # before any per-resource requests are made.
    title = video['title']
    duration = float_or_none(video['duration'], 1000)
    like_count = video['likes']
    uploader = video['channel']
    uploader_id = video['channel_id']

    formats = []
    for resource in video['resources']:
        resource_id = resource.get('_id')
        if not resource_id:
            continue

        security = self._download_json(
            self._SECURITY_URL_TEMPLATE % (video_id, resource_id),
            video_id, 'Downloading security hash for %s' % resource_id)

        security_hash = security.get('hash')
        if not security_hash:
            # The service reports failures in 'message'; surface
            # them, otherwise silently skip this resource.
            message = security.get('message')
            if message:
                raise ExtractorError(
                    '%s returned error: %s' % (self.IE_NAME, message),
                    expected=True)
            continue

        # Hash layout: 2-char code, 10-digit timestamp,
        # 10-char random part, then an MD5 digest.
        hash_code = security_hash[:2]
        received_time = int(security_hash[2:12])
        received_random = security_hash[12:22]
        received_md5 = security_hash[22:]

        # Re-sign with a pushed-out expiration and fresh padding
        sign_time = received_time + self._RESIGN_EXPIRATION
        padding = '%010d' % random.randint(1, 10000000000)

        signed_md5 = self.MD5.b64_md5(
            received_md5 + compat_str(sign_time) + padding)
        signed_hash = (
            hash_code + compat_str(received_time) + received_random
            + compat_str(sign_time) + padding + signed_md5)

        formats.append({
            'url': '%s?h=%s&k=%s' % (resource['url'], signed_hash, 'flash'),
            'format_id': resource_id,
            'height': resource['height']
        })

    self._sort_formats(formats)

    return {
        'id': video_id,
        'title': title,
        'duration': duration,
        'uploader': uploader,
        'uploader_id': uploader_id,
        'like_count': like_count,
        'formats': formats
    }
|
@ -36,16 +36,16 @@ class GodTubeIE(InfoExtractor):
|
|||||||
'http://www.godtube.com/resource/mediaplayer/%s.xml' % video_id.lower(),
|
'http://www.godtube.com/resource/mediaplayer/%s.xml' % video_id.lower(),
|
||||||
video_id, 'Downloading player config XML')
|
video_id, 'Downloading player config XML')
|
||||||
|
|
||||||
video_url = config.find('.//file').text
|
video_url = config.find('file').text
|
||||||
uploader = config.find('.//author').text
|
uploader = config.find('author').text
|
||||||
timestamp = parse_iso8601(config.find('.//date').text)
|
timestamp = parse_iso8601(config.find('date').text)
|
||||||
duration = parse_duration(config.find('.//duration').text)
|
duration = parse_duration(config.find('duration').text)
|
||||||
thumbnail = config.find('.//image').text
|
thumbnail = config.find('image').text
|
||||||
|
|
||||||
media = self._download_xml(
|
media = self._download_xml(
|
||||||
'http://www.godtube.com/media/xml/?v=%s' % video_id, video_id, 'Downloading media XML')
|
'http://www.godtube.com/media/xml/?v=%s' % video_id, video_id, 'Downloading media XML')
|
||||||
|
|
||||||
title = media.find('.//title').text
|
title = media.find('title').text
|
||||||
|
|
||||||
return {
|
return {
|
||||||
'id': video_id,
|
'id': video_id,
|
||||||
|
69
youtube_dl/extractor/golem.py
Normal file
69
youtube_dl/extractor/golem.py
Normal file
@ -0,0 +1,69 @@
|
|||||||
|
# coding: utf-8
|
||||||
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
|
from .common import InfoExtractor
|
||||||
|
from ..utils import (
|
||||||
|
compat_urlparse,
|
||||||
|
determine_ext,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
class GolemIE(InfoExtractor):
|
||||||
|
_VALID_URL = r'^https?://video\.golem\.de/.+?/(?P<id>.+?)/'
|
||||||
|
_TEST = {
|
||||||
|
'url': 'http://video.golem.de/handy/14095/iphone-6-und-6-plus-test.html',
|
||||||
|
'md5': 'c1a2c0a3c863319651c7c992c5ee29bf',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '14095',
|
||||||
|
'format_id': 'high',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': 'iPhone 6 und 6 Plus - Test',
|
||||||
|
'duration': 300.44,
|
||||||
|
'filesize': 65309548,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
_PREFIX = 'http://video.golem.de'
|
||||||
|
|
||||||
|
def _real_extract(self, url):
|
||||||
|
video_id = self._match_id(url)
|
||||||
|
|
||||||
|
config = self._download_xml(
|
||||||
|
'https://video.golem.de/xml/{0}.xml'.format(video_id), video_id)
|
||||||
|
|
||||||
|
info = {
|
||||||
|
'id': video_id,
|
||||||
|
'title': config.findtext('./title', 'golem'),
|
||||||
|
'duration': self._float(config.findtext('./playtime'), 'duration'),
|
||||||
|
}
|
||||||
|
|
||||||
|
formats = []
|
||||||
|
for e in config:
|
||||||
|
url = e.findtext('./url')
|
||||||
|
if not url:
|
||||||
|
continue
|
||||||
|
|
||||||
|
formats.append({
|
||||||
|
'format_id': e.tag,
|
||||||
|
'url': compat_urlparse.urljoin(self._PREFIX, url),
|
||||||
|
'height': self._int(e.get('height'), 'height'),
|
||||||
|
'width': self._int(e.get('width'), 'width'),
|
||||||
|
'filesize': self._int(e.findtext('filesize'), 'filesize'),
|
||||||
|
'ext': determine_ext(e.findtext('./filename')),
|
||||||
|
})
|
||||||
|
self._sort_formats(formats)
|
||||||
|
info['formats'] = formats
|
||||||
|
|
||||||
|
thumbnails = []
|
||||||
|
for e in config.findall('.//teaser'):
|
||||||
|
url = e.findtext('./url')
|
||||||
|
if not url:
|
||||||
|
continue
|
||||||
|
thumbnails.append({
|
||||||
|
'url': compat_urlparse.urljoin(self._PREFIX, url),
|
||||||
|
'width': self._int(e.get('width'), 'thumbnail width'),
|
||||||
|
'height': self._int(e.get('height'), 'thumbnail height'),
|
||||||
|
})
|
||||||
|
info['thumbnails'] = thumbnails
|
||||||
|
|
||||||
|
return info
|
@ -1,13 +1,11 @@
|
|||||||
# coding: utf-8
|
# coding: utf-8
|
||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
import datetime
|
|
||||||
import re
|
import re
|
||||||
|
import codecs
|
||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
from ..utils import (
|
from ..utils import unified_strdate
|
||||||
ExtractorError,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
class GooglePlusIE(InfoExtractor):
|
class GooglePlusIE(InfoExtractor):
|
||||||
@ -19,74 +17,57 @@ class GooglePlusIE(InfoExtractor):
|
|||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': 'ZButuJc6CtH',
|
'id': 'ZButuJc6CtH',
|
||||||
'ext': 'flv',
|
'ext': 'flv',
|
||||||
|
'title': '嘆きの天使 降臨',
|
||||||
'upload_date': '20120613',
|
'upload_date': '20120613',
|
||||||
'uploader': '井上ヨシマサ',
|
'uploader': '井上ヨシマサ',
|
||||||
'title': '嘆きの天使 降臨',
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
# Extract id from URL
|
video_id = self._match_id(url)
|
||||||
mobj = re.match(self._VALID_URL, url)
|
|
||||||
|
|
||||||
video_id = mobj.group('id')
|
|
||||||
|
|
||||||
# Step 1, Retrieve post webpage to extract further information
|
# Step 1, Retrieve post webpage to extract further information
|
||||||
webpage = self._download_webpage(url, video_id, 'Downloading entry webpage')
|
webpage = self._download_webpage(url, video_id, 'Downloading entry webpage')
|
||||||
|
|
||||||
self.report_extraction(video_id)
|
title = self._og_search_description(webpage).splitlines()[0]
|
||||||
|
upload_date = unified_strdate(self._html_search_regex(
|
||||||
# Extract update date
|
|
||||||
upload_date = self._html_search_regex(
|
|
||||||
r'''(?x)<a.+?class="o-U-s\s[^"]+"\s+style="display:\s*none"\s*>
|
r'''(?x)<a.+?class="o-U-s\s[^"]+"\s+style="display:\s*none"\s*>
|
||||||
([0-9]{4}-[0-9]{2}-[0-9]{2})</a>''',
|
([0-9]{4}-[0-9]{2}-[0-9]{2})</a>''',
|
||||||
webpage, 'upload date', fatal=False, flags=re.VERBOSE)
|
webpage, 'upload date', fatal=False, flags=re.VERBOSE))
|
||||||
if upload_date:
|
uploader = self._html_search_regex(
|
||||||
# Convert timestring to a format suitable for filename
|
r'rel="author".*?>(.*?)</a>', webpage, 'uploader', fatal=False)
|
||||||
upload_date = datetime.datetime.strptime(upload_date, "%Y-%m-%d")
|
|
||||||
upload_date = upload_date.strftime('%Y%m%d')
|
|
||||||
|
|
||||||
# Extract uploader
|
|
||||||
uploader = self._html_search_regex(r'rel\="author".*?>(.*?)</a>',
|
|
||||||
webpage, 'uploader', fatal=False)
|
|
||||||
|
|
||||||
# Extract title
|
|
||||||
# Get the first line for title
|
|
||||||
video_title = self._og_search_description(webpage).splitlines()[0]
|
|
||||||
|
|
||||||
# Step 2, Simulate clicking the image box to launch video
|
# Step 2, Simulate clicking the image box to launch video
|
||||||
DOMAIN = 'https://plus.google.com/'
|
DOMAIN = 'https://plus.google.com/'
|
||||||
video_page = self._search_regex(r'<a href="((?:%s)?photos/.*?)"' % re.escape(DOMAIN),
|
video_page = self._search_regex(
|
||||||
|
r'<a href="((?:%s)?photos/.*?)"' % re.escape(DOMAIN),
|
||||||
webpage, 'video page URL')
|
webpage, 'video page URL')
|
||||||
if not video_page.startswith(DOMAIN):
|
if not video_page.startswith(DOMAIN):
|
||||||
video_page = DOMAIN + video_page
|
video_page = DOMAIN + video_page
|
||||||
|
|
||||||
webpage = self._download_webpage(video_page, video_id, 'Downloading video page')
|
webpage = self._download_webpage(video_page, video_id, 'Downloading video page')
|
||||||
|
|
||||||
|
def unicode_escape(s):
|
||||||
|
decoder = codecs.getdecoder('unicode_escape')
|
||||||
|
return re.sub(
|
||||||
|
r'\\u[0-9a-fA-F]{4,}',
|
||||||
|
lambda m: decoder(m.group(0))[0],
|
||||||
|
s)
|
||||||
|
|
||||||
# Extract video links all sizes
|
# Extract video links all sizes
|
||||||
pattern = r'\d+,\d+,(\d+),"(http\://redirector\.googlevideo\.com.*?)"'
|
formats = [{
|
||||||
mobj = re.findall(pattern, webpage)
|
'url': unicode_escape(video_url),
|
||||||
if len(mobj) == 0:
|
'ext': 'flv',
|
||||||
raise ExtractorError('Unable to extract video links')
|
'width': int(width),
|
||||||
|
'height': int(height),
|
||||||
# Sort in resolution
|
} for width, height, video_url in re.findall(
|
||||||
links = sorted(mobj)
|
r'\d+,(\d+),(\d+),"(https?://redirector\.googlevideo\.com.*?)"', webpage)]
|
||||||
|
self._sort_formats(formats)
|
||||||
# Choose the lowest of the sort, i.e. highest resolution
|
|
||||||
video_url = links[-1]
|
|
||||||
# Only get the url. The resolution part in the tuple has no use anymore
|
|
||||||
video_url = video_url[-1]
|
|
||||||
# Treat escaped \u0026 style hex
|
|
||||||
try:
|
|
||||||
video_url = video_url.decode("unicode_escape")
|
|
||||||
except AttributeError: # Python 3
|
|
||||||
video_url = bytes(video_url, 'ascii').decode('unicode-escape')
|
|
||||||
|
|
||||||
return {
|
return {
|
||||||
'id': video_id,
|
'id': video_id,
|
||||||
'url': video_url,
|
'title': title,
|
||||||
'uploader': uploader,
|
'uploader': uploader,
|
||||||
'upload_date': upload_date,
|
'upload_date': upload_date,
|
||||||
'title': video_title,
|
'formats': formats,
|
||||||
'ext': 'flv',
|
|
||||||
}
|
}
|
||||||
|
@ -5,6 +5,7 @@ import re
|
|||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
from ..utils import (
|
from ..utils import (
|
||||||
|
ExtractorError,
|
||||||
determine_ext,
|
determine_ext,
|
||||||
compat_urllib_parse,
|
compat_urllib_parse,
|
||||||
compat_urllib_request,
|
compat_urllib_request,
|
||||||
@ -12,20 +13,22 @@ from ..utils import (
|
|||||||
|
|
||||||
|
|
||||||
class GorillaVidIE(InfoExtractor):
|
class GorillaVidIE(InfoExtractor):
|
||||||
IE_DESC = 'GorillaVid.in and daclips.in'
|
IE_DESC = 'GorillaVid.in, daclips.in and movpod.in'
|
||||||
_VALID_URL = r'''(?x)
|
_VALID_URL = r'''(?x)
|
||||||
https?://(?P<host>(?:www\.)?
|
https?://(?P<host>(?:www\.)?
|
||||||
(?:daclips\.in|gorillavid\.in))/
|
(?:daclips\.in|gorillavid\.in|movpod\.in))/
|
||||||
(?:embed-)?(?P<id>[0-9a-zA-Z]+)(?:-[0-9]+x[0-9]+\.html)?
|
(?:embed-)?(?P<id>[0-9a-zA-Z]+)(?:-[0-9]+x[0-9]+\.html)?
|
||||||
'''
|
'''
|
||||||
|
|
||||||
|
_FILE_NOT_FOUND_REGEX = r'>(?:404 - )?File Not Found<'
|
||||||
|
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
'url': 'http://gorillavid.in/06y9juieqpmi',
|
'url': 'http://gorillavid.in/06y9juieqpmi',
|
||||||
'md5': '5ae4a3580620380619678ee4875893ba',
|
'md5': '5ae4a3580620380619678ee4875893ba',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '06y9juieqpmi',
|
'id': '06y9juieqpmi',
|
||||||
'ext': 'flv',
|
'ext': 'flv',
|
||||||
'title': 'Rebecca Black My Moment Official Music Video Reaction',
|
'title': 'Rebecca Black My Moment Official Music Video Reaction-6GK87Rc8bzQ',
|
||||||
'thumbnail': 're:http://.*\.jpg',
|
'thumbnail': 're:http://.*\.jpg',
|
||||||
},
|
},
|
||||||
}, {
|
}, {
|
||||||
@ -46,6 +49,9 @@ class GorillaVidIE(InfoExtractor):
|
|||||||
'title': 'Micro Pig piglets ready on 16th July 2009',
|
'title': 'Micro Pig piglets ready on 16th July 2009',
|
||||||
'thumbnail': 're:http://.*\.jpg',
|
'thumbnail': 're:http://.*\.jpg',
|
||||||
},
|
},
|
||||||
|
}, {
|
||||||
|
'url': 'http://movpod.in/0wguyyxi1yca',
|
||||||
|
'only_matching': True,
|
||||||
}]
|
}]
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
@ -54,6 +60,9 @@ class GorillaVidIE(InfoExtractor):
|
|||||||
|
|
||||||
webpage = self._download_webpage('http://%s/%s' % (mobj.group('host'), video_id), video_id)
|
webpage = self._download_webpage('http://%s/%s' % (mobj.group('host'), video_id), video_id)
|
||||||
|
|
||||||
|
if re.search(self._FILE_NOT_FOUND_REGEX, webpage) is not None:
|
||||||
|
raise ExtractorError('Video %s does not exist' % video_id, expected=True)
|
||||||
|
|
||||||
fields = dict(re.findall(r'''(?x)<input\s+
|
fields = dict(re.findall(r'''(?x)<input\s+
|
||||||
type="hidden"\s+
|
type="hidden"\s+
|
||||||
name="([^"]+)"\s+
|
name="([^"]+)"\s+
|
||||||
@ -69,14 +78,14 @@ class GorillaVidIE(InfoExtractor):
|
|||||||
|
|
||||||
webpage = self._download_webpage(req, video_id, 'Downloading video page')
|
webpage = self._download_webpage(req, video_id, 'Downloading video page')
|
||||||
|
|
||||||
title = self._search_regex(r'style="z-index: [0-9]+;">([0-9a-zA-Z ]+)(?:-.+)?</span>', webpage, 'title')
|
title = self._search_regex(r'style="z-index: [0-9]+;">([^<]+)</span>', webpage, 'title')
|
||||||
thumbnail = self._search_regex(r'image:\'(http[^\']+)\',', webpage, 'thumbnail')
|
video_url = self._search_regex(r'file\s*:\s*\'(http[^\']+)\',', webpage, 'file url')
|
||||||
url = self._search_regex(r'file: \'(http[^\']+)\',', webpage, 'file url')
|
thumbnail = self._search_regex(r'image\s*:\s*\'(http[^\']+)\',', webpage, 'thumbnail', fatal=False)
|
||||||
|
|
||||||
formats = [{
|
formats = [{
|
||||||
'format_id': 'sd',
|
'format_id': 'sd',
|
||||||
'url': url,
|
'url': video_url,
|
||||||
'ext': determine_ext(url),
|
'ext': determine_ext(video_url),
|
||||||
'quality': 1,
|
'quality': 1,
|
||||||
}]
|
}]
|
||||||
|
|
||||||
|
81
youtube_dl/extractor/heise.py
Normal file
81
youtube_dl/extractor/heise.py
Normal file
@ -0,0 +1,81 @@
|
|||||||
|
# coding: utf-8
|
||||||
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
|
from .common import InfoExtractor
|
||||||
|
from ..utils import (
|
||||||
|
get_meta_content,
|
||||||
|
parse_iso8601,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
class HeiseIE(InfoExtractor):
|
||||||
|
_VALID_URL = r'''(?x)
|
||||||
|
https?://(?:www\.)?heise\.de/video/artikel/
|
||||||
|
.+?(?P<id>[0-9]+)\.html(?:$|[?#])
|
||||||
|
'''
|
||||||
|
_TEST = {
|
||||||
|
'url': (
|
||||||
|
'http://www.heise.de/video/artikel/Podcast-c-t-uplink-3-3-Owncloud-Tastaturen-Peilsender-Smartphone-2404147.html'
|
||||||
|
),
|
||||||
|
'md5': 'ffed432483e922e88545ad9f2f15d30e',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '2404147',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': (
|
||||||
|
"Podcast: c't uplink 3.3 – Owncloud / Tastaturen / Peilsender Smartphone"
|
||||||
|
),
|
||||||
|
'format_id': 'mp4_720',
|
||||||
|
'timestamp': 1411812600,
|
||||||
|
'upload_date': '20140927',
|
||||||
|
'description': 'In uplink-Episode 3.3 geht es darum, wie man sich von Cloud-Anbietern emanzipieren kann, worauf man beim Kauf einer Tastatur achten sollte und was Smartphones über uns verraten.',
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
def _real_extract(self, url):
|
||||||
|
video_id = self._match_id(url)
|
||||||
|
|
||||||
|
webpage = self._download_webpage(url, video_id)
|
||||||
|
json_url = self._search_regex(
|
||||||
|
r'json_url:\s*"([^"]+)"', webpage, 'json URL')
|
||||||
|
config = self._download_json(json_url, video_id)
|
||||||
|
|
||||||
|
info = {
|
||||||
|
'id': video_id,
|
||||||
|
'thumbnail': config.get('poster'),
|
||||||
|
'timestamp': parse_iso8601(get_meta_content('date', webpage)),
|
||||||
|
'description': self._og_search_description(webpage),
|
||||||
|
}
|
||||||
|
|
||||||
|
title = get_meta_content('fulltitle', webpage)
|
||||||
|
if title:
|
||||||
|
info['title'] = title
|
||||||
|
elif config.get('title'):
|
||||||
|
info['title'] = config['title']
|
||||||
|
else:
|
||||||
|
info['title'] = self._og_search_title(webpage)
|
||||||
|
|
||||||
|
formats = []
|
||||||
|
for t, rs in config['formats'].items():
|
||||||
|
if not rs or not hasattr(rs, 'items'):
|
||||||
|
self._downloader.report_warning(
|
||||||
|
'formats: {0}: no resolutions'.format(t))
|
||||||
|
continue
|
||||||
|
|
||||||
|
for height_str, obj in rs.items():
|
||||||
|
format_id = '{0}_{1}'.format(t, height_str)
|
||||||
|
|
||||||
|
if not obj or not obj.get('url'):
|
||||||
|
self._downloader.report_warning(
|
||||||
|
'formats: {0}: no url'.format(format_id))
|
||||||
|
continue
|
||||||
|
|
||||||
|
formats.append({
|
||||||
|
'url': obj['url'],
|
||||||
|
'format_id': format_id,
|
||||||
|
'height': self._int(height_str, 'height'),
|
||||||
|
})
|
||||||
|
|
||||||
|
self._sort_formats(formats)
|
||||||
|
info['formats'] = formats
|
||||||
|
|
||||||
|
return info
|
@ -28,13 +28,13 @@ class HowStuffWorksIE(InfoExtractor):
|
|||||||
}
|
}
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
'url': 'http://adventure.howstuffworks.com/39516-deadliest-catch-jakes-farewell-pots-video.htm',
|
'url': 'http://adventure.howstuffworks.com/7199-survival-zone-food-and-water-in-the-savanna-video.htm',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '553470',
|
'id': '453464',
|
||||||
'display_id': 'deadliest-catch-jakes-farewell-pots',
|
'display_id': 'survival-zone-food-and-water-in-the-savanna',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'title': 'Deadliest Catch: Jake\'s Farewell Pots',
|
'title': 'Survival Zone: Food and Water In the Savanna',
|
||||||
'description': 'md5:9632c346d5e43ee238028c9cefd8dbbc',
|
'description': 'md5:7e1c89f6411434970c15fa094170c371',
|
||||||
'thumbnail': 're:^https?://.*\.jpg$',
|
'thumbnail': 're:^https?://.*\.jpg$',
|
||||||
},
|
},
|
||||||
'params': {
|
'params': {
|
||||||
|
@ -33,8 +33,7 @@ class HuffPostIE(InfoExtractor):
|
|||||||
}
|
}
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
mobj = re.match(self._VALID_URL, url)
|
video_id = self._match_id(url)
|
||||||
video_id = mobj.group('id')
|
|
||||||
|
|
||||||
api_url = 'http://embed.live.huffingtonpost.com/api/segments/%s.json' % video_id
|
api_url = 'http://embed.live.huffingtonpost.com/api/segments/%s.json' % video_id
|
||||||
data = self._download_json(api_url, video_id)['data']
|
data = self._download_json(api_url, video_id)['data']
|
||||||
|
@ -89,7 +89,12 @@ class IGNIE(InfoExtractor):
|
|||||||
'<param name="flashvars"[^>]*value="[^"]*?url=(https?://www\.ign\.com/videos/.*?)["&]',
|
'<param name="flashvars"[^>]*value="[^"]*?url=(https?://www\.ign\.com/videos/.*?)["&]',
|
||||||
webpage)
|
webpage)
|
||||||
if multiple_urls:
|
if multiple_urls:
|
||||||
return [self.url_result(u, ie='IGN') for u in multiple_urls]
|
entries = [self.url_result(u, ie='IGN') for u in multiple_urls]
|
||||||
|
return {
|
||||||
|
'_type': 'playlist',
|
||||||
|
'id': name_or_id,
|
||||||
|
'entries': entries,
|
||||||
|
}
|
||||||
|
|
||||||
video_id = self._find_video_id(webpage)
|
video_id = self._find_video_id(webpage)
|
||||||
result = self._get_video_info(video_id)
|
result = self._get_video_info(video_id)
|
||||||
|
@ -1,3 +1,5 @@
|
|||||||
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
import re
|
import re
|
||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
@ -12,12 +14,13 @@ class InternetVideoArchiveIE(InfoExtractor):
|
|||||||
_VALID_URL = r'https?://video\.internetvideoarchive\.net/flash/players/.*?\?.*?publishedid.*?'
|
_VALID_URL = r'https?://video\.internetvideoarchive\.net/flash/players/.*?\?.*?publishedid.*?'
|
||||||
|
|
||||||
_TEST = {
|
_TEST = {
|
||||||
u'url': u'http://video.internetvideoarchive.net/flash/players/flashconfiguration.aspx?customerid=69249&publishedid=452693&playerid=247',
|
'url': 'http://video.internetvideoarchive.net/flash/players/flashconfiguration.aspx?customerid=69249&publishedid=452693&playerid=247',
|
||||||
u'file': u'452693.mp4',
|
'info_dict': {
|
||||||
u'info_dict': {
|
'id': '452693',
|
||||||
u'title': u'SKYFALL',
|
'ext': 'mp4',
|
||||||
u'description': u'In SKYFALL, Bond\'s loyalty to M is tested as her past comes back to haunt her. As MI6 comes under attack, 007 must track down and destroy the threat, no matter how personal the cost.',
|
'title': 'SKYFALL',
|
||||||
u'duration': 153,
|
'description': 'In SKYFALL, Bond\'s loyalty to M is tested as her past comes back to haunt her. As MI6 comes under attack, 007 must track down and destroy the threat, no matter how personal the cost.',
|
||||||
|
'duration': 149,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -42,7 +45,7 @@ class InternetVideoArchiveIE(InfoExtractor):
|
|||||||
url = self._build_url(query)
|
url = self._build_url(query)
|
||||||
|
|
||||||
flashconfiguration = self._download_xml(url, video_id,
|
flashconfiguration = self._download_xml(url, video_id,
|
||||||
u'Downloading flash configuration')
|
'Downloading flash configuration')
|
||||||
file_url = flashconfiguration.find('file').text
|
file_url = flashconfiguration.find('file').text
|
||||||
file_url = file_url.replace('/playlist.aspx', '/mrssplaylist.aspx')
|
file_url = file_url.replace('/playlist.aspx', '/mrssplaylist.aspx')
|
||||||
# Replace some of the parameters in the query to get the best quality
|
# Replace some of the parameters in the query to get the best quality
|
||||||
@ -51,7 +54,7 @@ class InternetVideoArchiveIE(InfoExtractor):
|
|||||||
lambda m: self._clean_query(m.group()),
|
lambda m: self._clean_query(m.group()),
|
||||||
file_url)
|
file_url)
|
||||||
info = self._download_xml(file_url, video_id,
|
info = self._download_xml(file_url, video_id,
|
||||||
u'Downloading video info')
|
'Downloading video info')
|
||||||
item = info.find('channel/item')
|
item = info.find('channel/item')
|
||||||
|
|
||||||
def _bp(p):
|
def _bp(p):
|
||||||
|
@ -63,7 +63,8 @@ class IzleseneIE(InfoExtractor):
|
|||||||
|
|
||||||
title = self._og_search_title(webpage)
|
title = self._og_search_title(webpage)
|
||||||
description = self._og_search_description(webpage)
|
description = self._og_search_description(webpage)
|
||||||
thumbnail = self._og_search_thumbnail(webpage)
|
thumbnail = self._proto_relative_url(
|
||||||
|
self._og_search_thumbnail(webpage), scheme='http:')
|
||||||
|
|
||||||
uploader = self._html_search_regex(
|
uploader = self._html_search_regex(
|
||||||
r"adduserUsername\s*=\s*'([^']+)';",
|
r"adduserUsername\s*=\s*'([^']+)';",
|
||||||
|
@ -1,8 +1,6 @@
|
|||||||
# coding=utf-8
|
# coding=utf-8
|
||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
import re
|
|
||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
from ..utils import (
|
from ..utils import (
|
||||||
int_or_none,
|
int_or_none,
|
||||||
@ -12,14 +10,14 @@ from ..utils import (
|
|||||||
|
|
||||||
class JpopsukiIE(InfoExtractor):
|
class JpopsukiIE(InfoExtractor):
|
||||||
IE_NAME = 'jpopsuki.tv'
|
IE_NAME = 'jpopsuki.tv'
|
||||||
_VALID_URL = r'https?://(?:www\.)?jpopsuki\.tv/video/(.*?)/(?P<id>\S+)'
|
_VALID_URL = r'https?://(?:www\.)?jpopsuki\.tv/(?:category/)?video/[^/]+/(?P<id>\S+)'
|
||||||
|
|
||||||
_TEST = {
|
_TEST = {
|
||||||
'url': 'http://www.jpopsuki.tv/video/ayumi-hamasaki---evolution/00be659d23b0b40508169cdee4545771',
|
'url': 'http://www.jpopsuki.tv/video/ayumi-hamasaki---evolution/00be659d23b0b40508169cdee4545771',
|
||||||
'md5': '88018c0c1a9b1387940e90ec9e7e198e',
|
'md5': '88018c0c1a9b1387940e90ec9e7e198e',
|
||||||
'file': '00be659d23b0b40508169cdee4545771.mp4',
|
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '00be659d23b0b40508169cdee4545771',
|
'id': '00be659d23b0b40508169cdee4545771',
|
||||||
|
'ext': 'mp4',
|
||||||
'title': 'ayumi hamasaki - evolution',
|
'title': 'ayumi hamasaki - evolution',
|
||||||
'description': 'Release date: 2001.01.31\r\n浜崎あゆみ - evolution',
|
'description': 'Release date: 2001.01.31\r\n浜崎あゆみ - evolution',
|
||||||
'thumbnail': 'http://www.jpopsuki.tv/cache/89722c74d2a2ebe58bcac65321c115b2.jpg',
|
'thumbnail': 'http://www.jpopsuki.tv/cache/89722c74d2a2ebe58bcac65321c115b2.jpg',
|
||||||
@ -30,8 +28,7 @@ class JpopsukiIE(InfoExtractor):
|
|||||||
}
|
}
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
mobj = re.match(self._VALID_URL, url)
|
video_id = self._match_id(url)
|
||||||
video_id = mobj.group('id')
|
|
||||||
|
|
||||||
webpage = self._download_webpage(url, video_id)
|
webpage = self._download_webpage(url, video_id)
|
||||||
|
|
||||||
@ -47,11 +44,9 @@ class JpopsukiIE(InfoExtractor):
|
|||||||
uploader_id = self._html_search_regex(
|
uploader_id = self._html_search_regex(
|
||||||
r'<li>from: <a href="/user/view/user/\S*?/uid/(\d*)',
|
r'<li>from: <a href="/user/view/user/\S*?/uid/(\d*)',
|
||||||
webpage, 'video uploader_id', fatal=False)
|
webpage, 'video uploader_id', fatal=False)
|
||||||
upload_date = self._html_search_regex(
|
upload_date = unified_strdate(self._html_search_regex(
|
||||||
r'<li>uploaded: (.*?)</li>', webpage, 'video upload_date',
|
r'<li>uploaded: (.*?)</li>', webpage, 'video upload_date',
|
||||||
fatal=False)
|
fatal=False))
|
||||||
if upload_date is not None:
|
|
||||||
upload_date = unified_strdate(upload_date)
|
|
||||||
view_count_str = self._html_search_regex(
|
view_count_str = self._html_search_regex(
|
||||||
r'<li>Hits: ([0-9]+?)</li>', webpage, 'video view_count',
|
r'<li>Hits: ([0-9]+?)</li>', webpage, 'video view_count',
|
||||||
fatal=False)
|
fatal=False)
|
||||||
|
@ -11,10 +11,9 @@ from ..utils import (
|
|||||||
|
|
||||||
|
|
||||||
class JukeboxIE(InfoExtractor):
|
class JukeboxIE(InfoExtractor):
|
||||||
_VALID_URL = r'^http://www\.jukebox?\..+?\/.+[,](?P<video_id>[a-z0-9\-]+)\.html'
|
_VALID_URL = r'^http://www\.jukebox?\..+?\/.+[,](?P<id>[a-z0-9\-]+)\.html'
|
||||||
_TEST = {
|
_TEST = {
|
||||||
'url': 'http://www.jukebox.es/kosheen/videoclip,pride,r303r.html',
|
'url': 'http://www.jukebox.es/kosheen/videoclip,pride,r303r.html',
|
||||||
'md5': '1574e9b4d6438446d5b7dbcdf2786276',
|
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': 'r303r',
|
'id': 'r303r',
|
||||||
'ext': 'flv',
|
'ext': 'flv',
|
||||||
@ -24,8 +23,7 @@ class JukeboxIE(InfoExtractor):
|
|||||||
}
|
}
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
mobj = re.match(self._VALID_URL, url)
|
video_id = self._match_id(url)
|
||||||
video_id = mobj.group('video_id')
|
|
||||||
|
|
||||||
html = self._download_webpage(url, video_id)
|
html = self._download_webpage(url, video_id)
|
||||||
iframe_url = unescapeHTML(self._search_regex(r'<iframe .*src="([^"]*)"', html, 'iframe url'))
|
iframe_url = unescapeHTML(self._search_regex(r'<iframe .*src="([^"]*)"', html, 'iframe url'))
|
||||||
|
@ -34,7 +34,7 @@ class KontrTubeIE(InfoExtractor):
|
|||||||
video_url = self._html_search_regex(r"video_url: '(.+?)/?',", webpage, 'video URL')
|
video_url = self._html_search_regex(r"video_url: '(.+?)/?',", webpage, 'video URL')
|
||||||
thumbnail = self._html_search_regex(r"preview_url: '(.+?)/?',", webpage, 'video thumbnail', fatal=False)
|
thumbnail = self._html_search_regex(r"preview_url: '(.+?)/?',", webpage, 'video thumbnail', fatal=False)
|
||||||
title = self._html_search_regex(
|
title = self._html_search_regex(
|
||||||
r'<title>(.+?) - Труба зовёт - Интересный видеохостинг</title>', webpage, 'video title')
|
r'<title>(.+?)</title>', webpage, 'video title')
|
||||||
description = self._html_search_meta('description', webpage, 'video description')
|
description = self._html_search_meta('description', webpage, 'video description')
|
||||||
|
|
||||||
mobj = re.search(
|
mobj = re.search(
|
||||||
|
69
youtube_dl/extractor/lrt.py
Normal file
69
youtube_dl/extractor/lrt.py
Normal file
@ -0,0 +1,69 @@
|
|||||||
|
# coding: utf-8
|
||||||
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
|
import re
|
||||||
|
import json
|
||||||
|
|
||||||
|
from .common import InfoExtractor
|
||||||
|
from ..utils import (
|
||||||
|
determine_ext,
|
||||||
|
js_to_json,
|
||||||
|
parse_duration,
|
||||||
|
remove_end,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
class LRTIE(InfoExtractor):
|
||||||
|
IE_NAME = 'lrt.lt'
|
||||||
|
_VALID_URL = r'https?://(?:www\.)?lrt\.lt/mediateka/irasas/(?P<id>[0-9]+)'
|
||||||
|
_TEST = {
|
||||||
|
'url': 'http://www.lrt.lt/mediateka/irasas/54391/',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '54391',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': 'Septynios Kauno dienos',
|
||||||
|
'description': 'Kauno miesto ir apskrities naujienos',
|
||||||
|
'duration': 1783,
|
||||||
|
},
|
||||||
|
'params': {
|
||||||
|
'skip_download': True, # HLS download
|
||||||
|
},
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
def _real_extract(self, url):
|
||||||
|
mobj = re.match(self._VALID_URL, url)
|
||||||
|
video_id = mobj.group('id')
|
||||||
|
|
||||||
|
webpage = self._download_webpage(url, video_id)
|
||||||
|
|
||||||
|
title = remove_end(self._og_search_title(webpage), ' - LRT')
|
||||||
|
thumbnail = self._og_search_thumbnail(webpage)
|
||||||
|
description = self._og_search_description(webpage)
|
||||||
|
duration = parse_duration(self._search_regex(
|
||||||
|
r"'duration':\s*'([^']+)',", webpage,
|
||||||
|
'duration', fatal=False, default=None))
|
||||||
|
|
||||||
|
formats = []
|
||||||
|
for js in re.findall(r'(?s)config:\s*(\{.*?\})', webpage):
|
||||||
|
data = json.loads(js_to_json(js))
|
||||||
|
if data['provider'] == 'rtmp':
|
||||||
|
formats.append({
|
||||||
|
'format_id': 'rtmp',
|
||||||
|
'ext': determine_ext(data['file']),
|
||||||
|
'url': data['streamer'],
|
||||||
|
'play_path': 'mp4:%s' % data['file'],
|
||||||
|
'preference': -1,
|
||||||
|
})
|
||||||
|
else:
|
||||||
|
formats.extend(
|
||||||
|
self._extract_m3u8_formats(data['file'], video_id, 'mp4'))
|
||||||
|
|
||||||
|
return {
|
||||||
|
'id': video_id,
|
||||||
|
'title': title,
|
||||||
|
'formats': formats,
|
||||||
|
'thumbnail': thumbnail,
|
||||||
|
'description': description,
|
||||||
|
'duration': duration,
|
||||||
|
}
|
@ -70,7 +70,7 @@ class MixcloudIE(InfoExtractor):
|
|||||||
raise ExtractorError('Unable to extract track url')
|
raise ExtractorError('Unable to extract track url')
|
||||||
|
|
||||||
PREFIX = (
|
PREFIX = (
|
||||||
r'<div class="cloudcast-play-button-container"'
|
r'<div class="cloudcast-play-button-container[^"]*?"'
|
||||||
r'(?:\s+[a-zA-Z0-9-]+(?:="[^"]+")?)*?\s+')
|
r'(?:\s+[a-zA-Z0-9-]+(?:="[^"]+")?)*?\s+')
|
||||||
title = self._html_search_regex(
|
title = self._html_search_regex(
|
||||||
PREFIX + r'm-title="([^"]+)"', webpage, 'title')
|
PREFIX + r'm-title="([^"]+)"', webpage, 'title')
|
||||||
|
@ -6,7 +6,6 @@ from .common import InfoExtractor
|
|||||||
from ..utils import (
|
from ..utils import (
|
||||||
parse_duration,
|
parse_duration,
|
||||||
parse_iso8601,
|
parse_iso8601,
|
||||||
find_xpath_attr,
|
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
@ -88,8 +87,9 @@ class MLBIE(InfoExtractor):
|
|||||||
duration = parse_duration(detail.find('./duration').text)
|
duration = parse_duration(detail.find('./duration').text)
|
||||||
timestamp = parse_iso8601(detail.attrib['date'][:-5])
|
timestamp = parse_iso8601(detail.attrib['date'][:-5])
|
||||||
|
|
||||||
thumbnail = find_xpath_attr(
|
thumbnails = [{
|
||||||
detail, './thumbnailScenarios/thumbnailScenario', 'type', '45').text
|
'url': thumbnail.text,
|
||||||
|
} for thumbnail in detail.findall('./thumbnailScenarios/thumbnailScenario')]
|
||||||
|
|
||||||
formats = []
|
formats = []
|
||||||
for media_url in detail.findall('./url'):
|
for media_url in detail.findall('./url'):
|
||||||
@ -116,5 +116,5 @@ class MLBIE(InfoExtractor):
|
|||||||
'duration': duration,
|
'duration': duration,
|
||||||
'timestamp': timestamp,
|
'timestamp': timestamp,
|
||||||
'formats': formats,
|
'formats': formats,
|
||||||
'thumbnail': thumbnail,
|
'thumbnails': thumbnails,
|
||||||
}
|
}
|
||||||
|
@ -1,7 +1,6 @@
|
|||||||
# coding: utf-8
|
# coding: utf-8
|
||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
import datetime
|
|
||||||
import json
|
import json
|
||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
@ -23,6 +22,7 @@ class MuenchenTVIE(InfoExtractor):
|
|||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'title': 're:^münchen.tv-Livestream [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
|
'title': 're:^münchen.tv-Livestream [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
|
||||||
'is_live': True,
|
'is_live': True,
|
||||||
|
'thumbnail': 're:^https?://.*\.jpg$'
|
||||||
},
|
},
|
||||||
'params': {
|
'params': {
|
||||||
'skip_download': True,
|
'skip_download': True,
|
||||||
@ -33,9 +33,7 @@ class MuenchenTVIE(InfoExtractor):
|
|||||||
display_id = 'live'
|
display_id = 'live'
|
||||||
webpage = self._download_webpage(url, display_id)
|
webpage = self._download_webpage(url, display_id)
|
||||||
|
|
||||||
now = datetime.datetime.now()
|
title = self._live_title(self._og_search_title(webpage))
|
||||||
now_str = now.strftime("%Y-%m-%d %H:%M")
|
|
||||||
title = self._og_search_title(webpage) + ' ' + now_str
|
|
||||||
|
|
||||||
data_js = self._search_regex(
|
data_js = self._search_regex(
|
||||||
r'(?s)\nplaylist:\s*(\[.*?}\]),related:',
|
r'(?s)\nplaylist:\s*(\[.*?}\]),related:',
|
||||||
@ -73,5 +71,6 @@ class MuenchenTVIE(InfoExtractor):
|
|||||||
'title': title,
|
'title': title,
|
||||||
'formats': formats,
|
'formats': formats,
|
||||||
'is_live': True,
|
'is_live': True,
|
||||||
|
'thumbnail': thumbnail,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -18,16 +18,16 @@ class NDRIE(InfoExtractor):
|
|||||||
|
|
||||||
_TESTS = [
|
_TESTS = [
|
||||||
{
|
{
|
||||||
'url': 'http://www.ndr.de/fernsehen/media/dienordreportage325.html',
|
'url': 'http://www.ndr.de/fernsehen/sendungen/nordmagazin/Kartoffeltage-in-der-Lewitz,nordmagazin25866.html',
|
||||||
'md5': '4a4eeafd17c3058b65f0c8f091355855',
|
'md5': '5bc5f5b92c82c0f8b26cddca34f8bb2c',
|
||||||
'note': 'Video file',
|
'note': 'Video file',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '325',
|
'id': '25866',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'title': 'Blaue Bohnen aus Blocken',
|
'title': 'Kartoffeltage in der Lewitz',
|
||||||
'description': 'md5:190d71ba2ccddc805ed01547718963bc',
|
'description': 'md5:48c4c04dde604c8a9971b3d4e3b9eaa8',
|
||||||
'duration': 1715,
|
'duration': 166,
|
||||||
},
|
}
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
'url': 'http://www.ndr.de/info/audio51535.html',
|
'url': 'http://www.ndr.de/info/audio51535.html',
|
||||||
|
@ -6,6 +6,7 @@ import re
|
|||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
from ..utils import (
|
from ..utils import (
|
||||||
ExtractorError,
|
ExtractorError,
|
||||||
|
compat_urllib_parse_urlparse,
|
||||||
int_or_none,
|
int_or_none,
|
||||||
remove_end,
|
remove_end,
|
||||||
)
|
)
|
||||||
@ -13,76 +14,116 @@ from ..utils import (
|
|||||||
|
|
||||||
class NFLIE(InfoExtractor):
|
class NFLIE(InfoExtractor):
|
||||||
IE_NAME = 'nfl.com'
|
IE_NAME = 'nfl.com'
|
||||||
_VALID_URL = r'(?x)https?://(?:www\.)?nfl\.com/(?:videos/(?:.+)/|.*?\#video=)(?P<id>\d..[0-9]+)'
|
_VALID_URL = r'''(?x)https?://
|
||||||
_PLAYER_CONFIG_URL = 'http://www.nfl.com/static/content/static/config/video/config.json'
|
(?P<host>(?:www\.)?(?:nfl\.com|.*?\.clubs\.nfl\.com))/
|
||||||
_TEST = {
|
(?:.+?/)*
|
||||||
'url': 'http://www.nfl.com/videos/nfl-game-highlights/0ap3000000398478/Week-3-Redskins-vs-Eagles-highlights',
|
(?P<id>(?:\d[a-z]{2}\d{13}|\w{8}\-(?:\w{4}\-){3}\w{12}))'''
|
||||||
# 'md5': '5eb8c40a727dda106d510e5d6ffa79e5', # md5 checksum fluctuates
|
_TESTS = [
|
||||||
'info_dict': {
|
{
|
||||||
'id': '0ap3000000398478',
|
'url': 'http://www.nfl.com/videos/nfl-game-highlights/0ap3000000398478/Week-3-Redskins-vs-Eagles-highlights',
|
||||||
'ext': 'mp4',
|
'md5': '394ef771ddcd1354f665b471d78ec4c6',
|
||||||
'title': 'Week 3: Washington Redskins vs. Philadelphia Eagles highlights',
|
'info_dict': {
|
||||||
'description': 'md5:56323bfb0ac4ee5ab24bd05fdf3bf478',
|
'id': '0ap3000000398478',
|
||||||
'upload_date': '20140921',
|
'ext': 'mp4',
|
||||||
'timestamp': 1411337580,
|
'title': 'Week 3: Redskins vs. Eagles highlights',
|
||||||
'thumbnail': 're:^https?://.*\.jpg$',
|
'description': 'md5:56323bfb0ac4ee5ab24bd05fdf3bf478',
|
||||||
|
'upload_date': '20140921',
|
||||||
|
'timestamp': 1411337580,
|
||||||
|
'thumbnail': 're:^https?://.*\.jpg$',
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
'url': 'http://prod.www.steelers.clubs.nfl.com/video-and-audio/videos/LIVE_Post_Game_vs_Browns/9d72f26a-9e2b-4718-84d3-09fb4046c266',
|
||||||
|
'md5': 'cf85bdb4bc49f6e9d3816d130c78279c',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '9d72f26a-9e2b-4718-84d3-09fb4046c266',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': 'LIVE: Post Game vs. Browns',
|
||||||
|
'description': 'md5:6a97f7e5ebeb4c0e69a418a89e0636e8',
|
||||||
|
'upload_date': '20131229',
|
||||||
|
'timestamp': 1388354455,
|
||||||
|
'thumbnail': 're:^https?://.*\.jpg$',
|
||||||
|
}
|
||||||
|
}
|
||||||
|
]
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def prepend_host(host, url):
|
||||||
|
if not url.startswith('http'):
|
||||||
|
if not url.startswith('/'):
|
||||||
|
url = '/%s' % url
|
||||||
|
url = 'http://{0:}{1:}'.format(host, url)
|
||||||
|
return url
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def format_from_stream(stream, protocol, host, path_prefix='',
|
||||||
|
preference=0, note=None):
|
||||||
|
url = '{protocol:}://{host:}/{prefix:}{path:}'.format(
|
||||||
|
protocol=protocol,
|
||||||
|
host=host,
|
||||||
|
prefix=path_prefix,
|
||||||
|
path=stream.get('path'),
|
||||||
|
)
|
||||||
|
return {
|
||||||
|
'url': url,
|
||||||
|
'vbr': int_or_none(stream.get('rate', 0), 1000),
|
||||||
|
'preference': preference,
|
||||||
|
'format_note': note,
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
mobj = re.match(self._VALID_URL, url)
|
mobj = re.match(self._VALID_URL, url)
|
||||||
video_id = mobj.group('id')
|
video_id, host = mobj.group('id'), mobj.group('host')
|
||||||
|
|
||||||
config = self._download_json(self._PLAYER_CONFIG_URL, video_id,
|
webpage = self._download_webpage(url, video_id)
|
||||||
|
|
||||||
|
config_url = NFLIE.prepend_host(host, self._search_regex(
|
||||||
|
r'(?:config|configURL)\s*:\s*"([^"]+)"', webpage, 'config URL'))
|
||||||
|
config = self._download_json(config_url, video_id,
|
||||||
note='Downloading player config')
|
note='Downloading player config')
|
||||||
url_template = 'http://nfl.com{contentURLTemplate:s}'.format(**config)
|
url_template = NFLIE.prepend_host(
|
||||||
video_data = self._download_json(url_template.format(id=video_id), video_id)
|
host, '{contentURLTemplate:}'.format(**config))
|
||||||
|
video_data = self._download_json(
|
||||||
cdns = config.get('cdns')
|
url_template.format(id=video_id), video_id)
|
||||||
if not cdns:
|
|
||||||
raise ExtractorError('Failed to get CDN data', expected=True)
|
|
||||||
|
|
||||||
formats = []
|
formats = []
|
||||||
streams = video_data.get('cdnData', {}).get('bitrateInfo', [])
|
cdn_data = video_data.get('cdnData', {})
|
||||||
for name, cdn in cdns.items():
|
streams = cdn_data.get('bitrateInfo', [])
|
||||||
# LimeLight streams don't seem to work
|
if cdn_data.get('format') == 'EXTERNAL_HTTP_STREAM':
|
||||||
if cdn.get('name') == 'LIMELIGHT':
|
parts = compat_urllib_parse_urlparse(cdn_data.get('uri'))
|
||||||
continue
|
protocol, host = parts.scheme, parts.netloc
|
||||||
|
|
||||||
protocol = cdn.get('protocol')
|
|
||||||
host = remove_end(cdn.get('host', ''), '/')
|
|
||||||
if not (protocol and host):
|
|
||||||
continue
|
|
||||||
|
|
||||||
path_prefix = cdn.get('pathprefix', '')
|
|
||||||
if path_prefix and not path_prefix.endswith('/'):
|
|
||||||
path_prefix = '%s/' % path_prefix
|
|
||||||
|
|
||||||
get_url = lambda p: '{protocol:s}://{host:s}/{prefix:s}{path:}'.format(
|
|
||||||
protocol=protocol,
|
|
||||||
host=host,
|
|
||||||
prefix=path_prefix,
|
|
||||||
path=p,
|
|
||||||
)
|
|
||||||
|
|
||||||
if protocol == 'rtmp':
|
|
||||||
preference = -2
|
|
||||||
elif 'prog' in name.lower():
|
|
||||||
preference = -1
|
|
||||||
else:
|
|
||||||
preference = 0
|
|
||||||
|
|
||||||
for stream in streams:
|
for stream in streams:
|
||||||
path = stream.get('path')
|
formats.append(
|
||||||
if not path:
|
NFLIE.format_from_stream(stream, protocol, host))
|
||||||
|
else:
|
||||||
|
cdns = config.get('cdns')
|
||||||
|
if not cdns:
|
||||||
|
raise ExtractorError('Failed to get CDN data', expected=True)
|
||||||
|
|
||||||
|
for name, cdn in cdns.items():
|
||||||
|
# LimeLight streams don't seem to work
|
||||||
|
if cdn.get('name') == 'LIMELIGHT':
|
||||||
continue
|
continue
|
||||||
|
|
||||||
formats.append({
|
protocol = cdn.get('protocol')
|
||||||
'url': get_url(path),
|
host = remove_end(cdn.get('host', ''), '/')
|
||||||
'vbr': int_or_none(stream.get('rate', 0), 1000),
|
if not (protocol and host):
|
||||||
'preference': preference,
|
continue
|
||||||
'format_note': name,
|
|
||||||
})
|
prefix = cdn.get('pathprefix', '')
|
||||||
|
if prefix and not prefix.endswith('/'):
|
||||||
|
prefix = '%s/' % prefix
|
||||||
|
|
||||||
|
preference = 0
|
||||||
|
if protocol == 'rtmp':
|
||||||
|
preference = -2
|
||||||
|
elif 'prog' in name.lower():
|
||||||
|
preference = 1
|
||||||
|
|
||||||
|
for stream in streams:
|
||||||
|
formats.append(
|
||||||
|
NFLIE.format_from_stream(stream, protocol, host,
|
||||||
|
prefix, preference, name))
|
||||||
|
|
||||||
self._sort_formats(formats)
|
self._sort_formats(formats)
|
||||||
|
|
||||||
@ -94,7 +135,7 @@ class NFLIE(InfoExtractor):
|
|||||||
|
|
||||||
return {
|
return {
|
||||||
'id': video_id,
|
'id': video_id,
|
||||||
'title': video_data.get('storyHeadline'),
|
'title': video_data.get('headline'),
|
||||||
'formats': formats,
|
'formats': formats,
|
||||||
'description': video_data.get('caption'),
|
'description': video_data.get('caption'),
|
||||||
'duration': video_data.get('duration'),
|
'duration': video_data.get('duration'),
|
||||||
|
@ -39,18 +39,17 @@ class NiconicoIE(InfoExtractor):
|
|||||||
|
|
||||||
_VALID_URL = r'https?://(?:www\.|secure\.)?nicovideo\.jp/watch/((?:[a-z]{2})?[0-9]+)'
|
_VALID_URL = r'https?://(?:www\.|secure\.)?nicovideo\.jp/watch/((?:[a-z]{2})?[0-9]+)'
|
||||||
_NETRC_MACHINE = 'niconico'
|
_NETRC_MACHINE = 'niconico'
|
||||||
# Determine whether the downloader uses authentication to download video
|
# Determine whether the downloader used authentication to download video
|
||||||
_AUTHENTICATE = False
|
_AUTHENTICATED = False
|
||||||
|
|
||||||
def _real_initialize(self):
|
def _real_initialize(self):
|
||||||
if self._downloader.params.get('username', None) is not None:
|
self._login()
|
||||||
self._AUTHENTICATE = True
|
|
||||||
|
|
||||||
if self._AUTHENTICATE:
|
|
||||||
self._login()
|
|
||||||
|
|
||||||
def _login(self):
|
def _login(self):
|
||||||
(username, password) = self._get_login_info()
|
(username, password) = self._get_login_info()
|
||||||
|
# No authentication to be performed
|
||||||
|
if not username:
|
||||||
|
return True
|
||||||
|
|
||||||
# Log in
|
# Log in
|
||||||
login_form_strs = {
|
login_form_strs = {
|
||||||
@ -68,6 +67,8 @@ class NiconicoIE(InfoExtractor):
|
|||||||
if re.search(r'(?i)<h1 class="mb8p4">Log in error</h1>', login_results) is not None:
|
if re.search(r'(?i)<h1 class="mb8p4">Log in error</h1>', login_results) is not None:
|
||||||
self._downloader.report_warning('unable to log in: bad username or password')
|
self._downloader.report_warning('unable to log in: bad username or password')
|
||||||
return False
|
return False
|
||||||
|
# Successful login
|
||||||
|
self._AUTHENTICATED = True
|
||||||
return True
|
return True
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
@ -82,7 +83,7 @@ class NiconicoIE(InfoExtractor):
|
|||||||
'http://ext.nicovideo.jp/api/getthumbinfo/' + video_id, video_id,
|
'http://ext.nicovideo.jp/api/getthumbinfo/' + video_id, video_id,
|
||||||
note='Downloading video info page')
|
note='Downloading video info page')
|
||||||
|
|
||||||
if self._AUTHENTICATE:
|
if self._AUTHENTICATED:
|
||||||
# Get flv info
|
# Get flv info
|
||||||
flv_info_webpage = self._download_webpage(
|
flv_info_webpage = self._download_webpage(
|
||||||
'http://flapi.nicovideo.jp/api/getflv?v=' + video_id,
|
'http://flapi.nicovideo.jp/api/getflv?v=' + video_id,
|
||||||
|
47
youtube_dl/extractor/oktoberfesttv.py
Normal file
47
youtube_dl/extractor/oktoberfesttv.py
Normal file
@ -0,0 +1,47 @@
|
|||||||
|
# encoding: utf-8
|
||||||
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
|
from .common import InfoExtractor
|
||||||
|
|
||||||
|
|
||||||
|
class OktoberfestTVIE(InfoExtractor):
|
||||||
|
_VALID_URL = r'https?://www\.oktoberfest-tv\.de/[^/]+/[^/]+/video/(?P<id>[^/?#]+)'
|
||||||
|
|
||||||
|
_TEST = {
|
||||||
|
'url': 'http://www.oktoberfest-tv.de/de/kameras/video/hb-zelt',
|
||||||
|
'info_dict': {
|
||||||
|
'id': 'hb-zelt',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': 're:^Live-Kamera: Hofbräuzelt [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
|
||||||
|
'thumbnail': 're:^https?://.*\.jpg$',
|
||||||
|
'is_live': True,
|
||||||
|
},
|
||||||
|
'params': {
|
||||||
|
'skip_download': True,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
def _real_extract(self, url):
|
||||||
|
video_id = self._match_id(url)
|
||||||
|
webpage = self._download_webpage(url, video_id)
|
||||||
|
|
||||||
|
title = self._live_title(self._html_search_regex(
|
||||||
|
r'<h1><strong>.*?</strong>(.*?)</h1>', webpage, 'title'))
|
||||||
|
|
||||||
|
clip = self._search_regex(
|
||||||
|
r"clip:\s*\{\s*url:\s*'([^']+)'", webpage, 'clip')
|
||||||
|
ncurl = self._search_regex(
|
||||||
|
r"netConnectionUrl:\s*'([^']+)'", webpage, 'rtmp base')
|
||||||
|
video_url = ncurl + clip
|
||||||
|
thumbnail = self._search_regex(
|
||||||
|
r"canvas:\s*\{\s*backgroundImage:\s*'url\(([^)]+)\)'", webpage,
|
||||||
|
'thumbnail', fatal=False)
|
||||||
|
|
||||||
|
return {
|
||||||
|
'id': video_id,
|
||||||
|
'title': title,
|
||||||
|
'url': video_url,
|
||||||
|
'ext': 'mp4',
|
||||||
|
'is_live': True,
|
||||||
|
'thumbnail': thumbnail,
|
||||||
|
}
|
@ -4,6 +4,7 @@ import re
|
|||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
from ..utils import (
|
from ..utils import (
|
||||||
|
unified_strdate,
|
||||||
US_RATINGS,
|
US_RATINGS,
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -11,10 +12,10 @@ from ..utils import (
|
|||||||
class PBSIE(InfoExtractor):
|
class PBSIE(InfoExtractor):
|
||||||
_VALID_URL = r'''(?x)https?://
|
_VALID_URL = r'''(?x)https?://
|
||||||
(?:
|
(?:
|
||||||
# Direct video URL
|
# Direct video URL
|
||||||
video\.pbs\.org/(?:viralplayer|video)/(?P<id>[0-9]+)/? |
|
video\.pbs\.org/(?:viralplayer|video)/(?P<id>[0-9]+)/? |
|
||||||
# Article with embedded player
|
# Article with embedded player (or direct video)
|
||||||
(?:www\.)?pbs\.org/(?:[^/]+/){2,5}(?P<presumptive_id>[^/]+)/?(?:$|[?\#]) |
|
(?:www\.)?pbs\.org/(?:[^/]+/){2,5}(?P<presumptive_id>[^/]+?)(?:\.html)?/?(?:$|[?\#]) |
|
||||||
# Player
|
# Player
|
||||||
video\.pbs\.org/(?:widget/)?partnerplayer/(?P<player_id>[^/]+)/
|
video\.pbs\.org/(?:widget/)?partnerplayer/(?P<player_id>[^/]+)/
|
||||||
)
|
)
|
||||||
@ -65,10 +66,25 @@ class PBSIE(InfoExtractor):
|
|||||||
'duration': 6559,
|
'duration': 6559,
|
||||||
'thumbnail': 're:^https?://.*\.jpg$',
|
'thumbnail': 're:^https?://.*\.jpg$',
|
||||||
}
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
'url': 'http://www.pbs.org/wgbh/nova/earth/killer-typhoon.html',
|
||||||
|
'md5': '908f3e5473a693b266b84e25e1cf9703',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '2365160389',
|
||||||
|
'display_id': 'killer-typhoon',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'description': 'md5:c741d14e979fc53228c575894094f157',
|
||||||
|
'title': 'Killer Typhoon',
|
||||||
|
'duration': 3172,
|
||||||
|
'thumbnail': 're:^https?://.*\.jpg$',
|
||||||
|
'upload_date': '20140122',
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
]
|
]
|
||||||
|
|
||||||
def _extract_ids(self, url):
|
def _extract_webpage(self, url):
|
||||||
mobj = re.match(self._VALID_URL, url)
|
mobj = re.match(self._VALID_URL, url)
|
||||||
|
|
||||||
presumptive_id = mobj.group('presumptive_id')
|
presumptive_id = mobj.group('presumptive_id')
|
||||||
@ -76,15 +92,20 @@ class PBSIE(InfoExtractor):
|
|||||||
if presumptive_id:
|
if presumptive_id:
|
||||||
webpage = self._download_webpage(url, display_id)
|
webpage = self._download_webpage(url, display_id)
|
||||||
|
|
||||||
|
upload_date = unified_strdate(self._search_regex(
|
||||||
|
r'<input type="hidden" id="air_date_[0-9]+" value="([^"]+)"',
|
||||||
|
webpage, 'upload date', default=None))
|
||||||
|
|
||||||
MEDIA_ID_REGEXES = [
|
MEDIA_ID_REGEXES = [
|
||||||
r"div\s*:\s*'videoembed'\s*,\s*mediaid\s*:\s*'(\d+)'", # frontline video embed
|
r"div\s*:\s*'videoembed'\s*,\s*mediaid\s*:\s*'(\d+)'", # frontline video embed
|
||||||
r'class="coveplayerid">([^<]+)<', # coveplayer
|
r'class="coveplayerid">([^<]+)<', # coveplayer
|
||||||
|
r'<input type="hidden" id="pbs_video_id_[0-9]+" value="([0-9]+)"/>', # jwplayer
|
||||||
]
|
]
|
||||||
|
|
||||||
media_id = self._search_regex(
|
media_id = self._search_regex(
|
||||||
MEDIA_ID_REGEXES, webpage, 'media ID', fatal=False, default=None)
|
MEDIA_ID_REGEXES, webpage, 'media ID', fatal=False, default=None)
|
||||||
if media_id:
|
if media_id:
|
||||||
return media_id, presumptive_id
|
return media_id, presumptive_id, upload_date
|
||||||
|
|
||||||
url = self._search_regex(
|
url = self._search_regex(
|
||||||
r'<iframe\s+(?:class|id)=["\']partnerPlayer["\'].*?\s+src=["\'](.*?)["\']>',
|
r'<iframe\s+(?:class|id)=["\']partnerPlayer["\'].*?\s+src=["\'](.*?)["\']>',
|
||||||
@ -104,10 +125,10 @@ class PBSIE(InfoExtractor):
|
|||||||
video_id = mobj.group('id')
|
video_id = mobj.group('id')
|
||||||
display_id = video_id
|
display_id = video_id
|
||||||
|
|
||||||
return video_id, display_id
|
return video_id, display_id, None
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
video_id, display_id = self._extract_ids(url)
|
video_id, display_id, upload_date = self._extract_webpage(url)
|
||||||
|
|
||||||
info_url = 'http://video.pbs.org/videoInfo/%s?format=json' % video_id
|
info_url = 'http://video.pbs.org/videoInfo/%s?format=json' % video_id
|
||||||
info = self._download_json(info_url, display_id)
|
info = self._download_json(info_url, display_id)
|
||||||
@ -119,6 +140,7 @@ class PBSIE(InfoExtractor):
|
|||||||
|
|
||||||
return {
|
return {
|
||||||
'id': video_id,
|
'id': video_id,
|
||||||
|
'display_id': display_id,
|
||||||
'title': info['title'],
|
'title': info['title'],
|
||||||
'url': info['alternate_encoding']['url'],
|
'url': info['alternate_encoding']['url'],
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
@ -126,4 +148,5 @@ class PBSIE(InfoExtractor):
|
|||||||
'thumbnail': info.get('image_url'),
|
'thumbnail': info.get('image_url'),
|
||||||
'duration': info.get('duration'),
|
'duration': info.get('duration'),
|
||||||
'age_limit': age_limit,
|
'age_limit': age_limit,
|
||||||
|
'upload_date': upload_date,
|
||||||
}
|
}
|
||||||
|
60
youtube_dl/extractor/planetaplay.py
Normal file
60
youtube_dl/extractor/planetaplay.py
Normal file
@ -0,0 +1,60 @@
|
|||||||
|
# coding: utf-8
|
||||||
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
|
import re
|
||||||
|
|
||||||
|
from .common import InfoExtractor
|
||||||
|
from ..utils import ExtractorError
|
||||||
|
|
||||||
|
|
||||||
|
class PlanetaPlayIE(InfoExtractor):
|
||||||
|
_VALID_URL = r'https?://(?:www\.)?planetaplay\.com/\?sng=(?P<id>[0-9]+)'
|
||||||
|
_API_URL = 'http://planetaplay.com/action/playlist/?sng={0:}'
|
||||||
|
_THUMBNAIL_URL = 'http://planetaplay.com/img/thumb/{thumb:}'
|
||||||
|
_TEST = {
|
||||||
|
'url': 'http://planetaplay.com/?sng=3586',
|
||||||
|
'md5': '9d569dceb7251a4e01355d5aea60f9db',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '3586',
|
||||||
|
'ext': 'flv',
|
||||||
|
'title': 'md5:e829428ee28b1deed00de90de49d1da1',
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
_SONG_FORMATS = {
|
||||||
|
'lq': (0, 'http://www.planetaplay.com/videoplayback/{med_hash:}'),
|
||||||
|
'hq': (1, 'http://www.planetaplay.com/videoplayback/hi/{med_hash:}'),
|
||||||
|
}
|
||||||
|
|
||||||
|
def _real_extract(self, url):
|
||||||
|
mobj = re.match(self._VALID_URL, url)
|
||||||
|
video_id = mobj.group('id')
|
||||||
|
|
||||||
|
response = self._download_json(
|
||||||
|
self._API_URL.format(video_id), video_id)['response']
|
||||||
|
try:
|
||||||
|
data = response.get('data')[0]
|
||||||
|
except IndexError:
|
||||||
|
raise ExtractorError(
|
||||||
|
'%s: failed to get the playlist' % self.IE_NAME, expected=True)
|
||||||
|
|
||||||
|
title = '{song_artists:} - {sng_name:}'.format(**data)
|
||||||
|
thumbnail = self._THUMBNAIL_URL.format(**data)
|
||||||
|
|
||||||
|
formats = []
|
||||||
|
for format_id, (quality, url_template) in self._SONG_FORMATS.items():
|
||||||
|
formats.append({
|
||||||
|
'format_id': format_id,
|
||||||
|
'url': url_template.format(**data),
|
||||||
|
'quality': quality,
|
||||||
|
'ext': 'flv',
|
||||||
|
})
|
||||||
|
|
||||||
|
self._sort_formats(formats)
|
||||||
|
|
||||||
|
return {
|
||||||
|
'id': video_id,
|
||||||
|
'title': title,
|
||||||
|
'formats': formats,
|
||||||
|
'thumbnail': thumbnail,
|
||||||
|
}
|
55
youtube_dl/extractor/played.py
Normal file
55
youtube_dl/extractor/played.py
Normal file
@ -0,0 +1,55 @@
|
|||||||
|
# coding: utf-8
|
||||||
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
|
import re
|
||||||
|
import os.path
|
||||||
|
|
||||||
|
from .common import InfoExtractor
|
||||||
|
from ..utils import (
|
||||||
|
compat_urllib_parse,
|
||||||
|
compat_urllib_request,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
class PlayedIE(InfoExtractor):
|
||||||
|
IE_NAME = 'played.to'
|
||||||
|
_VALID_URL = r'https?://(?:www\.)?played\.to/(?P<id>[a-zA-Z0-9_-]+)'
|
||||||
|
|
||||||
|
_TEST = {
|
||||||
|
'url': 'http://played.to/j2f2sfiiukgt',
|
||||||
|
'md5': 'c2bd75a368e82980e7257bf500c00637',
|
||||||
|
'info_dict': {
|
||||||
|
'id': 'j2f2sfiiukgt',
|
||||||
|
'ext': 'flv',
|
||||||
|
'title': 'youtube-dl_test_video.mp4',
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
def _real_extract(self, url):
|
||||||
|
video_id = self._match_id(url)
|
||||||
|
|
||||||
|
orig_webpage = self._download_webpage(url, video_id)
|
||||||
|
fields = re.findall(
|
||||||
|
r'type="hidden" name="([^"]+)"\s+value="([^"]+)">', orig_webpage)
|
||||||
|
data = dict(fields)
|
||||||
|
|
||||||
|
self._sleep(2, video_id)
|
||||||
|
|
||||||
|
post = compat_urllib_parse.urlencode(data)
|
||||||
|
headers = {
|
||||||
|
b'Content-Type': b'application/x-www-form-urlencoded',
|
||||||
|
}
|
||||||
|
req = compat_urllib_request.Request(url, post, headers)
|
||||||
|
webpage = self._download_webpage(
|
||||||
|
req, video_id, note='Downloading video page ...')
|
||||||
|
|
||||||
|
title = os.path.splitext(data['fname'])[0]
|
||||||
|
|
||||||
|
video_url = self._search_regex(
|
||||||
|
r'file: "?(.+?)",', webpage, 'video URL')
|
||||||
|
|
||||||
|
return {
|
||||||
|
'id': video_id,
|
||||||
|
'title': title,
|
||||||
|
'url': video_url,
|
||||||
|
}
|
@ -4,19 +4,27 @@ import re
|
|||||||
import json
|
import json
|
||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
from ..utils import int_or_none
|
from ..utils import (
|
||||||
|
int_or_none,
|
||||||
|
js_to_json,
|
||||||
|
qualities,
|
||||||
|
determine_ext,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
class PornHdIE(InfoExtractor):
|
class PornHdIE(InfoExtractor):
|
||||||
_VALID_URL = r'http://(?:www\.)?pornhd\.com/(?:[a-z]{2,4}/)?videos/(?P<id>\d+)'
|
_VALID_URL = r'http://(?:www\.)?pornhd\.com/(?:[a-z]{2,4}/)?videos/(?P<id>\d+)(?:/(?P<display_id>.+))?'
|
||||||
_TEST = {
|
_TEST = {
|
||||||
'url': 'http://www.pornhd.com/videos/1962/sierra-day-gets-his-cum-all-over-herself-hd-porn-video',
|
'url': 'http://www.pornhd.com/videos/1962/sierra-day-gets-his-cum-all-over-herself-hd-porn-video',
|
||||||
'md5': '956b8ca569f7f4d8ec563e2c41598441',
|
'md5': '956b8ca569f7f4d8ec563e2c41598441',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '1962',
|
'id': '1962',
|
||||||
|
'display_id': 'sierra-day-gets-his-cum-all-over-herself-hd-porn-video',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'title': 'Sierra loves doing laundry',
|
'title': 'Sierra loves doing laundry',
|
||||||
'description': 'md5:8ff0523848ac2b8f9b065ba781ccf294',
|
'description': 'md5:8ff0523848ac2b8f9b065ba781ccf294',
|
||||||
|
'thumbnail': 're:^https?://.*\.jpg',
|
||||||
|
'view_count': int,
|
||||||
'age_limit': 18,
|
'age_limit': 18,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -24,8 +32,9 @@ class PornHdIE(InfoExtractor):
|
|||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
mobj = re.match(self._VALID_URL, url)
|
mobj = re.match(self._VALID_URL, url)
|
||||||
video_id = mobj.group('id')
|
video_id = mobj.group('id')
|
||||||
|
display_id = mobj.group('display_id')
|
||||||
|
|
||||||
webpage = self._download_webpage(url, video_id)
|
webpage = self._download_webpage(url, display_id or video_id)
|
||||||
|
|
||||||
title = self._html_search_regex(
|
title = self._html_search_regex(
|
||||||
r'<title>(.+) porn HD.+?</title>', webpage, 'title')
|
r'<title>(.+) porn HD.+?</title>', webpage, 'title')
|
||||||
@ -33,38 +42,21 @@ class PornHdIE(InfoExtractor):
|
|||||||
r'<div class="description">([^<]+)</div>', webpage, 'description', fatal=False)
|
r'<div class="description">([^<]+)</div>', webpage, 'description', fatal=False)
|
||||||
view_count = int_or_none(self._html_search_regex(
|
view_count = int_or_none(self._html_search_regex(
|
||||||
r'(\d+) views\s*</span>', webpage, 'view count', fatal=False))
|
r'(\d+) views\s*</span>', webpage, 'view count', fatal=False))
|
||||||
|
thumbnail = self._search_regex(
|
||||||
|
r"'poster'\s*:\s*'([^']+)'", webpage, 'thumbnail', fatal=False)
|
||||||
|
|
||||||
videos = re.findall(
|
quality = qualities(['SD', 'HD'])
|
||||||
r'var __video([\da-zA-Z]+?)(Low|High)StreamUrl = \'(http://.+?)\?noProxy=1\'', webpage)
|
formats = [{
|
||||||
|
'url': source['file'],
|
||||||
mobj = re.search(r'flashVars = (?P<flashvars>{.+?});', webpage)
|
'format_id': '%s-%s' % (source['label'], determine_ext(source['file'])),
|
||||||
if mobj:
|
'quality': quality(source['label']),
|
||||||
flashvars = json.loads(mobj.group('flashvars'))
|
} for source in json.loads(js_to_json(self._search_regex(
|
||||||
for key, quality in [('hashlink', 'low'), ('hd', 'high')]:
|
r"(?s)'sources'\s*:\s*(\[.+?\])", webpage, 'sources')))]
|
||||||
redirect_url = flashvars.get(key)
|
|
||||||
if redirect_url:
|
|
||||||
videos.append(('flv', quality, redirect_url))
|
|
||||||
thumbnail = flashvars['urlWallpaper']
|
|
||||||
else:
|
|
||||||
thumbnail = self._og_search_thumbnail(webpage)
|
|
||||||
|
|
||||||
formats = []
|
|
||||||
for format_, quality, redirect_url in videos:
|
|
||||||
format_id = '%s-%s' % (format_.lower(), quality.lower())
|
|
||||||
video_url = self._download_webpage(
|
|
||||||
redirect_url, video_id, 'Downloading %s video link' % format_id, fatal=False)
|
|
||||||
if not video_url:
|
|
||||||
continue
|
|
||||||
formats.append({
|
|
||||||
'url': video_url,
|
|
||||||
'ext': format_.lower(),
|
|
||||||
'format_id': format_id,
|
|
||||||
'quality': 1 if quality.lower() == 'high' else 0,
|
|
||||||
})
|
|
||||||
self._sort_formats(formats)
|
self._sort_formats(formats)
|
||||||
|
|
||||||
return {
|
return {
|
||||||
'id': video_id,
|
'id': video_id,
|
||||||
|
'display_id': display_id,
|
||||||
'title': title,
|
'title': title,
|
||||||
'description': description,
|
'description': description,
|
||||||
'thumbnail': thumbnail,
|
'thumbnail': thumbnail,
|
||||||
|
@ -144,7 +144,7 @@ class ProSiebenSat1IE(InfoExtractor):
|
|||||||
'id': '2156342',
|
'id': '2156342',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'title': 'Kurztrips zum Valentinstag',
|
'title': 'Kurztrips zum Valentinstag',
|
||||||
'description': 'md5:8ba6301e70351ae0bedf8da00f7ba528',
|
'description': 'Romantischer Kurztrip zum Valentinstag? Wir verraten, was sich hier wirklich lohnt.',
|
||||||
'duration': 307.24,
|
'duration': 307.24,
|
||||||
},
|
},
|
||||||
'params': {
|
'params': {
|
||||||
@ -180,12 +180,10 @@ class ProSiebenSat1IE(InfoExtractor):
|
|||||||
]
|
]
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
mobj = re.match(self._VALID_URL, url)
|
video_id = self._match_id(url)
|
||||||
video_id = mobj.group('id')
|
webpage = self._download_webpage(url, video_id)
|
||||||
|
|
||||||
page = self._download_webpage(url, video_id, 'Downloading page')
|
clip_id = self._html_search_regex(self._CLIPID_REGEXES, webpage, 'clip id')
|
||||||
|
|
||||||
clip_id = self._html_search_regex(self._CLIPID_REGEXES, page, 'clip id')
|
|
||||||
|
|
||||||
access_token = 'testclient'
|
access_token = 'testclient'
|
||||||
client_name = 'kolibri-1.2.5'
|
client_name = 'kolibri-1.2.5'
|
||||||
@ -234,12 +232,12 @@ class ProSiebenSat1IE(InfoExtractor):
|
|||||||
|
|
||||||
urls = self._download_json(url_api_url, clip_id, 'Downloading urls JSON')
|
urls = self._download_json(url_api_url, clip_id, 'Downloading urls JSON')
|
||||||
|
|
||||||
title = self._html_search_regex(self._TITLE_REGEXES, page, 'title')
|
title = self._html_search_regex(self._TITLE_REGEXES, webpage, 'title')
|
||||||
description = self._html_search_regex(self._DESCRIPTION_REGEXES, page, 'description', fatal=False)
|
description = self._html_search_regex(self._DESCRIPTION_REGEXES, webpage, 'description', fatal=False)
|
||||||
thumbnail = self._og_search_thumbnail(page)
|
thumbnail = self._og_search_thumbnail(webpage)
|
||||||
|
|
||||||
upload_date = unified_strdate(self._html_search_regex(
|
upload_date = unified_strdate(self._html_search_regex(
|
||||||
self._UPLOAD_DATE_REGEXES, page, 'upload date', default=None))
|
self._UPLOAD_DATE_REGEXES, webpage, 'upload date', default=None))
|
||||||
|
|
||||||
formats = []
|
formats = []
|
||||||
|
|
||||||
|
@ -9,7 +9,6 @@ from ..utils import (
|
|||||||
compat_urllib_parse,
|
compat_urllib_parse,
|
||||||
unified_strdate,
|
unified_strdate,
|
||||||
str_to_int,
|
str_to_int,
|
||||||
int_or_none,
|
|
||||||
)
|
)
|
||||||
from ..aes import aes_decrypt_text
|
from ..aes import aes_decrypt_text
|
||||||
|
|
||||||
@ -40,31 +39,42 @@ class SpankwireIE(InfoExtractor):
|
|||||||
req.add_header('Cookie', 'age_verified=1')
|
req.add_header('Cookie', 'age_verified=1')
|
||||||
webpage = self._download_webpage(req, video_id)
|
webpage = self._download_webpage(req, video_id)
|
||||||
|
|
||||||
title = self._html_search_regex(r'<h1>([^<]+)', webpage, 'title')
|
title = self._html_search_regex(
|
||||||
|
r'<h1>([^<]+)', webpage, 'title')
|
||||||
description = self._html_search_regex(
|
description = self._html_search_regex(
|
||||||
r'<div\s+id="descriptionContent">([^<]+)<', webpage, 'description', fatal=False)
|
r'<div\s+id="descriptionContent">([^<]+)<',
|
||||||
|
webpage, 'description', fatal=False)
|
||||||
thumbnail = self._html_search_regex(
|
thumbnail = self._html_search_regex(
|
||||||
r'flashvars\.image_url = "([^"]+)', webpage, 'thumbnail', fatal=False)
|
r'playerData\.screenShot\s*=\s*["\']([^"\']+)["\']',
|
||||||
|
webpage, 'thumbnail', fatal=False)
|
||||||
|
|
||||||
uploader = self._html_search_regex(
|
uploader = self._html_search_regex(
|
||||||
r'by:\s*<a [^>]*>(.+?)</a>', webpage, 'uploader', fatal=False)
|
r'by:\s*<a [^>]*>(.+?)</a>',
|
||||||
|
webpage, 'uploader', fatal=False)
|
||||||
uploader_id = self._html_search_regex(
|
uploader_id = self._html_search_regex(
|
||||||
r'by:\s*<a href="/Profile\.aspx\?.*?UserId=(\d+).*?"', webpage, 'uploader id', fatal=False)
|
r'by:\s*<a href="/Profile\.aspx\?.*?UserId=(\d+).*?"',
|
||||||
upload_date = self._html_search_regex(r'</a> on (.+?) at \d+:\d+', webpage, 'upload date', fatal=False)
|
webpage, 'uploader id', fatal=False)
|
||||||
if upload_date:
|
upload_date = unified_strdate(self._html_search_regex(
|
||||||
upload_date = unified_strdate(upload_date)
|
r'</a> on (.+?) at \d+:\d+',
|
||||||
|
webpage, 'upload date', fatal=False))
|
||||||
|
|
||||||
view_count = self._html_search_regex(
|
view_count = str_to_int(self._html_search_regex(
|
||||||
r'<div id="viewsCounter"><span>([^<]+)</span> views</div>', webpage, 'view count', fatal=False)
|
r'<div id="viewsCounter"><span>([\d,\.]+)</span> views</div>',
|
||||||
if view_count:
|
webpage, 'view count', fatal=False))
|
||||||
view_count = str_to_int(view_count)
|
comment_count = str_to_int(self._html_search_regex(
|
||||||
comment_count = int_or_none(self._html_search_regex(
|
r'Comments<span[^>]+>\s*\(([\d,\.]+)\)</span>',
|
||||||
r'<span id="spCommentCount">\s*(\d+)</span> Comments</div>', webpage, 'comment count', fatal=False))
|
webpage, 'comment count', fatal=False))
|
||||||
|
|
||||||
video_urls = list(map(compat_urllib_parse.unquote , re.findall(r'flashvars\.quality_[0-9]{3}p = "([^"]+)', webpage)))
|
video_urls = list(map(
|
||||||
|
compat_urllib_parse.unquote,
|
||||||
|
re.findall(r'playerData\.cdnPath[0-9]{3,}\s*=\s*["\']([^"\']+)["\']', webpage)))
|
||||||
if webpage.find('flashvars\.encrypted = "true"') != -1:
|
if webpage.find('flashvars\.encrypted = "true"') != -1:
|
||||||
password = self._html_search_regex(r'flashvars\.video_title = "([^"]+)', webpage, 'password').replace('+', ' ')
|
password = self._html_search_regex(
|
||||||
video_urls = list(map(lambda s: aes_decrypt_text(s, password, 32).decode('utf-8'), video_urls))
|
r'flashvars\.video_title = "([^"]+)',
|
||||||
|
webpage, 'password').replace('+', ' ')
|
||||||
|
video_urls = list(map(
|
||||||
|
lambda s: aes_decrypt_text(s, password, 32).decode('utf-8'),
|
||||||
|
video_urls))
|
||||||
|
|
||||||
formats = []
|
formats = []
|
||||||
for video_url in video_urls:
|
for video_url in video_urls:
|
||||||
|
92
youtube_dl/extractor/sport5.py
Normal file
92
youtube_dl/extractor/sport5.py
Normal file
@ -0,0 +1,92 @@
|
|||||||
|
# coding: utf-8
|
||||||
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
|
import re
|
||||||
|
|
||||||
|
from .common import InfoExtractor
|
||||||
|
from ..utils import ExtractorError
|
||||||
|
|
||||||
|
|
||||||
|
class Sport5IE(InfoExtractor):
|
||||||
|
_VALID_URL = r'http://(?:www|vod)?\.sport5\.co\.il/.*\b(?:Vi|docID)=(?P<id>\d+)'
|
||||||
|
_TESTS = [
|
||||||
|
{
|
||||||
|
'url': 'http://vod.sport5.co.il/?Vc=147&Vi=176331&Page=1',
|
||||||
|
'info_dict': {
|
||||||
|
'id': 's5-Y59xx1-GUh2',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': 'ולנסיה-קורדובה 0:3',
|
||||||
|
'description': 'אלקאסר, גאייה ופגולי סידרו לקבוצה של נונו ניצחון על קורדובה ואת המקום הראשון בליגה',
|
||||||
|
'duration': 228,
|
||||||
|
'categories': list,
|
||||||
|
},
|
||||||
|
'skip': 'Blocked outside of Israel',
|
||||||
|
}, {
|
||||||
|
'url': 'http://www.sport5.co.il/articles.aspx?FolderID=3075&docID=176372&lang=HE',
|
||||||
|
'info_dict': {
|
||||||
|
'id': 's5-SiXxx1-hKh2',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': 'GOALS_CELTIC_270914.mp4',
|
||||||
|
'description': '',
|
||||||
|
'duration': 87,
|
||||||
|
'categories': list,
|
||||||
|
},
|
||||||
|
'skip': 'Blocked outside of Israel',
|
||||||
|
}
|
||||||
|
]
|
||||||
|
|
||||||
|
def _real_extract(self, url):
|
||||||
|
mobj = re.match(self._VALID_URL, url)
|
||||||
|
media_id = mobj.group('id')
|
||||||
|
|
||||||
|
webpage = self._download_webpage(url, media_id)
|
||||||
|
|
||||||
|
video_id = self._html_search_regex('clipId=([\w-]+)', webpage, 'video id')
|
||||||
|
|
||||||
|
metadata = self._download_xml(
|
||||||
|
'http://sport5-metadata-rr-d.nsacdn.com/vod/vod/%s/HDS/metadata.xml' % video_id,
|
||||||
|
video_id)
|
||||||
|
|
||||||
|
error = metadata.find('./Error')
|
||||||
|
if error is not None:
|
||||||
|
raise ExtractorError(
|
||||||
|
'%s returned error: %s - %s' % (
|
||||||
|
self.IE_NAME,
|
||||||
|
error.find('./Name').text,
|
||||||
|
error.find('./Description').text),
|
||||||
|
expected=True)
|
||||||
|
|
||||||
|
title = metadata.find('./Title').text
|
||||||
|
description = metadata.find('./Description').text
|
||||||
|
duration = int(metadata.find('./Duration').text)
|
||||||
|
|
||||||
|
posters_el = metadata.find('./PosterLinks')
|
||||||
|
thumbnails = [{
|
||||||
|
'url': thumbnail.text,
|
||||||
|
'width': int(thumbnail.get('width')),
|
||||||
|
'height': int(thumbnail.get('height')),
|
||||||
|
} for thumbnail in posters_el.findall('./PosterIMG')] if posters_el is not None else []
|
||||||
|
|
||||||
|
categories_el = metadata.find('./Categories')
|
||||||
|
categories = [
|
||||||
|
cat.get('name') for cat in categories_el.findall('./Category')
|
||||||
|
] if categories_el is not None else []
|
||||||
|
|
||||||
|
formats = [{
|
||||||
|
'url': fmt.text,
|
||||||
|
'ext': 'mp4',
|
||||||
|
'vbr': int(fmt.get('bitrate')),
|
||||||
|
'width': int(fmt.get('width')),
|
||||||
|
'height': int(fmt.get('height')),
|
||||||
|
} for fmt in metadata.findall('./PlaybackLinks/FileURL')]
|
||||||
|
self._sort_formats(formats)
|
||||||
|
|
||||||
|
return {
|
||||||
|
'id': video_id,
|
||||||
|
'title': title,
|
||||||
|
'description': description,
|
||||||
|
'thumbnails': thumbnails,
|
||||||
|
'duration': duration,
|
||||||
|
'categories': categories,
|
||||||
|
'formats': formats,
|
||||||
|
}
|
81
youtube_dl/extractor/sportbox.py
Normal file
81
youtube_dl/extractor/sportbox.py
Normal file
@ -0,0 +1,81 @@
|
|||||||
|
# coding: utf-8
|
||||||
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
|
import re
|
||||||
|
|
||||||
|
from .common import InfoExtractor
|
||||||
|
from ..utils import (
|
||||||
|
parse_duration,
|
||||||
|
parse_iso8601,
|
||||||
|
int_or_none,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
class SportBoxIE(InfoExtractor):
|
||||||
|
_VALID_URL = r'https?://news\.sportbox\.ru/Vidy_sporta/(?:[^/]+/)+spbvideo_NI\d+_(?P<display_id>.+)'
|
||||||
|
_TESTS = [
|
||||||
|
{
|
||||||
|
'url': 'http://news.sportbox.ru/Vidy_sporta/Avtosport/Rossijskij/spbvideo_NI483529_Gonka-2-zaezd-Obyedinenniy-2000-klassi-Turing-i-S',
|
||||||
|
'md5': 'ff56a598c2cf411a9a38a69709e97079',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '80822',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': 'Гонка 2 заезд ««Объединенный 2000»: классы Туринг и Супер-продакшн',
|
||||||
|
'description': 'md5:81715fa9c4ea3d9e7915dc8180c778ed',
|
||||||
|
'thumbnail': 're:^https?://.*\.jpg$',
|
||||||
|
'timestamp': 1411896237,
|
||||||
|
'upload_date': '20140928',
|
||||||
|
'duration': 4846,
|
||||||
|
'view_count': int,
|
||||||
|
},
|
||||||
|
'params': {
|
||||||
|
# m3u8 download
|
||||||
|
'skip_download': True,
|
||||||
|
},
|
||||||
|
}, {
|
||||||
|
'url': 'http://news.sportbox.ru/Vidy_sporta/billiard/spbvideo_NI486287_CHempionat-mira-po-dinamichnoy-piramide-4',
|
||||||
|
'only_matching': True,
|
||||||
|
}
|
||||||
|
]
|
||||||
|
|
||||||
|
def _real_extract(self, url):
|
||||||
|
mobj = re.match(self._VALID_URL, url)
|
||||||
|
display_id = mobj.group('display_id')
|
||||||
|
|
||||||
|
webpage = self._download_webpage(url, display_id)
|
||||||
|
|
||||||
|
video_id = self._search_regex(
|
||||||
|
r'src="/vdl/player/media/(\d+)"', webpage, 'video id')
|
||||||
|
|
||||||
|
player = self._download_webpage(
|
||||||
|
'http://news.sportbox.ru/vdl/player/media/%s' % video_id,
|
||||||
|
display_id, 'Downloading player webpage')
|
||||||
|
|
||||||
|
hls = self._search_regex(
|
||||||
|
r"var\s+original_hls_file\s*=\s*'([^']+)'", player, 'hls file')
|
||||||
|
|
||||||
|
formats = self._extract_m3u8_formats(hls, display_id, 'mp4')
|
||||||
|
|
||||||
|
title = self._html_search_regex(
|
||||||
|
r'<h1 itemprop="name">([^<]+)</h1>', webpage, 'title')
|
||||||
|
description = self._html_search_regex(
|
||||||
|
r'(?s)<div itemprop="description">(.+?)</div>', webpage, 'description', fatal=False)
|
||||||
|
thumbnail = self._og_search_thumbnail(webpage)
|
||||||
|
timestamp = parse_iso8601(self._search_regex(
|
||||||
|
r'<span itemprop="uploadDate">([^<]+)</span>', webpage, 'timestamp', fatal=False))
|
||||||
|
duration = parse_duration(self._html_search_regex(
|
||||||
|
r'<meta itemprop="duration" content="PT([^"]+)">', webpage, 'duration', fatal=False))
|
||||||
|
view_count = int_or_none(self._html_search_regex(
|
||||||
|
r'<span>Просмотров: (\d+)</span>', player, 'view count', fatal=False))
|
||||||
|
|
||||||
|
return {
|
||||||
|
'id': video_id,
|
||||||
|
'display_id': display_id,
|
||||||
|
'title': title,
|
||||||
|
'description': description,
|
||||||
|
'thumbnail': thumbnail,
|
||||||
|
'timestamp': timestamp,
|
||||||
|
'duration': duration,
|
||||||
|
'view_count': view_count,
|
||||||
|
'formats': formats,
|
||||||
|
}
|
@ -17,11 +17,11 @@ class SportDeutschlandIE(InfoExtractor):
|
|||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': 'live-li-ning-badminton-weltmeisterschaft-2014-kopenhagen',
|
'id': 'live-li-ning-badminton-weltmeisterschaft-2014-kopenhagen',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'title': 'LIVE: Li-Ning Badminton Weltmeisterschaft 2014 Kopenhagen',
|
'title': 're:Li-Ning Badminton Weltmeisterschaft 2014 Kopenhagen',
|
||||||
'categories': ['Badminton'],
|
'categories': ['Badminton'],
|
||||||
'view_count': int,
|
'view_count': int,
|
||||||
'thumbnail': 're:^https?://.*\.jpg$',
|
'thumbnail': 're:^https?://.*\.jpg$',
|
||||||
'description': 're:^Die Badminton-WM 2014 aus Kopenhagen LIVE',
|
'description': 're:Die Badminton-WM 2014 aus Kopenhagen bei Sportdeutschland\.TV',
|
||||||
'timestamp': int,
|
'timestamp': int,
|
||||||
'upload_date': 're:^201408[23][0-9]$',
|
'upload_date': 're:^201408[23][0-9]$',
|
||||||
},
|
},
|
||||||
|
@ -39,10 +39,10 @@ class SunPornoIE(InfoExtractor):
|
|||||||
r'poster="([^"]+)"', webpage, 'thumbnail', fatal=False)
|
r'poster="([^"]+)"', webpage, 'thumbnail', fatal=False)
|
||||||
|
|
||||||
duration = parse_duration(self._search_regex(
|
duration = parse_duration(self._search_regex(
|
||||||
r'<span>Duration: (\d+:\d+)</span>', webpage, 'duration', fatal=False))
|
r'Duration:\s*(\d+:\d+)\s*<', webpage, 'duration', fatal=False))
|
||||||
|
|
||||||
view_count = int_or_none(self._html_search_regex(
|
view_count = int_or_none(self._html_search_regex(
|
||||||
r'<span class="views">(\d+)</span>', webpage, 'view count', fatal=False))
|
r'class="views">\s*(\d+)\s*<', webpage, 'view count', fatal=False))
|
||||||
comment_count = int_or_none(self._html_search_regex(
|
comment_count = int_or_none(self._html_search_regex(
|
||||||
r'(\d+)</b> Comments?', webpage, 'comment count', fatal=False))
|
r'(\d+)</b> Comments?', webpage, 'comment count', fatal=False))
|
||||||
|
|
||||||
|
104
youtube_dl/extractor/tapely.py
Normal file
104
youtube_dl/extractor/tapely.py
Normal file
@ -0,0 +1,104 @@
|
|||||||
|
# coding: utf-8
|
||||||
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
|
import re
|
||||||
|
|
||||||
|
from .common import InfoExtractor
|
||||||
|
from ..utils import (
|
||||||
|
ExtractorError,
|
||||||
|
clean_html,
|
||||||
|
compat_urllib_request,
|
||||||
|
float_or_none,
|
||||||
|
parse_iso8601,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
class TapelyIE(InfoExtractor):
|
||||||
|
_VALID_URL = r'https?://(?:www\.)?tape\.ly/(?P<id>[A-Za-z0-9\-_]+)(?:/(?P<songnr>\d+))?'
|
||||||
|
_API_URL = 'http://tape.ly/showtape?id={0:}'
|
||||||
|
_S3_SONG_URL = 'http://mytape.s3.amazonaws.com/{0:}'
|
||||||
|
_SOUNDCLOUD_SONG_URL = 'http://api.soundcloud.com{0:}'
|
||||||
|
_TESTS = [
|
||||||
|
{
|
||||||
|
'url': 'http://tape.ly/my-grief-as-told-by-water',
|
||||||
|
'info_dict': {
|
||||||
|
'id': 23952,
|
||||||
|
'title': 'my grief as told by water',
|
||||||
|
'thumbnail': 're:^https?://.*\.png$',
|
||||||
|
'uploader_id': 16484,
|
||||||
|
'timestamp': 1411848286,
|
||||||
|
'description': 'For Robin and Ponkers, whom the tides of life have taken out to sea.',
|
||||||
|
},
|
||||||
|
'playlist_count': 13,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
'url': 'http://tape.ly/my-grief-as-told-by-water/1',
|
||||||
|
'md5': '79031f459fdec6530663b854cbc5715c',
|
||||||
|
'info_dict': {
|
||||||
|
'id': 258464,
|
||||||
|
'title': 'Dreaming Awake (My Brightest Diamond)',
|
||||||
|
'ext': 'm4a',
|
||||||
|
},
|
||||||
|
},
|
||||||
|
]
|
||||||
|
|
||||||
|
def _real_extract(self, url):
|
||||||
|
mobj = re.match(self._VALID_URL, url)
|
||||||
|
display_id = mobj.group('id')
|
||||||
|
|
||||||
|
playlist_url = self._API_URL.format(display_id)
|
||||||
|
request = compat_urllib_request.Request(playlist_url)
|
||||||
|
request.add_header('X-Requested-With', 'XMLHttpRequest')
|
||||||
|
request.add_header('Accept', 'application/json')
|
||||||
|
|
||||||
|
playlist = self._download_json(request, display_id)
|
||||||
|
|
||||||
|
tape = playlist['tape']
|
||||||
|
|
||||||
|
entries = []
|
||||||
|
for s in tape['songs']:
|
||||||
|
song = s['song']
|
||||||
|
entry = {
|
||||||
|
'id': song['id'],
|
||||||
|
'duration': float_or_none(song.get('songduration'), 1000),
|
||||||
|
'title': song['title'],
|
||||||
|
}
|
||||||
|
if song['source'] == 'S3':
|
||||||
|
entry.update({
|
||||||
|
'url': self._S3_SONG_URL.format(song['filename']),
|
||||||
|
})
|
||||||
|
entries.append(entry)
|
||||||
|
elif song['source'] == 'YT':
|
||||||
|
self.to_screen('YouTube video detected')
|
||||||
|
yt_id = song['filename'].replace('/youtube/', '')
|
||||||
|
entry.update(self.url_result(yt_id, 'Youtube', video_id=yt_id))
|
||||||
|
entries.append(entry)
|
||||||
|
elif song['source'] == 'SC':
|
||||||
|
self.to_screen('SoundCloud song detected')
|
||||||
|
sc_url = self._SOUNDCLOUD_SONG_URL.format(song['filename'])
|
||||||
|
entry.update(self.url_result(sc_url, 'Soundcloud'))
|
||||||
|
entries.append(entry)
|
||||||
|
else:
|
||||||
|
self.report_warning('Unknown song source: %s' % song['source'])
|
||||||
|
|
||||||
|
if mobj.group('songnr'):
|
||||||
|
songnr = int(mobj.group('songnr')) - 1
|
||||||
|
try:
|
||||||
|
return entries[songnr]
|
||||||
|
except IndexError:
|
||||||
|
raise ExtractorError(
|
||||||
|
'No song with index: %s' % mobj.group('songnr'),
|
||||||
|
expected=True)
|
||||||
|
|
||||||
|
return {
|
||||||
|
'_type': 'playlist',
|
||||||
|
'id': tape['id'],
|
||||||
|
'display_id': display_id,
|
||||||
|
'title': tape['name'],
|
||||||
|
'entries': entries,
|
||||||
|
'thumbnail': tape.get('image_url'),
|
||||||
|
'description': clean_html(tape.get('subtext')),
|
||||||
|
'like_count': tape.get('likescount'),
|
||||||
|
'uploader_id': tape.get('user_id'),
|
||||||
|
'timestamp': parse_iso8601(tape.get('published_at')),
|
||||||
|
}
|
@ -149,7 +149,7 @@ class TEDIE(SubtitlesInfoExtractor):
|
|||||||
thumbnail = 'http://' + thumbnail
|
thumbnail = 'http://' + thumbnail
|
||||||
return {
|
return {
|
||||||
'id': video_id,
|
'id': video_id,
|
||||||
'title': talk_info['title'],
|
'title': talk_info['title'].strip(),
|
||||||
'uploader': talk_info['speaker'],
|
'uploader': talk_info['speaker'],
|
||||||
'thumbnail': thumbnail,
|
'thumbnail': thumbnail,
|
||||||
'description': self._og_search_description(webpage),
|
'description': self._og_search_description(webpage),
|
||||||
|
70
youtube_dl/extractor/theonion.py
Normal file
70
youtube_dl/extractor/theonion.py
Normal file
@ -0,0 +1,70 @@
|
|||||||
|
# coding: utf-8
|
||||||
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
|
import re
|
||||||
|
|
||||||
|
from .common import InfoExtractor
|
||||||
|
from ..utils import ExtractorError
|
||||||
|
|
||||||
|
|
||||||
|
class TheOnionIE(InfoExtractor):
|
||||||
|
_VALID_URL = r'(?x)https?://(?:www\.)?theonion\.com/video/[^,]+,(?P<article_id>[0-9]+)/?'
|
||||||
|
_TEST = {
|
||||||
|
'url': 'http://www.theonion.com/video/man-wearing-mm-jacket-gods-image,36918/',
|
||||||
|
'md5': '19eaa9a39cf9b9804d982e654dc791ee',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '2133',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': 'Man Wearing M&M Jacket Apparently Made In God\'s Image',
|
||||||
|
'description': 'md5:cc12448686b5600baae9261d3e180910',
|
||||||
|
'thumbnail': 're:^https?://.*\.jpg\?\d+$',
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
def _real_extract(self, url):
|
||||||
|
mobj = re.match(self._VALID_URL, url)
|
||||||
|
article_id = mobj.group('article_id')
|
||||||
|
|
||||||
|
webpage = self._download_webpage(url, article_id)
|
||||||
|
|
||||||
|
video_id = self._search_regex(
|
||||||
|
r'"videoId":\s(\d+),', webpage, 'video ID')
|
||||||
|
title = self._og_search_title(webpage)
|
||||||
|
description = self._og_search_description(webpage)
|
||||||
|
thumbnail = self._og_search_thumbnail(webpage)
|
||||||
|
|
||||||
|
sources = re.findall(r'<source src="([^"]+)" type="([^"]+)"', webpage)
|
||||||
|
if not sources:
|
||||||
|
raise ExtractorError(
|
||||||
|
'No sources found for video %s' % video_id, expected=True)
|
||||||
|
|
||||||
|
formats = []
|
||||||
|
for src, type_ in sources:
|
||||||
|
if type_ == 'video/mp4':
|
||||||
|
formats.append({
|
||||||
|
'format_id': 'mp4_sd',
|
||||||
|
'preference': 1,
|
||||||
|
'url': src,
|
||||||
|
})
|
||||||
|
elif type_ == 'video/webm':
|
||||||
|
formats.append({
|
||||||
|
'format_id': 'webm_sd',
|
||||||
|
'preference': 0,
|
||||||
|
'url': src,
|
||||||
|
})
|
||||||
|
elif type_ == 'application/x-mpegURL':
|
||||||
|
formats.extend(
|
||||||
|
self._extract_m3u8_formats(src, video_id, preference=-1))
|
||||||
|
else:
|
||||||
|
self.report_warning(
|
||||||
|
'Encountered unexpected format: %s' % type_)
|
||||||
|
|
||||||
|
self._sort_formats(formats)
|
||||||
|
|
||||||
|
return {
|
||||||
|
'id': video_id,
|
||||||
|
'title': title,
|
||||||
|
'formats': formats,
|
||||||
|
'thumbnail': thumbnail,
|
||||||
|
'description': description,
|
||||||
|
}
|
100
youtube_dl/extractor/thesixtyone.py
Normal file
100
youtube_dl/extractor/thesixtyone.py
Normal file
@ -0,0 +1,100 @@
|
|||||||
|
# coding: utf-8
|
||||||
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
|
import json
|
||||||
|
import re
|
||||||
|
|
||||||
|
from .common import InfoExtractor
|
||||||
|
from ..utils import unified_strdate
|
||||||
|
|
||||||
|
|
||||||
|
class TheSixtyOneIE(InfoExtractor):
|
||||||
|
_VALID_URL = r'''(?x)https?://(?:www\.)?thesixtyone\.com/
|
||||||
|
(?:.*?/)*
|
||||||
|
(?:
|
||||||
|
s|
|
||||||
|
song/comments/list|
|
||||||
|
song
|
||||||
|
)/(?P<id>[A-Za-z0-9]+)/?$'''
|
||||||
|
_SONG_URL_TEMPLATE = 'http://thesixtyone.com/s/{0:}'
|
||||||
|
_SONG_FILE_URL_TEMPLATE = 'http://{audio_server:}.thesixtyone.com/thesixtyone_production/audio/{0:}_stream'
|
||||||
|
_THUMBNAIL_URL_TEMPLATE = '{photo_base_url:}_desktop'
|
||||||
|
_TESTS = [
|
||||||
|
{
|
||||||
|
'url': 'http://www.thesixtyone.com/s/SrE3zD7s1jt/',
|
||||||
|
'md5': '821cc43b0530d3222e3e2b70bb4622ea',
|
||||||
|
'info_dict': {
|
||||||
|
'id': 'SrE3zD7s1jt',
|
||||||
|
'ext': 'mp3',
|
||||||
|
'title': 'CASIO - Unicorn War Mixtape',
|
||||||
|
'thumbnail': 're:^https?://.*_desktop$',
|
||||||
|
'upload_date': '20071217',
|
||||||
|
'duration': 3208,
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
'url': 'http://www.thesixtyone.com/song/comments/list/SrE3zD7s1jt',
|
||||||
|
'only_matching': True,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
'url': 'http://www.thesixtyone.com/s/ULoiyjuJWli#/s/SrE3zD7s1jt/',
|
||||||
|
'only_matching': True,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
'url': 'http://www.thesixtyone.com/#/s/SrE3zD7s1jt/',
|
||||||
|
'only_matching': True,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
'url': 'http://www.thesixtyone.com/song/SrE3zD7s1jt/',
|
||||||
|
'only_matching': True,
|
||||||
|
},
|
||||||
|
]
|
||||||
|
|
||||||
|
_DECODE_MAP = {
|
||||||
|
"x": "a",
|
||||||
|
"m": "b",
|
||||||
|
"w": "c",
|
||||||
|
"q": "d",
|
||||||
|
"n": "e",
|
||||||
|
"p": "f",
|
||||||
|
"a": "0",
|
||||||
|
"h": "1",
|
||||||
|
"e": "2",
|
||||||
|
"u": "3",
|
||||||
|
"s": "4",
|
||||||
|
"i": "5",
|
||||||
|
"o": "6",
|
||||||
|
"y": "7",
|
||||||
|
"r": "8",
|
||||||
|
"c": "9"
|
||||||
|
}
|
||||||
|
|
||||||
|
def _real_extract(self, url):
|
||||||
|
mobj = re.match(self._VALID_URL, url)
|
||||||
|
song_id = mobj.group('id')
|
||||||
|
|
||||||
|
webpage = self._download_webpage(
|
||||||
|
self._SONG_URL_TEMPLATE.format(song_id), song_id)
|
||||||
|
|
||||||
|
song_data = json.loads(self._search_regex(
|
||||||
|
r'"%s":\s(\{.*?\})' % song_id, webpage, 'song_data'))
|
||||||
|
keys = [self._DECODE_MAP.get(s, s) for s in song_data['key']]
|
||||||
|
url = self._SONG_FILE_URL_TEMPLATE.format(
|
||||||
|
"".join(reversed(keys)), **song_data)
|
||||||
|
|
||||||
|
formats = [{
|
||||||
|
'format_id': 'sd',
|
||||||
|
'url': url,
|
||||||
|
'ext': 'mp3',
|
||||||
|
}]
|
||||||
|
|
||||||
|
return {
|
||||||
|
'id': song_id,
|
||||||
|
'title': '{artist:} - {name:}'.format(**song_data),
|
||||||
|
'formats': formats,
|
||||||
|
'comment_count': song_data.get('comments_count'),
|
||||||
|
'duration': song_data.get('play_time'),
|
||||||
|
'like_count': song_data.get('score'),
|
||||||
|
'thumbnail': self._THUMBNAIL_URL_TEMPLATE.format(**song_data),
|
||||||
|
'upload_date': unified_strdate(song_data.get('publish_date')),
|
||||||
|
}
|
@ -26,8 +26,7 @@ class THVideoIE(InfoExtractor):
|
|||||||
}
|
}
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
mobj = re.match(self._VALID_URL, url)
|
video_id = self._match_id(url)
|
||||||
video_id = mobj.group('id')
|
|
||||||
|
|
||||||
# extract download link from mobile player page
|
# extract download link from mobile player page
|
||||||
webpage_player = self._download_webpage(
|
webpage_player = self._download_webpage(
|
||||||
@ -57,3 +56,29 @@ class THVideoIE(InfoExtractor):
|
|||||||
'description': description,
|
'description': description,
|
||||||
'upload_date': upload_date
|
'upload_date': upload_date
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
class THVideoPlaylistIE(InfoExtractor):
|
||||||
|
_VALID_URL = r'http?://(?:www\.)?thvideo\.tv/mylist(?P<id>[0-9]+)'
|
||||||
|
_TEST = {
|
||||||
|
'url': 'http://thvideo.tv/mylist2',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '2',
|
||||||
|
'title': '幻想万華鏡',
|
||||||
|
},
|
||||||
|
'playlist_mincount': 23,
|
||||||
|
}
|
||||||
|
|
||||||
|
def _real_extract(self, url):
|
||||||
|
playlist_id = self._match_id(url)
|
||||||
|
|
||||||
|
webpage = self._download_webpage(url, playlist_id)
|
||||||
|
list_title = self._html_search_regex(
|
||||||
|
r'<h1 class="show_title">(.*?)<b id', webpage, 'playlist title',
|
||||||
|
fatal=False)
|
||||||
|
|
||||||
|
entries = [
|
||||||
|
self.url_result('http://thvideo.tv/v/th' + id, 'THVideo')
|
||||||
|
for id in re.findall(r'<dd><a href="http://thvideo.tv/v/th(\d+)/" target=', webpage)]
|
||||||
|
|
||||||
|
return self.playlist_result(entries, playlist_id, list_title)
|
||||||
|
@ -17,16 +17,16 @@ class TvigleIE(InfoExtractor):
|
|||||||
|
|
||||||
_TESTS = [
|
_TESTS = [
|
||||||
{
|
{
|
||||||
'url': 'http://www.tvigle.ru/video/brat-2/',
|
'url': 'http://www.tvigle.ru/video/brat/',
|
||||||
'md5': '72cb7eab33e54314e1790da402d3c9c3',
|
'md5': 'ff4344a4894b0524441fb6f8218dc716',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '5119390',
|
'id': '5118490',
|
||||||
'display_id': 'brat-2',
|
'display_id': 'brat',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'title': 'Брат 2 ',
|
'title': 'Брат',
|
||||||
'description': 'md5:5751f4fe345a58e1692585c361294bd8',
|
'description': 'md5:d16ac7c0b47052ea51fddb92c4e413eb',
|
||||||
'duration': 7356.369,
|
'duration': 5722.6,
|
||||||
'age_limit': 0,
|
'age_limit': 16,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@ -71,6 +71,7 @@ class TvigleIE(InfoExtractor):
|
|||||||
'format_id': '%s-%s' % (vcodec, quality),
|
'format_id': '%s-%s' % (vcodec, quality),
|
||||||
'vcodec': vcodec,
|
'vcodec': vcodec,
|
||||||
'height': int(quality[:-1]),
|
'height': int(quality[:-1]),
|
||||||
|
'filesize': item['video_files_size'][vcodec][quality],
|
||||||
})
|
})
|
||||||
self._sort_formats(formats)
|
self._sort_formats(formats)
|
||||||
|
|
||||||
|
@ -19,7 +19,7 @@ class Vbox7IE(InfoExtractor):
|
|||||||
'md5': '99f65c0c9ef9b682b97313e052734c3f',
|
'md5': '99f65c0c9ef9b682b97313e052734c3f',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '249bb972c2',
|
'id': '249bb972c2',
|
||||||
'ext': 'flv',
|
'ext': 'mp4',
|
||||||
'title': 'Смях! Чудо - чист за секунди - Скрита камера',
|
'title': 'Смях! Чудо - чист за секунди - Скрита камера',
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
@ -50,7 +50,6 @@ class Vbox7IE(InfoExtractor):
|
|||||||
return {
|
return {
|
||||||
'id': video_id,
|
'id': video_id,
|
||||||
'url': final_url,
|
'url': final_url,
|
||||||
'ext': 'flv',
|
|
||||||
'title': title,
|
'title': title,
|
||||||
'thumbnail': thumbnail_url,
|
'thumbnail': thumbnail_url,
|
||||||
}
|
}
|
||||||
|
@ -5,7 +5,6 @@ import xml.etree.ElementTree
|
|||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
from ..utils import (
|
from ..utils import (
|
||||||
compat_HTTPError,
|
|
||||||
compat_urllib_request,
|
compat_urllib_request,
|
||||||
ExtractorError,
|
ExtractorError,
|
||||||
)
|
)
|
||||||
@ -25,7 +24,7 @@ class VevoIE(InfoExtractor):
|
|||||||
|
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
'url': 'http://www.vevo.com/watch/hurts/somebody-to-die-for/GB1101300280',
|
'url': 'http://www.vevo.com/watch/hurts/somebody-to-die-for/GB1101300280',
|
||||||
"md5": "06bea460acb744eab74a9d7dcb4bfd61",
|
"md5": "95ee28ee45e70130e3ab02b0f579ae23",
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': 'GB1101300280',
|
'id': 'GB1101300280',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
@ -41,7 +40,7 @@ class VevoIE(InfoExtractor):
|
|||||||
}, {
|
}, {
|
||||||
'note': 'v3 SMIL format',
|
'note': 'v3 SMIL format',
|
||||||
'url': 'http://www.vevo.com/watch/cassadee-pope/i-wish-i-could-break-your-heart/USUV71302923',
|
'url': 'http://www.vevo.com/watch/cassadee-pope/i-wish-i-could-break-your-heart/USUV71302923',
|
||||||
'md5': '893ec0e0d4426a1d96c01de8f2bdff58',
|
'md5': 'f6ab09b034f8c22969020b042e5ac7fc',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': 'USUV71302923',
|
'id': 'USUV71302923',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
|
@ -31,7 +31,7 @@ class VGTVIE(InfoExtractor):
|
|||||||
'url': 'http://www.vgtv.no/#!/live/100764/opptak-vgtv-foelger-em-kvalifiseringen',
|
'url': 'http://www.vgtv.no/#!/live/100764/opptak-vgtv-foelger-em-kvalifiseringen',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '100764',
|
'id': '100764',
|
||||||
'ext': 'mp4',
|
'ext': 'flv',
|
||||||
'title': 'OPPTAK: VGTV følger EM-kvalifiseringen',
|
'title': 'OPPTAK: VGTV følger EM-kvalifiseringen',
|
||||||
'description': 'md5:3772d9c0dc2dff92a886b60039a7d4d3',
|
'description': 'md5:3772d9c0dc2dff92a886b60039a7d4d3',
|
||||||
'thumbnail': 're:^https?://.*\.jpg',
|
'thumbnail': 're:^https?://.*\.jpg',
|
||||||
@ -50,7 +50,7 @@ class VGTVIE(InfoExtractor):
|
|||||||
'url': 'http://www.vgtv.no/#!/live/100015/direkte-her-kan-du-se-laksen-live-fra-suldalslaagen',
|
'url': 'http://www.vgtv.no/#!/live/100015/direkte-her-kan-du-se-laksen-live-fra-suldalslaagen',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '100015',
|
'id': '100015',
|
||||||
'ext': 'mp4',
|
'ext': 'flv',
|
||||||
'title': 'DIREKTE: Her kan du se laksen live fra Suldalslågen!',
|
'title': 'DIREKTE: Her kan du se laksen live fra Suldalslågen!',
|
||||||
'description': 'md5:9a60cc23fa349f761628924e56eeec2d',
|
'description': 'md5:9a60cc23fa349f761628924e56eeec2d',
|
||||||
'thumbnail': 're:^https?://.*\.jpg',
|
'thumbnail': 're:^https?://.*\.jpg',
|
||||||
|
@ -8,17 +8,19 @@ import itertools
|
|||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
from .subtitles import SubtitlesInfoExtractor
|
from .subtitles import SubtitlesInfoExtractor
|
||||||
from ..utils import (
|
from ..utils import (
|
||||||
|
clean_html,
|
||||||
compat_HTTPError,
|
compat_HTTPError,
|
||||||
compat_urllib_parse,
|
compat_urllib_parse,
|
||||||
compat_urllib_request,
|
compat_urllib_request,
|
||||||
clean_html,
|
compat_urlparse,
|
||||||
get_element_by_attribute,
|
|
||||||
ExtractorError,
|
ExtractorError,
|
||||||
|
get_element_by_attribute,
|
||||||
|
InAdvancePagedList,
|
||||||
|
int_or_none,
|
||||||
RegexNotFoundError,
|
RegexNotFoundError,
|
||||||
std_headers,
|
std_headers,
|
||||||
unsmuggle_url,
|
unsmuggle_url,
|
||||||
urlencode_postdata,
|
urlencode_postdata,
|
||||||
int_or_none,
|
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
@ -54,7 +56,7 @@ class VimeoIE(VimeoBaseInfoExtractor, SubtitlesInfoExtractor):
|
|||||||
|
|
||||||
# _VALID_URL matches Vimeo URLs
|
# _VALID_URL matches Vimeo URLs
|
||||||
_VALID_URL = r'''(?x)
|
_VALID_URL = r'''(?x)
|
||||||
(?P<proto>(?:https?:)?//)?
|
https?://
|
||||||
(?:(?:www|(?P<player>player))\.)?
|
(?:(?:www|(?P<player>player))\.)?
|
||||||
vimeo(?P<pro>pro)?\.com/
|
vimeo(?P<pro>pro)?\.com/
|
||||||
(?!channels/[^/?#]+/?(?:$|[?#])|album/)
|
(?!channels/[^/?#]+/?(?:$|[?#])|album/)
|
||||||
@ -89,6 +91,7 @@ class VimeoIE(VimeoBaseInfoExtractor, SubtitlesInfoExtractor):
|
|||||||
'uploader_id': 'openstreetmapus',
|
'uploader_id': 'openstreetmapus',
|
||||||
'uploader': 'OpenStreetMap US',
|
'uploader': 'OpenStreetMap US',
|
||||||
'title': 'Andy Allan - Putting the Carto into OpenStreetMap Cartography',
|
'title': 'Andy Allan - Putting the Carto into OpenStreetMap Cartography',
|
||||||
|
'description': 'md5:380943ec71b89736ff4bf27183233d09',
|
||||||
'duration': 1595,
|
'duration': 1595,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@ -103,6 +106,7 @@ class VimeoIE(VimeoBaseInfoExtractor, SubtitlesInfoExtractor):
|
|||||||
'uploader': 'The BLN & Business of Software',
|
'uploader': 'The BLN & Business of Software',
|
||||||
'uploader_id': 'theblnbusinessofsoftware',
|
'uploader_id': 'theblnbusinessofsoftware',
|
||||||
'duration': 3610,
|
'duration': 3610,
|
||||||
|
'description': None,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@ -117,6 +121,7 @@ class VimeoIE(VimeoBaseInfoExtractor, SubtitlesInfoExtractor):
|
|||||||
'uploader_id': 'user18948128',
|
'uploader_id': 'user18948128',
|
||||||
'uploader': 'Jaime Marquínez Ferrándiz',
|
'uploader': 'Jaime Marquínez Ferrándiz',
|
||||||
'duration': 10,
|
'duration': 10,
|
||||||
|
'description': 'This is "youtube-dl password protected test video" by Jaime Marquínez Ferrándiz on Vimeo, the home for high quality videos and the people who love them.',
|
||||||
},
|
},
|
||||||
'params': {
|
'params': {
|
||||||
'videopassword': 'youtube-dl',
|
'videopassword': 'youtube-dl',
|
||||||
@ -203,6 +208,7 @@ class VimeoIE(VimeoBaseInfoExtractor, SubtitlesInfoExtractor):
|
|||||||
# Extract ID from URL
|
# Extract ID from URL
|
||||||
mobj = re.match(self._VALID_URL, url)
|
mobj = re.match(self._VALID_URL, url)
|
||||||
video_id = mobj.group('id')
|
video_id = mobj.group('id')
|
||||||
|
orig_url = url
|
||||||
if mobj.group('pro') or mobj.group('player'):
|
if mobj.group('pro') or mobj.group('player'):
|
||||||
url = 'http://player.vimeo.com/video/' + video_id
|
url = 'http://player.vimeo.com/video/' + video_id
|
||||||
|
|
||||||
@ -273,18 +279,23 @@ class VimeoIE(VimeoBaseInfoExtractor, SubtitlesInfoExtractor):
|
|||||||
_, video_thumbnail = sorted((int(width if width.isdigit() else 0), t_url) for (width, t_url) in video_thumbs.items())[-1]
|
_, video_thumbnail = sorted((int(width if width.isdigit() else 0), t_url) for (width, t_url) in video_thumbs.items())[-1]
|
||||||
|
|
||||||
# Extract video description
|
# Extract video description
|
||||||
video_description = None
|
|
||||||
try:
|
video_description = self._html_search_regex(
|
||||||
video_description = get_element_by_attribute("class", "description_wrapper", webpage)
|
r'(?s)<div\s+class="[^"]*description[^"]*"[^>]*>(.*?)</div>',
|
||||||
if video_description:
|
webpage, 'description', default=None)
|
||||||
video_description = clean_html(video_description)
|
if not video_description:
|
||||||
except AssertionError as err:
|
video_description = self._html_search_meta(
|
||||||
# On some pages like (http://player.vimeo.com/video/54469442) the
|
'description', webpage, default=None)
|
||||||
# html tags are not closed, python 2.6 cannot handle it
|
if not video_description and mobj.group('pro'):
|
||||||
if err.args[0] == 'we should not get here!':
|
orig_webpage = self._download_webpage(
|
||||||
pass
|
orig_url, video_id,
|
||||||
else:
|
note='Downloading webpage for description',
|
||||||
raise
|
fatal=False)
|
||||||
|
if orig_webpage:
|
||||||
|
video_description = self._html_search_meta(
|
||||||
|
'description', orig_webpage, default=None)
|
||||||
|
if not video_description and not mobj.group('player'):
|
||||||
|
self._downloader.report_warning('Cannot find video description')
|
||||||
|
|
||||||
# Extract video duration
|
# Extract video duration
|
||||||
video_duration = int_or_none(config["video"].get("duration"))
|
video_duration = int_or_none(config["video"].get("duration"))
|
||||||
@ -529,3 +540,58 @@ class VimeoWatchLaterIE(VimeoBaseInfoExtractor, VimeoChannelIE):
|
|||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
return self._extract_videos('watchlater', 'https://vimeo.com/home/watchlater')
|
return self._extract_videos('watchlater', 'https://vimeo.com/home/watchlater')
|
||||||
|
|
||||||
|
|
||||||
|
class VimeoLikesIE(InfoExtractor):
|
||||||
|
_VALID_URL = r'https?://(?:www\.)?vimeo\.com/user(?P<id>[0-9]+)/likes/?(?:$|[?#]|sort:)'
|
||||||
|
IE_NAME = 'vimeo:likes'
|
||||||
|
IE_DESC = 'Vimeo user likes'
|
||||||
|
_TEST = {
|
||||||
|
'url': 'https://vimeo.com/user755559/likes/',
|
||||||
|
'playlist_mincount': 293,
|
||||||
|
"info_dict": {
|
||||||
|
"description": "See all the videos urza likes",
|
||||||
|
"title": 'Videos urza likes',
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
def _real_extract(self, url):
|
||||||
|
user_id = self._match_id(url)
|
||||||
|
webpage = self._download_webpage(url, user_id)
|
||||||
|
page_count = self._int(
|
||||||
|
self._search_regex(
|
||||||
|
r'''(?x)<li><a\s+href="[^"]+"\s+data-page="([0-9]+)">
|
||||||
|
.*?</a></li>\s*<li\s+class="pagination_next">
|
||||||
|
''', webpage, 'page count'),
|
||||||
|
'page count', fatal=True)
|
||||||
|
PAGE_SIZE = 12
|
||||||
|
title = self._html_search_regex(
|
||||||
|
r'(?s)<h1>(.+?)</h1>', webpage, 'title', fatal=False)
|
||||||
|
description = self._html_search_meta('description', webpage)
|
||||||
|
|
||||||
|
def _get_page(idx):
|
||||||
|
page_url = '%s//vimeo.com/user%s/likes/page:%d/sort:date' % (
|
||||||
|
self.http_scheme(), user_id, idx + 1)
|
||||||
|
webpage = self._download_webpage(
|
||||||
|
page_url, user_id,
|
||||||
|
note='Downloading page %d/%d' % (idx + 1, page_count))
|
||||||
|
video_list = self._search_regex(
|
||||||
|
r'(?s)<ol class="js-browse_list[^"]+"[^>]*>(.*?)</ol>',
|
||||||
|
webpage, 'video content')
|
||||||
|
paths = re.findall(
|
||||||
|
r'<li[^>]*>\s*<a\s+href="([^"]+)"', video_list)
|
||||||
|
for path in paths:
|
||||||
|
yield {
|
||||||
|
'_type': 'url',
|
||||||
|
'url': compat_urlparse.urljoin(page_url, path),
|
||||||
|
}
|
||||||
|
|
||||||
|
pl = InAdvancePagedList(_get_page, page_count, PAGE_SIZE)
|
||||||
|
|
||||||
|
return {
|
||||||
|
'_type': 'playlist',
|
||||||
|
'id': 'user%s_likes' % user_id,
|
||||||
|
'title': title,
|
||||||
|
'description': description,
|
||||||
|
'entries': pl,
|
||||||
|
}
|
||||||
|
@ -6,6 +6,7 @@ from .common import InfoExtractor
|
|||||||
from ..utils import (
|
from ..utils import (
|
||||||
int_or_none,
|
int_or_none,
|
||||||
compat_str,
|
compat_str,
|
||||||
|
ExtractorError,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
@ -16,6 +17,24 @@ class VubeIE(InfoExtractor):
|
|||||||
|
|
||||||
_TESTS = [
|
_TESTS = [
|
||||||
{
|
{
|
||||||
|
'url': 'http://vube.com/trending/William+Wei/Y8NUZ69Tf7?t=s',
|
||||||
|
'md5': 'e7aabe1f8f1aa826b9e4735e1f9cee42',
|
||||||
|
'info_dict': {
|
||||||
|
'id': 'Y8NUZ69Tf7',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': 'Best Drummer Ever [HD]',
|
||||||
|
'description': 'md5:2d63c4b277b85c2277761c2cf7337d71',
|
||||||
|
'thumbnail': 're:^https?://.*\.jpg',
|
||||||
|
'uploader': 'William',
|
||||||
|
'timestamp': 1406876915,
|
||||||
|
'upload_date': '20140801',
|
||||||
|
'duration': 258.051,
|
||||||
|
'like_count': int,
|
||||||
|
'dislike_count': int,
|
||||||
|
'comment_count': int,
|
||||||
|
'categories': ['amazing', 'hd', 'best drummer ever', 'william wei', 'bucket drumming', 'street drummer', 'epic street drumming'],
|
||||||
|
},
|
||||||
|
}, {
|
||||||
'url': 'http://vube.com/Chiara+Grispo+Video+Channel/YL2qNPkqon',
|
'url': 'http://vube.com/Chiara+Grispo+Video+Channel/YL2qNPkqon',
|
||||||
'md5': 'db7aba89d4603dadd627e9d1973946fe',
|
'md5': 'db7aba89d4603dadd627e9d1973946fe',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
@ -32,7 +51,8 @@ class VubeIE(InfoExtractor):
|
|||||||
'dislike_count': int,
|
'dislike_count': int,
|
||||||
'comment_count': int,
|
'comment_count': int,
|
||||||
'categories': ['pop', 'music', 'cover', 'singing', 'jessie j', 'price tag', 'chiara grispo'],
|
'categories': ['pop', 'music', 'cover', 'singing', 'jessie j', 'price tag', 'chiara grispo'],
|
||||||
}
|
},
|
||||||
|
'skip': 'Removed due to DMCA',
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
'url': 'http://vube.com/SerainaMusic/my-7-year-old-sister-and-i-singing-alive-by-krewella/UeBhTudbfS?t=s&n=1',
|
'url': 'http://vube.com/SerainaMusic/my-7-year-old-sister-and-i-singing-alive-by-krewella/UeBhTudbfS?t=s&n=1',
|
||||||
@ -51,7 +71,8 @@ class VubeIE(InfoExtractor):
|
|||||||
'dislike_count': int,
|
'dislike_count': int,
|
||||||
'comment_count': int,
|
'comment_count': int,
|
||||||
'categories': ['seraina', 'jessica', 'krewella', 'alive'],
|
'categories': ['seraina', 'jessica', 'krewella', 'alive'],
|
||||||
}
|
},
|
||||||
|
'skip': 'Removed due to DMCA',
|
||||||
}, {
|
}, {
|
||||||
'url': 'http://vube.com/vote/Siren+Gene/0nmsMY5vEq?n=2&t=s',
|
'url': 'http://vube.com/vote/Siren+Gene/0nmsMY5vEq?n=2&t=s',
|
||||||
'md5': '0584fc13b50f887127d9d1007589d27f',
|
'md5': '0584fc13b50f887127d9d1007589d27f',
|
||||||
@ -69,7 +90,8 @@ class VubeIE(InfoExtractor):
|
|||||||
'dislike_count': int,
|
'dislike_count': int,
|
||||||
'comment_count': int,
|
'comment_count': int,
|
||||||
'categories': ['let it go', 'cover', 'idina menzel', 'frozen', 'singing', 'disney', 'siren gene'],
|
'categories': ['let it go', 'cover', 'idina menzel', 'frozen', 'singing', 'disney', 'siren gene'],
|
||||||
}
|
},
|
||||||
|
'skip': 'Removed due to DMCA',
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
|
|
||||||
@ -102,6 +124,11 @@ class VubeIE(InfoExtractor):
|
|||||||
|
|
||||||
self._sort_formats(formats)
|
self._sort_formats(formats)
|
||||||
|
|
||||||
|
if not formats and video.get('vst') == 'dmca':
|
||||||
|
raise ExtractorError(
|
||||||
|
'This video has been removed in response to a complaint received under the US Digital Millennium Copyright Act.',
|
||||||
|
expected=True)
|
||||||
|
|
||||||
title = video['title']
|
title = video['title']
|
||||||
description = video.get('description')
|
description = video.get('description')
|
||||||
thumbnail = self._proto_relative_url(video.get('thumbnail_src'), scheme='http:')
|
thumbnail = self._proto_relative_url(video.get('thumbnail_src'), scheme='http:')
|
||||||
|
@ -5,6 +5,7 @@ import re
|
|||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
from ..utils import (
|
from ..utils import (
|
||||||
compat_urllib_parse_urlparse,
|
compat_urllib_parse_urlparse,
|
||||||
|
ExtractorError,
|
||||||
parse_duration,
|
parse_duration,
|
||||||
qualities,
|
qualities,
|
||||||
)
|
)
|
||||||
@ -14,13 +15,12 @@ class VuClipIE(InfoExtractor):
|
|||||||
_VALID_URL = r'http://(?:m\.)?vuclip\.com/w\?.*?cid=(?P<id>[0-9]+)'
|
_VALID_URL = r'http://(?:m\.)?vuclip\.com/w\?.*?cid=(?P<id>[0-9]+)'
|
||||||
|
|
||||||
_TEST = {
|
_TEST = {
|
||||||
'url': 'http://m.vuclip.com/w?cid=843902317&fid=63532&z=1007&nvar&frm=index.html&bu=4757321434',
|
'url': 'http://m.vuclip.com/w?cid=922692425&fid=70295&z=1010&nvar&frm=index.html',
|
||||||
'md5': '92ac9d1ccefec4f0bb474661ab144fcf',
|
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '843902317',
|
'id': '922692425',
|
||||||
'ext': '3gp',
|
'ext': '3gp',
|
||||||
'title': 'Movie Trailer: Noah',
|
'title': 'The Toy Soldiers - Hollywood Movie Trailer',
|
||||||
'duration': 139,
|
'duration': 180,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -37,16 +37,32 @@ class VuClipIE(InfoExtractor):
|
|||||||
webpage = self._download_webpage(
|
webpage = self._download_webpage(
|
||||||
adfree_url, video_id, note='Download post-ad page')
|
adfree_url, video_id, note='Download post-ad page')
|
||||||
|
|
||||||
|
error_msg = self._html_search_regex(
|
||||||
|
r'<p class="message">(.*?)</p>', webpage, 'error message',
|
||||||
|
default=None)
|
||||||
|
if error_msg:
|
||||||
|
raise ExtractorError(
|
||||||
|
'%s said: %s' % (self.IE_NAME, error_msg), expected=True)
|
||||||
|
|
||||||
|
# These clowns alternate between two page types
|
||||||
links_code = self._search_regex(
|
links_code = self._search_regex(
|
||||||
r'(?s)<div class="social align_c".*?>(.*?)<hr\s*/?>', webpage,
|
r'''(?xs)
|
||||||
'links')
|
(?:
|
||||||
|
<img\s+src="/im/play.gif".*?>|
|
||||||
|
<!--\ player\ end\ -->\s*</div><!--\ thumb\ end-->
|
||||||
|
)
|
||||||
|
(.*?)
|
||||||
|
(?:
|
||||||
|
<a\s+href="fblike|<div\s+class="social">
|
||||||
|
)
|
||||||
|
''', webpage, 'links')
|
||||||
title = self._html_search_regex(
|
title = self._html_search_regex(
|
||||||
r'<title>(.*?)-\s*Vuclip</title>', webpage, 'title').strip()
|
r'<title>(.*?)-\s*Vuclip</title>', webpage, 'title').strip()
|
||||||
|
|
||||||
quality_order = qualities(['Reg', 'Hi'])
|
quality_order = qualities(['Reg', 'Hi'])
|
||||||
formats = []
|
formats = []
|
||||||
for url, q in re.findall(
|
for url, q in re.findall(
|
||||||
r'<a href="(?P<url>[^"]+)".*?>(?P<q>[^<]+)</a>', links_code):
|
r'<a\s+href="(?P<url>[^"]+)".*?>(?:<button[^>]*>)?(?P<q>[^<]+)(?:</button>)?</a>', links_code):
|
||||||
format_id = compat_urllib_parse_urlparse(url).scheme + '-' + q
|
format_id = compat_urllib_parse_urlparse(url).scheme + '-' + q
|
||||||
formats.append({
|
formats.append({
|
||||||
'format_id': format_id,
|
'format_id': format_id,
|
||||||
@ -56,7 +72,7 @@ class VuClipIE(InfoExtractor):
|
|||||||
self._sort_formats(formats)
|
self._sort_formats(formats)
|
||||||
|
|
||||||
duration = parse_duration(self._search_regex(
|
duration = parse_duration(self._search_regex(
|
||||||
r'\(([0-9:]+)\)</span></h1>', webpage, 'duration', fatal=False))
|
r'\(([0-9:]+)\)</span>', webpage, 'duration', fatal=False))
|
||||||
|
|
||||||
return {
|
return {
|
||||||
'id': video_id,
|
'id': video_id,
|
||||||
|
89
youtube_dl/extractor/walla.py
Normal file
89
youtube_dl/extractor/walla.py
Normal file
@ -0,0 +1,89 @@
|
|||||||
|
# coding: utf-8
|
||||||
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
|
import re
|
||||||
|
|
||||||
|
from .subtitles import SubtitlesInfoExtractor
|
||||||
|
from ..utils import (
|
||||||
|
xpath_text,
|
||||||
|
int_or_none,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
class WallaIE(SubtitlesInfoExtractor):
|
||||||
|
_VALID_URL = r'http://vod\.walla\.co\.il/[^/]+/(?P<id>\d+)/(?P<display_id>.+)'
|
||||||
|
_TEST = {
|
||||||
|
'url': 'http://vod.walla.co.il/movie/2642630/one-direction-all-for-one',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '2642630',
|
||||||
|
'display_id': 'one-direction-all-for-one',
|
||||||
|
'ext': 'flv',
|
||||||
|
'title': 'וואן דיירקשן: ההיסטריה',
|
||||||
|
'description': 'md5:de9e2512a92442574cdb0913c49bc4d8',
|
||||||
|
'thumbnail': 're:^https?://.*\.jpg',
|
||||||
|
'duration': 3600,
|
||||||
|
},
|
||||||
|
'params': {
|
||||||
|
# rtmp download
|
||||||
|
'skip_download': True,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
_SUBTITLE_LANGS = {
|
||||||
|
'עברית': 'heb',
|
||||||
|
}
|
||||||
|
|
||||||
|
def _real_extract(self, url):
|
||||||
|
mobj = re.match(self._VALID_URL, url)
|
||||||
|
video_id = mobj.group('id')
|
||||||
|
display_id = mobj.group('display_id')
|
||||||
|
|
||||||
|
video = self._download_xml(
|
||||||
|
'http://video2.walla.co.il/?w=null/null/%s/@@/video/flv_pl' % video_id,
|
||||||
|
display_id)
|
||||||
|
|
||||||
|
item = video.find('./items/item')
|
||||||
|
|
||||||
|
title = xpath_text(item, './title', 'title')
|
||||||
|
description = xpath_text(item, './synopsis', 'description')
|
||||||
|
thumbnail = xpath_text(item, './preview_pic', 'thumbnail')
|
||||||
|
duration = int_or_none(xpath_text(item, './duration', 'duration'))
|
||||||
|
|
||||||
|
subtitles = {}
|
||||||
|
for subtitle in item.findall('./subtitles/subtitle'):
|
||||||
|
lang = xpath_text(subtitle, './title')
|
||||||
|
subtitles[self._SUBTITLE_LANGS.get(lang, lang)] = xpath_text(subtitle, './src')
|
||||||
|
|
||||||
|
if self._downloader.params.get('listsubtitles', False):
|
||||||
|
self._list_available_subtitles(video_id, subtitles)
|
||||||
|
return
|
||||||
|
|
||||||
|
subtitles = self.extract_subtitles(video_id, subtitles)
|
||||||
|
|
||||||
|
formats = []
|
||||||
|
for quality in item.findall('./qualities/quality'):
|
||||||
|
format_id = xpath_text(quality, './title')
|
||||||
|
fmt = {
|
||||||
|
'url': 'rtmp://wafla.walla.co.il/vod',
|
||||||
|
'play_path': xpath_text(quality, './src'),
|
||||||
|
'player_url': 'http://isc.walla.co.il/w9/swf/video_swf/vod/WallaMediaPlayerAvod.swf',
|
||||||
|
'page_url': url,
|
||||||
|
'ext': 'flv',
|
||||||
|
'format_id': xpath_text(quality, './title'),
|
||||||
|
}
|
||||||
|
m = re.search(r'^(?P<height>\d+)[Pp]', format_id)
|
||||||
|
if m:
|
||||||
|
fmt['height'] = int(m.group('height'))
|
||||||
|
formats.append(fmt)
|
||||||
|
self._sort_formats(formats)
|
||||||
|
|
||||||
|
return {
|
||||||
|
'id': video_id,
|
||||||
|
'display_id': display_id,
|
||||||
|
'title': title,
|
||||||
|
'description': description,
|
||||||
|
'thumbnail': thumbnail,
|
||||||
|
'duration': duration,
|
||||||
|
'formats': formats,
|
||||||
|
'subtitles': subtitles,
|
||||||
|
}
|
@ -40,6 +40,7 @@ class WatIE(InfoExtractor):
|
|||||||
'upload_date': '20140816',
|
'upload_date': '20140816',
|
||||||
'duration': 2910,
|
'duration': 2910,
|
||||||
},
|
},
|
||||||
|
'skip': "Ce contenu n'est pas disponible pour l'instant.",
|
||||||
},
|
},
|
||||||
]
|
]
|
||||||
|
|
||||||
|
@ -1,13 +1,14 @@
|
|||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
import json
|
|
||||||
import re
|
import re
|
||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
|
from ..utils import ExtractorError, compat_urllib_request
|
||||||
|
|
||||||
|
|
||||||
class WistiaIE(InfoExtractor):
|
class WistiaIE(InfoExtractor):
|
||||||
_VALID_URL = r'https?://(?:fast\.)?wistia\.net/embed/iframe/(?P<id>[a-z0-9]+)'
|
_VALID_URL = r'https?://(?:fast\.)?wistia\.net/embed/iframe/(?P<id>[a-z0-9]+)'
|
||||||
|
_API_URL = 'http://fast.wistia.com/embed/medias/{0:}.json'
|
||||||
|
|
||||||
_TEST = {
|
_TEST = {
|
||||||
'url': 'http://fast.wistia.net/embed/iframe/sh7fpupwlt',
|
'url': 'http://fast.wistia.net/embed/iframe/sh7fpupwlt',
|
||||||
@ -24,11 +25,13 @@ class WistiaIE(InfoExtractor):
|
|||||||
mobj = re.match(self._VALID_URL, url)
|
mobj = re.match(self._VALID_URL, url)
|
||||||
video_id = mobj.group('id')
|
video_id = mobj.group('id')
|
||||||
|
|
||||||
webpage = self._download_webpage(url, video_id)
|
request = compat_urllib_request.Request(self._API_URL.format(video_id))
|
||||||
data_json = self._html_search_regex(
|
request.add_header('Referer', url) # Some videos require this.
|
||||||
r'Wistia\.iframeInit\((.*?), {}\);', webpage, 'video data')
|
data_json = self._download_json(request, video_id)
|
||||||
|
if data_json.get('error'):
|
||||||
data = json.loads(data_json)
|
raise ExtractorError('Error while getting the playlist',
|
||||||
|
expected=True)
|
||||||
|
data = data_json['media']
|
||||||
|
|
||||||
formats = []
|
formats = []
|
||||||
thumbnails = []
|
thumbnails = []
|
||||||
|
@ -13,37 +13,35 @@ class WorldStarHipHopIE(InfoExtractor):
|
|||||||
"info_dict": {
|
"info_dict": {
|
||||||
"id": "wshh6a7q1ny0G34ZwuIO",
|
"id": "wshh6a7q1ny0G34ZwuIO",
|
||||||
"ext": "mp4",
|
"ext": "mp4",
|
||||||
"title": "Video: KO Of The Week: MMA Fighter Gets Knocked Out By Swift Head Kick!"
|
"title": "KO Of The Week: MMA Fighter Gets Knocked Out By Swift Head Kick!"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
m = re.match(self._VALID_URL, url)
|
video_id = self._match_id(url)
|
||||||
video_id = m.group('id')
|
webpage = self._download_webpage(url, video_id)
|
||||||
|
|
||||||
webpage_src = self._download_webpage(url, video_id)
|
m_vevo_id = re.search(r'videoId=(.*?)&?', webpage)
|
||||||
|
|
||||||
m_vevo_id = re.search(r'videoId=(.*?)&?',
|
|
||||||
webpage_src)
|
|
||||||
if m_vevo_id is not None:
|
if m_vevo_id is not None:
|
||||||
return self.url_result('vevo:%s' % m_vevo_id.group(1), ie='Vevo')
|
return self.url_result('vevo:%s' % m_vevo_id.group(1), ie='Vevo')
|
||||||
|
|
||||||
video_url = self._search_regex(
|
video_url = self._search_regex(
|
||||||
r'so\.addVariable\("file","(.*?)"\)', webpage_src, 'video URL')
|
r'so\.addVariable\("file","(.*?)"\)', webpage, 'video URL')
|
||||||
|
|
||||||
if 'youtube' in video_url:
|
if 'youtube' in video_url:
|
||||||
return self.url_result(video_url, ie='Youtube')
|
return self.url_result(video_url, ie='Youtube')
|
||||||
|
|
||||||
video_title = self._html_search_regex(
|
video_title = self._html_search_regex(
|
||||||
r"<title>(.*)</title>", webpage_src, 'title')
|
r'(?s)<div class="content-heading">\s*<h1>(.*?)</h1>',
|
||||||
|
webpage, 'title')
|
||||||
|
|
||||||
# Getting thumbnail and if not thumbnail sets correct title for WSHH candy video.
|
# Getting thumbnail and if not thumbnail sets correct title for WSHH candy video.
|
||||||
thumbnail = self._html_search_regex(
|
thumbnail = self._html_search_regex(
|
||||||
r'rel="image_src" href="(.*)" />', webpage_src, 'thumbnail',
|
r'rel="image_src" href="(.*)" />', webpage, 'thumbnail',
|
||||||
fatal=False)
|
fatal=False)
|
||||||
if not thumbnail:
|
if not thumbnail:
|
||||||
_title = r"""candytitles.*>(.*)</span>"""
|
_title = r'candytitles.*>(.*)</span>'
|
||||||
mobj = re.search(_title, webpage_src)
|
mobj = re.search(_title, webpage)
|
||||||
if mobj is not None:
|
if mobj is not None:
|
||||||
video_title = mobj.group(1)
|
video_title = mobj.group(1)
|
||||||
|
|
||||||
|
@ -1,3 +1,4 @@
|
|||||||
|
# coding: utf-8
|
||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
import itertools
|
import itertools
|
||||||
@ -6,6 +7,7 @@ import re
|
|||||||
|
|
||||||
from .common import InfoExtractor, SearchInfoExtractor
|
from .common import InfoExtractor, SearchInfoExtractor
|
||||||
from ..utils import (
|
from ..utils import (
|
||||||
|
ExtractorError,
|
||||||
compat_urllib_parse,
|
compat_urllib_parse,
|
||||||
compat_urlparse,
|
compat_urlparse,
|
||||||
clean_html,
|
clean_html,
|
||||||
@ -15,7 +17,7 @@ from ..utils import (
|
|||||||
|
|
||||||
class YahooIE(InfoExtractor):
|
class YahooIE(InfoExtractor):
|
||||||
IE_DESC = 'Yahoo screen and movies'
|
IE_DESC = 'Yahoo screen and movies'
|
||||||
_VALID_URL = r'(?P<url>https?://(?:screen|movies)\.yahoo\.com/.*?-(?P<id>[0-9]+)(?:-[a-z]+)?\.html)'
|
_VALID_URL = r'(?P<url>(?P<host>https?://(?:[a-zA-Z]{2}\.)?[\da-zA-Z_-]+\.yahoo\.com)/(?:[^/]+/)*(?P<display_id>.+?)-(?P<id>[0-9]+)(?:-[a-z]+)?\.html)'
|
||||||
_TESTS = [
|
_TESTS = [
|
||||||
{
|
{
|
||||||
'url': 'http://screen.yahoo.com/julian-smith-travis-legg-watch-214727115.html',
|
'url': 'http://screen.yahoo.com/julian-smith-travis-legg-watch-214727115.html',
|
||||||
@ -25,6 +27,7 @@ class YahooIE(InfoExtractor):
|
|||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'title': 'Julian Smith & Travis Legg Watch Julian Smith',
|
'title': 'Julian Smith & Travis Legg Watch Julian Smith',
|
||||||
'description': 'Julian and Travis watch Julian Smith',
|
'description': 'Julian and Travis watch Julian Smith',
|
||||||
|
'duration': 6863,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@ -34,19 +37,10 @@ class YahooIE(InfoExtractor):
|
|||||||
'id': 'd1dedf8c-d58c-38c3-8963-e899929ae0a9',
|
'id': 'd1dedf8c-d58c-38c3-8963-e899929ae0a9',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'title': 'Codefellas - The Cougar Lies with Spanish Moss',
|
'title': 'Codefellas - The Cougar Lies with Spanish Moss',
|
||||||
'description': 'Agent Topple\'s mustache does its dirty work, and Nicole brokers a deal for peace. But why is the NSA collecting millions of Instagram brunch photos? And if your waffles have nothing to hide, what are they so worried about?',
|
'description': 'md5:66b627ab0a282b26352136ca96ce73c1',
|
||||||
|
'duration': 151,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
|
||||||
'url': 'https://movies.yahoo.com/video/world-loves-spider-man-190819223.html',
|
|
||||||
'md5': '410b7104aa9893b765bc22787a22f3d9',
|
|
||||||
'info_dict': {
|
|
||||||
'id': '516ed8e2-2c4f-339f-a211-7a8b49d30845',
|
|
||||||
'ext': 'mp4',
|
|
||||||
'title': 'The World Loves Spider-Man',
|
|
||||||
'description': '''People all over the world are celebrating the release of \"The Amazing Spider-Man 2.\" We're taking a look at the enthusiastic response Spider-Man has received from viewers all over the world.''',
|
|
||||||
}
|
|
||||||
},
|
|
||||||
{
|
{
|
||||||
'url': 'https://screen.yahoo.com/community/community-sizzle-reel-203225340.html?format=embed',
|
'url': 'https://screen.yahoo.com/community/community-sizzle-reel-203225340.html?format=embed',
|
||||||
'md5': '60e8ac193d8fb71997caa8fce54c6460',
|
'md5': '60e8ac193d8fb71997caa8fce54c6460',
|
||||||
@ -55,15 +49,95 @@ class YahooIE(InfoExtractor):
|
|||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'title': "Yahoo Saves 'Community'",
|
'title': "Yahoo Saves 'Community'",
|
||||||
'description': 'md5:4d4145af2fd3de00cbb6c1d664105053',
|
'description': 'md5:4d4145af2fd3de00cbb6c1d664105053',
|
||||||
|
'duration': 170,
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
'url': 'https://tw.screen.yahoo.com/taipei-opinion-poll/選情站報-街頭民調-台北市篇-102823042.html',
|
||||||
|
'md5': '92a7fdd8a08783c68a174d7aa067dde8',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '7a23b569-7bea-36cb-85b9-bd5301a0a1fb',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': '選情站報 街頭民調 台北市篇',
|
||||||
|
'description': '選情站報 街頭民調 台北市篇',
|
||||||
|
'duration': 429,
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
'url': 'https://uk.screen.yahoo.com/editor-picks/cute-raccoon-freed-drain-using-091756545.html',
|
||||||
|
'md5': '0b51660361f0e27c9789e7037ef76f4b',
|
||||||
|
'info_dict': {
|
||||||
|
'id': 'b3affa53-2e14-3590-852b-0e0db6cd1a58',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': 'Cute Raccoon Freed From Drain\u00a0Using Angle Grinder',
|
||||||
|
'description': 'md5:f66c890e1490f4910a9953c941dee944',
|
||||||
|
'duration': 97,
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
'url': 'https://ca.sports.yahoo.com/video/program-makes-hockey-more-affordable-013127711.html',
|
||||||
|
'md5': '57e06440778b1828a6079d2f744212c4',
|
||||||
|
'info_dict': {
|
||||||
|
'id': 'c9fa2a36-0d4d-3937-b8f6-cc0fb1881e73',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': 'Program that makes hockey more affordable not offered in Manitoba',
|
||||||
|
'description': 'md5:c54a609f4c078d92b74ffb9bf1f496f4',
|
||||||
|
'duration': 121,
|
||||||
|
}
|
||||||
|
}, {
|
||||||
|
'url': 'https://ca.finance.yahoo.com/news/20-most-valuable-brands-world-112600775.html',
|
||||||
|
'md5': '3e401e4eed6325aa29d9b96125fd5b4f',
|
||||||
|
'info_dict': {
|
||||||
|
'id': 'c1b4c09c-8ed8-3b65-8b05-169c55358a83',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': "Apple Is The World's Most Valuable Brand",
|
||||||
|
'description': 'md5:73eabc1a11c6f59752593b2ceefa1262',
|
||||||
|
'duration': 21,
|
||||||
|
}
|
||||||
|
}, {
|
||||||
|
'url': 'http://news.yahoo.com/video/china-moses-crazy-blues-104538833.html',
|
||||||
|
'md5': '67010fdf3a08d290e060a4dd96baa07b',
|
||||||
|
'info_dict': {
|
||||||
|
'id': 'f885cf7f-43d4-3450-9fac-46ac30ece521',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': 'China Moses Is Crazy About the Blues',
|
||||||
|
'description': 'md5:9900ab8cd5808175c7b3fe55b979bed0',
|
||||||
|
'duration': 128,
|
||||||
|
}
|
||||||
|
}, {
|
||||||
|
'url': 'https://in.lifestyle.yahoo.com/video/connect-dots-dark-side-virgo-090247395.html',
|
||||||
|
'md5': 'd9a083ccf1379127bf25699d67e4791b',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '52aeeaa3-b3d1-30d8-9ef8-5d0cf05efb7c',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': 'Connect the Dots: Dark Side of Virgo',
|
||||||
|
'description': 'md5:1428185051cfd1949807ad4ff6d3686a',
|
||||||
|
'duration': 201,
|
||||||
|
}
|
||||||
|
}, {
|
||||||
|
'url': 'https://gma.yahoo.com/pizza-delivery-man-surprised-huge-tip-college-kids-195200785.html',
|
||||||
|
'only_matching': True,
|
||||||
|
}
|
||||||
]
|
]
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
mobj = re.match(self._VALID_URL, url)
|
mobj = re.match(self._VALID_URL, url)
|
||||||
video_id = mobj.group('id')
|
display_id = mobj.group('display_id')
|
||||||
url = mobj.group('url')
|
url = mobj.group('url')
|
||||||
webpage = self._download_webpage(url, video_id)
|
host = mobj.group('host')
|
||||||
|
webpage = self._download_webpage(url, display_id)
|
||||||
|
|
||||||
|
# Look for iframed media first
|
||||||
|
iframe_m = re.search(r'<iframe[^>]+src="(/video/.+?-\d+\.html\?format=embed.*?)"', webpage)
|
||||||
|
if iframe_m:
|
||||||
|
iframepage = self._download_webpage(
|
||||||
|
host + iframe_m.group(1), display_id, 'Downloading iframe webpage')
|
||||||
|
items_json = self._search_regex(
|
||||||
|
r'mediaItems: (\[.+?\])$', iframepage, 'items', flags=re.MULTILINE, default=None)
|
||||||
|
if items_json:
|
||||||
|
items = json.loads(items_json)
|
||||||
|
video_id = items[0]['id']
|
||||||
|
return self._get_info(video_id, display_id, webpage)
|
||||||
|
|
||||||
items_json = self._search_regex(
|
items_json = self._search_regex(
|
||||||
r'mediaItems: ({.*?})$', webpage, 'items', flags=re.MULTILINE,
|
r'mediaItems: ({.*?})$', webpage, 'items', flags=re.MULTILINE,
|
||||||
@ -74,20 +148,22 @@ class YahooIE(InfoExtractor):
|
|||||||
r'root\.App\.Cache\.context\.videoCache\.curVideo = \{"([^"]+)"',
|
r'root\.App\.Cache\.context\.videoCache\.curVideo = \{"([^"]+)"',
|
||||||
r'"first_videoid"\s*:\s*"([^"]+)"',
|
r'"first_videoid"\s*:\s*"([^"]+)"',
|
||||||
]
|
]
|
||||||
long_id = self._search_regex(CONTENT_ID_REGEXES, webpage, 'content ID')
|
video_id = self._search_regex(CONTENT_ID_REGEXES, webpage, 'content ID')
|
||||||
video_id = long_id
|
|
||||||
else:
|
else:
|
||||||
items = json.loads(items_json)
|
items = json.loads(items_json)
|
||||||
info = items['mediaItems']['query']['results']['mediaObj'][0]
|
info = items['mediaItems']['query']['results']['mediaObj'][0]
|
||||||
# The 'meta' field is not always in the video webpage, we request it
|
# The 'meta' field is not always in the video webpage, we request it
|
||||||
# from another page
|
# from another page
|
||||||
long_id = info['id']
|
video_id = info['id']
|
||||||
return self._get_info(long_id, video_id, webpage)
|
return self._get_info(video_id, display_id, webpage)
|
||||||
|
|
||||||
def _get_info(self, long_id, video_id, webpage):
|
def _get_info(self, video_id, display_id, webpage):
|
||||||
|
region = self._search_regex(
|
||||||
|
r'\\?"region\\?"\s*:\s*\\?"([^"]+?)\\?"',
|
||||||
|
webpage, 'region', fatal=False, default='US')
|
||||||
query = ('SELECT * FROM yahoo.media.video.streams WHERE id="%s"'
|
query = ('SELECT * FROM yahoo.media.video.streams WHERE id="%s"'
|
||||||
' AND plrs="86Gj0vCaSzV_Iuf6hNylf2" AND region="US"'
|
' AND plrs="86Gj0vCaSzV_Iuf6hNylf2" AND region="%s"'
|
||||||
' AND protocol="http"' % long_id)
|
' AND protocol="http"' % (video_id, region))
|
||||||
data = compat_urllib_parse.urlencode({
|
data = compat_urllib_parse.urlencode({
|
||||||
'q': query,
|
'q': query,
|
||||||
'env': 'prod',
|
'env': 'prod',
|
||||||
@ -95,9 +171,17 @@ class YahooIE(InfoExtractor):
|
|||||||
})
|
})
|
||||||
query_result = self._download_json(
|
query_result = self._download_json(
|
||||||
'http://video.query.yahoo.com/v1/public/yql?' + data,
|
'http://video.query.yahoo.com/v1/public/yql?' + data,
|
||||||
video_id, 'Downloading video info')
|
display_id, 'Downloading video info')
|
||||||
|
|
||||||
info = query_result['query']['results']['mediaObj'][0]
|
info = query_result['query']['results']['mediaObj'][0]
|
||||||
meta = info['meta']
|
meta = info.get('meta')
|
||||||
|
|
||||||
|
if not meta:
|
||||||
|
msg = info['status'].get('msg')
|
||||||
|
if msg:
|
||||||
|
raise ExtractorError(
|
||||||
|
'%s returned error: %s' % (self.IE_NAME, msg), expected=True)
|
||||||
|
raise ExtractorError('Unable to extract media object meta')
|
||||||
|
|
||||||
formats = []
|
formats = []
|
||||||
for s in info['streams']:
|
for s in info['streams']:
|
||||||
@ -124,36 +208,15 @@ class YahooIE(InfoExtractor):
|
|||||||
|
|
||||||
return {
|
return {
|
||||||
'id': video_id,
|
'id': video_id,
|
||||||
|
'display_id': display_id,
|
||||||
'title': meta['title'],
|
'title': meta['title'],
|
||||||
'formats': formats,
|
'formats': formats,
|
||||||
'description': clean_html(meta['description']),
|
'description': clean_html(meta['description']),
|
||||||
'thumbnail': meta['thumbnail'] if meta.get('thumbnail') else self._og_search_thumbnail(webpage),
|
'thumbnail': meta['thumbnail'] if meta.get('thumbnail') else self._og_search_thumbnail(webpage),
|
||||||
|
'duration': int_or_none(meta.get('duration')),
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
class YahooNewsIE(YahooIE):
|
|
||||||
IE_NAME = 'yahoo:news'
|
|
||||||
_VALID_URL = r'http://news\.yahoo\.com/video/.*?-(?P<id>\d*?)\.html'
|
|
||||||
|
|
||||||
_TESTS = [{
|
|
||||||
'url': 'http://news.yahoo.com/video/china-moses-crazy-blues-104538833.html',
|
|
||||||
'md5': '67010fdf3a08d290e060a4dd96baa07b',
|
|
||||||
'info_dict': {
|
|
||||||
'id': '104538833',
|
|
||||||
'ext': 'mp4',
|
|
||||||
'title': 'China Moses Is Crazy About the Blues',
|
|
||||||
'description': 'md5:9900ab8cd5808175c7b3fe55b979bed0',
|
|
||||||
},
|
|
||||||
}]
|
|
||||||
|
|
||||||
def _real_extract(self, url):
|
|
||||||
mobj = re.match(self._VALID_URL, url)
|
|
||||||
video_id = mobj.group('id')
|
|
||||||
webpage = self._download_webpage(url, video_id)
|
|
||||||
long_id = self._search_regex(r'contentId: \'(.+?)\',', webpage, 'long id')
|
|
||||||
return self._get_info(long_id, video_id, webpage)
|
|
||||||
|
|
||||||
|
|
||||||
class YahooSearchIE(SearchInfoExtractor):
|
class YahooSearchIE(SearchInfoExtractor):
|
||||||
IE_DESC = 'Yahoo screen search'
|
IE_DESC = 'Yahoo screen search'
|
||||||
_MAX_RESULTS = 1000
|
_MAX_RESULTS = 1000
|
||||||
|
52
youtube_dl/extractor/ynet.py
Normal file
52
youtube_dl/extractor/ynet.py
Normal file
@ -0,0 +1,52 @@
|
|||||||
|
# coding: utf-8
|
||||||
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
|
import re
|
||||||
|
import json
|
||||||
|
|
||||||
|
from .common import InfoExtractor
|
||||||
|
from ..utils import compat_urllib_parse
|
||||||
|
|
||||||
|
|
||||||
|
class YnetIE(InfoExtractor):
|
||||||
|
_VALID_URL = r'http://(?:.+?\.)?ynet\.co\.il/(?:.+?/)?0,7340,(?P<id>L(?:-[0-9]+)+),00\.html'
|
||||||
|
_TESTS = [
|
||||||
|
{
|
||||||
|
'url': 'http://hot.ynet.co.il/home/0,7340,L-11659-99244,00.html',
|
||||||
|
'md5': '4b29cb57c3dddd57642b3f051f535b07',
|
||||||
|
'info_dict': {
|
||||||
|
'id': 'L-11659-99244',
|
||||||
|
'ext': 'flv',
|
||||||
|
'title': 'איש לא יודע מאיפה באנו',
|
||||||
|
'thumbnail': 're:^https?://.*\.jpg',
|
||||||
|
}
|
||||||
|
}, {
|
||||||
|
'url': 'http://hot.ynet.co.il/home/0,7340,L-8859-84418,00.html',
|
||||||
|
'md5': '8194c2ea221e9a639cac96b6b0753dc5',
|
||||||
|
'info_dict': {
|
||||||
|
'id': 'L-8859-84418',
|
||||||
|
'ext': 'flv',
|
||||||
|
'title': "צפו: הנשיקה הלוהטת של תורגי' ויוליה פלוטקין",
|
||||||
|
'thumbnail': 're:^https?://.*\.jpg',
|
||||||
|
}
|
||||||
|
}
|
||||||
|
]
|
||||||
|
|
||||||
|
def _real_extract(self, url):
|
||||||
|
video_id = self._match_id(url)
|
||||||
|
webpage = self._download_webpage(url, video_id)
|
||||||
|
|
||||||
|
content = compat_urllib_parse.unquote_plus(self._og_search_video_url(webpage))
|
||||||
|
config = json.loads(self._search_regex(r'config=({.+?})$', content, 'video config'))
|
||||||
|
f4m_url = config['clip']['url']
|
||||||
|
title = self._og_search_title(webpage)
|
||||||
|
m = re.search(r'ynet - HOT -- (["\']+)(?P<title>.+?)\1', title)
|
||||||
|
if m:
|
||||||
|
title = m.group('title')
|
||||||
|
|
||||||
|
return {
|
||||||
|
'id': video_id,
|
||||||
|
'title': title,
|
||||||
|
'formats': self._extract_f4m_formats(f4m_url, video_id),
|
||||||
|
'thumbnail': self._og_search_thumbnail(webpage),
|
||||||
|
}
|
@ -1,6 +1,7 @@
|
|||||||
# coding: utf-8
|
# coding: utf-8
|
||||||
|
|
||||||
import json
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
import math
|
import math
|
||||||
import random
|
import random
|
||||||
import re
|
import re
|
||||||
@ -13,18 +14,25 @@ from ..utils import (
|
|||||||
|
|
||||||
|
|
||||||
class YoukuIE(InfoExtractor):
|
class YoukuIE(InfoExtractor):
|
||||||
_VALID_URL = r'(?:(?:http://)?(?:v|player)\.youku\.com/(?:v_show/id_|player\.php/sid/)|youku:)(?P<ID>[A-Za-z0-9]+)(?:\.html|/v\.swf|)'
|
_VALID_URL = r'''(?x)
|
||||||
_TEST = {
|
(?:
|
||||||
u"url": u"http://v.youku.com/v_show/id_XNDgyMDQ2NTQw.html",
|
http://(?:v|player)\.youku\.com/(?:v_show/id_|player\.php/sid/)|
|
||||||
u"file": u"XNDgyMDQ2NTQw_part00.flv",
|
youku:)
|
||||||
u"md5": u"ffe3f2e435663dc2d1eea34faeff5b5b",
|
(?P<id>[A-Za-z0-9]+)(?:\.html|/v\.swf|)
|
||||||
u"params": {u"test": False},
|
'''
|
||||||
u"info_dict": {
|
_TEST = {
|
||||||
u"title": u"youtube-dl test video \"'/\\ä↭𝕐"
|
'url': 'http://v.youku.com/v_show/id_XNDgyMDQ2NTQw.html',
|
||||||
|
'md5': 'ffe3f2e435663dc2d1eea34faeff5b5b',
|
||||||
|
'params': {
|
||||||
|
'test': False
|
||||||
|
},
|
||||||
|
'info_dict': {
|
||||||
|
'id': 'XNDgyMDQ2NTQw_part00',
|
||||||
|
'ext': 'flv',
|
||||||
|
'title': 'youtube-dl test video "\'/\\ä↭𝕐'
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
def _gen_sid(self):
|
def _gen_sid(self):
|
||||||
nowTime = int(time.time() * 1000)
|
nowTime = int(time.time() * 1000)
|
||||||
random1 = random.randint(1000,1998)
|
random1 = random.randint(1000,1998)
|
||||||
@ -55,49 +63,42 @@ class YoukuIE(InfoExtractor):
|
|||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
mobj = re.match(self._VALID_URL, url)
|
mobj = re.match(self._VALID_URL, url)
|
||||||
if mobj is None:
|
video_id = mobj.group('id')
|
||||||
raise ExtractorError(u'Invalid URL: %s' % url)
|
|
||||||
video_id = mobj.group('ID')
|
|
||||||
|
|
||||||
info_url = 'http://v.youku.com/player/getPlayList/VideoIDS/' + video_id
|
info_url = 'http://v.youku.com/player/getPlayList/VideoIDS/' + video_id
|
||||||
|
|
||||||
jsondata = self._download_webpage(info_url, video_id)
|
config = self._download_json(info_url, video_id)
|
||||||
|
|
||||||
self.report_extraction(video_id)
|
error_code = config['data'][0].get('error_code')
|
||||||
try:
|
if error_code:
|
||||||
config = json.loads(jsondata)
|
# -8 means blocked outside China.
|
||||||
error_code = config['data'][0].get('error_code')
|
error = config['data'][0].get('error') # Chinese and English, separated by newline.
|
||||||
if error_code:
|
raise ExtractorError(error or 'Server reported error %i' % error_code,
|
||||||
# -8 means blocked outside China.
|
expected=True)
|
||||||
error = config['data'][0].get('error') # Chinese and English, separated by newline.
|
|
||||||
raise ExtractorError(error or u'Server reported error %i' % error_code,
|
|
||||||
expected=True)
|
|
||||||
|
|
||||||
video_title = config['data'][0]['title']
|
video_title = config['data'][0]['title']
|
||||||
seed = config['data'][0]['seed']
|
seed = config['data'][0]['seed']
|
||||||
|
|
||||||
format = self._downloader.params.get('format', None)
|
format = self._downloader.params.get('format', None)
|
||||||
supported_format = list(config['data'][0]['streamfileids'].keys())
|
supported_format = list(config['data'][0]['streamfileids'].keys())
|
||||||
|
|
||||||
if format is None or format == 'best':
|
# TODO proper format selection
|
||||||
if 'hd2' in supported_format:
|
if format is None or format == 'best':
|
||||||
format = 'hd2'
|
if 'hd2' in supported_format:
|
||||||
else:
|
format = 'hd2'
|
||||||
format = 'flv'
|
|
||||||
ext = u'flv'
|
|
||||||
elif format == 'worst':
|
|
||||||
format = 'mp4'
|
|
||||||
ext = u'mp4'
|
|
||||||
else:
|
else:
|
||||||
format = 'flv'
|
format = 'flv'
|
||||||
ext = u'flv'
|
ext = 'flv'
|
||||||
|
elif format == 'worst':
|
||||||
|
format = 'mp4'
|
||||||
|
ext = 'mp4'
|
||||||
|
else:
|
||||||
|
format = 'flv'
|
||||||
|
ext = 'flv'
|
||||||
|
|
||||||
|
fileid = config['data'][0]['streamfileids'][format]
|
||||||
fileid = config['data'][0]['streamfileids'][format]
|
keys = [s['k'] for s in config['data'][0]['segs'][format]]
|
||||||
keys = [s['k'] for s in config['data'][0]['segs'][format]]
|
# segs is usually a dictionary, but an empty *list* if an error occured.
|
||||||
# segs is usually a dictionary, but an empty *list* if an error occured.
|
|
||||||
except (UnicodeDecodeError, ValueError, KeyError):
|
|
||||||
raise ExtractorError(u'Unable to extract info section')
|
|
||||||
|
|
||||||
files_info=[]
|
files_info=[]
|
||||||
sid = self._gen_sid()
|
sid = self._gen_sid()
|
||||||
@ -106,9 +107,8 @@ class YoukuIE(InfoExtractor):
|
|||||||
#column 8,9 of fileid represent the segment number
|
#column 8,9 of fileid represent the segment number
|
||||||
#fileid[7:9] should be changed
|
#fileid[7:9] should be changed
|
||||||
for index, key in enumerate(keys):
|
for index, key in enumerate(keys):
|
||||||
|
|
||||||
temp_fileid = '%s%02X%s' % (fileid[0:8], index, fileid[10:])
|
temp_fileid = '%s%02X%s' % (fileid[0:8], index, fileid[10:])
|
||||||
download_url = 'http://f.youku.com/player/getFlvPath/sid/%s_%02X/st/flv/fileid/%s?k=%s' % (sid, index, temp_fileid, key)
|
download_url = 'http://k.youku.com/player/getFlvPath/sid/%s_%02X/st/flv/fileid/%s?k=%s' % (sid, index, temp_fileid, key)
|
||||||
|
|
||||||
info = {
|
info = {
|
||||||
'id': '%s_part%02d' % (video_id, index),
|
'id': '%s_part%02d' % (video_id, index),
|
||||||
|
@ -26,7 +26,7 @@ from ..utils import (
|
|||||||
get_element_by_attribute,
|
get_element_by_attribute,
|
||||||
ExtractorError,
|
ExtractorError,
|
||||||
int_or_none,
|
int_or_none,
|
||||||
PagedList,
|
OnDemandPagedList,
|
||||||
unescapeHTML,
|
unescapeHTML,
|
||||||
unified_strdate,
|
unified_strdate,
|
||||||
orderedSet,
|
orderedSet,
|
||||||
@ -286,6 +286,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
|
|||||||
'170': {'ext': 'webm', 'height': 1080, 'width': 1920, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40},
|
'170': {'ext': 'webm', 'height': 1080, 'width': 1920, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40},
|
||||||
'218': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40},
|
'218': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40},
|
||||||
'219': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40},
|
'219': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40},
|
||||||
|
'278': {'ext': 'webm', 'height': 144, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'container': 'webm', 'vcodec': 'VP9'},
|
||||||
'242': {'ext': 'webm', 'height': 240, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
|
'242': {'ext': 'webm', 'height': 240, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
|
||||||
'243': {'ext': 'webm', 'height': 360, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
|
'243': {'ext': 'webm', 'height': 360, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
|
||||||
'244': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
|
'244': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
|
||||||
@ -655,6 +656,16 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
|
|||||||
|
|
||||||
# Get video webpage
|
# Get video webpage
|
||||||
url = proto + '://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1' % video_id
|
url = proto + '://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1' % video_id
|
||||||
|
pref_cookies = [
|
||||||
|
c for c in self._downloader.cookiejar
|
||||||
|
if c.domain == '.youtube.com' and c.name == 'PREF']
|
||||||
|
for pc in pref_cookies:
|
||||||
|
if 'hl=' in pc.value:
|
||||||
|
pc.value = re.sub(r'hl=[^&]+', 'hl=en', pc.value)
|
||||||
|
else:
|
||||||
|
if pc.value:
|
||||||
|
pc.value += '&'
|
||||||
|
pc.value += 'hl=en'
|
||||||
video_webpage = self._download_webpage(url, video_id)
|
video_webpage = self._download_webpage(url, video_id)
|
||||||
|
|
||||||
# Attempt to extract SWF player URL
|
# Attempt to extract SWF player URL
|
||||||
@ -928,7 +939,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
|
|||||||
raise ExtractorError('no conn, hlsvp or url_encoded_fmt_stream_map information found in video info')
|
raise ExtractorError('no conn, hlsvp or url_encoded_fmt_stream_map information found in video info')
|
||||||
|
|
||||||
# Look for the DASH manifest
|
# Look for the DASH manifest
|
||||||
if (self._downloader.params.get('youtube_include_dash_manifest', False)):
|
if self._downloader.params.get('youtube_include_dash_manifest', True):
|
||||||
try:
|
try:
|
||||||
# The DASH manifest used needs to be the one from the original video_webpage.
|
# The DASH manifest used needs to be the one from the original video_webpage.
|
||||||
# The one found in get_video_info seems to be using different signatures.
|
# The one found in get_video_info seems to be using different signatures.
|
||||||
@ -1068,6 +1079,13 @@ class YoutubePlaylistIE(YoutubeBaseInfoExtractor):
|
|||||||
'info_dict': {
|
'info_dict': {
|
||||||
'title': 'JODA15',
|
'title': 'JODA15',
|
||||||
}
|
}
|
||||||
|
}, {
|
||||||
|
'note': 'Embedded SWF player',
|
||||||
|
'url': 'http://www.youtube.com/p/YN5VISEtHet5D4NEvfTd0zcgFk84NqFZ?hl=en_US&fs=1&rel=0',
|
||||||
|
'playlist_count': 4,
|
||||||
|
'info_dict': {
|
||||||
|
'title': 'JODA7',
|
||||||
|
}
|
||||||
}]
|
}]
|
||||||
|
|
||||||
def _real_initialize(self):
|
def _real_initialize(self):
|
||||||
@ -1334,7 +1352,7 @@ class YoutubeUserIE(InfoExtractor):
|
|||||||
'id': video_id,
|
'id': video_id,
|
||||||
'title': title,
|
'title': title,
|
||||||
}
|
}
|
||||||
url_results = PagedList(download_page, self._GDATA_PAGE_SIZE)
|
url_results = OnDemandPagedList(download_page, self._GDATA_PAGE_SIZE)
|
||||||
|
|
||||||
return self.playlist_result(url_results, playlist_title=username)
|
return self.playlist_result(url_results, playlist_title=username)
|
||||||
|
|
||||||
|
@ -75,7 +75,8 @@ def parseOpts(overrideArguments=None):
|
|||||||
if len(opts) > 1:
|
if len(opts) > 1:
|
||||||
opts.insert(1, ', ')
|
opts.insert(1, ', ')
|
||||||
|
|
||||||
if option.takes_value(): opts.append(' %s' % option.metavar)
|
if option.takes_value():
|
||||||
|
opts.append(' %s' % option.metavar)
|
||||||
|
|
||||||
return "".join(opts)
|
return "".join(opts)
|
||||||
|
|
||||||
@ -87,68 +88,69 @@ def parseOpts(overrideArguments=None):
|
|||||||
for private_opt in ['-p', '--password', '-u', '--username', '--video-password']:
|
for private_opt in ['-p', '--password', '-u', '--username', '--video-password']:
|
||||||
try:
|
try:
|
||||||
i = opts.index(private_opt)
|
i = opts.index(private_opt)
|
||||||
opts[i+1] = '<PRIVATE>'
|
opts[i + 1] = 'PRIVATE'
|
||||||
except ValueError:
|
except ValueError:
|
||||||
pass
|
pass
|
||||||
return opts
|
return opts
|
||||||
|
|
||||||
max_width = 80
|
|
||||||
max_help_position = 80
|
|
||||||
|
|
||||||
# No need to wrap help messages if we're on a wide console
|
# No need to wrap help messages if we're on a wide console
|
||||||
columns = get_term_width()
|
columns = get_term_width()
|
||||||
if columns: max_width = columns
|
max_width = columns if columns else 80
|
||||||
|
max_help_position = 80
|
||||||
|
|
||||||
fmt = optparse.IndentedHelpFormatter(width=max_width, max_help_position=max_help_position)
|
fmt = optparse.IndentedHelpFormatter(width=max_width, max_help_position=max_help_position)
|
||||||
fmt.format_option_strings = _format_option_string
|
fmt.format_option_strings = _format_option_string
|
||||||
|
|
||||||
kw = {
|
kw = {
|
||||||
'version' : __version__,
|
'version': __version__,
|
||||||
'formatter' : fmt,
|
'formatter': fmt,
|
||||||
'usage' : '%prog [options] url [url...]',
|
'usage': '%prog [options] url [url...]',
|
||||||
'conflict_handler' : 'resolve',
|
'conflict_handler': 'resolve',
|
||||||
}
|
}
|
||||||
|
|
||||||
parser = optparse.OptionParser(**kw)
|
parser = optparse.OptionParser(**kw)
|
||||||
|
|
||||||
# option groups
|
general = optparse.OptionGroup(parser, 'General Options')
|
||||||
general = optparse.OptionGroup(parser, 'General Options')
|
|
||||||
selection = optparse.OptionGroup(parser, 'Video Selection')
|
|
||||||
authentication = optparse.OptionGroup(parser, 'Authentication Options')
|
|
||||||
video_format = optparse.OptionGroup(parser, 'Video Format Options')
|
|
||||||
subtitles = optparse.OptionGroup(parser, 'Subtitle Options')
|
|
||||||
downloader = optparse.OptionGroup(parser, 'Download Options')
|
|
||||||
postproc = optparse.OptionGroup(parser, 'Post-processing Options')
|
|
||||||
filesystem = optparse.OptionGroup(parser, 'Filesystem Options')
|
|
||||||
workarounds = optparse.OptionGroup(parser, 'Workarounds')
|
|
||||||
verbosity = optparse.OptionGroup(parser, 'Verbosity / Simulation Options')
|
|
||||||
|
|
||||||
general.add_option('-h', '--help',
|
|
||||||
action='help', help='print this help text and exit')
|
|
||||||
general.add_option('-v', '--version',
|
|
||||||
action='version', help='print program version and exit')
|
|
||||||
general.add_option('-U', '--update',
|
|
||||||
action='store_true', dest='update_self', help='update this program to latest version. Make sure that you have sufficient permissions (run with sudo if needed)')
|
|
||||||
general.add_option('-i', '--ignore-errors',
|
|
||||||
action='store_true', dest='ignoreerrors', help='continue on download errors, for example to skip unavailable videos in a playlist', default=False)
|
|
||||||
general.add_option('--abort-on-error',
|
|
||||||
action='store_false', dest='ignoreerrors',
|
|
||||||
help='Abort downloading of further videos (in the playlist or the command line) if an error occurs')
|
|
||||||
general.add_option('--dump-user-agent',
|
|
||||||
action='store_true', dest='dump_user_agent',
|
|
||||||
help='display the current browser identification', default=False)
|
|
||||||
general.add_option('--list-extractors',
|
|
||||||
action='store_true', dest='list_extractors',
|
|
||||||
help='List all supported extractors and the URLs they would handle', default=False)
|
|
||||||
general.add_option('--extractor-descriptions',
|
|
||||||
action='store_true', dest='list_extractor_descriptions',
|
|
||||||
help='Output descriptions of all supported extractors', default=False)
|
|
||||||
general.add_option(
|
general.add_option(
|
||||||
'--proxy', dest='proxy', default=None, metavar='URL',
|
'-h', '--help',
|
||||||
|
action='help',
|
||||||
|
help='print this help text and exit')
|
||||||
|
general.add_option(
|
||||||
|
'-v', '--version',
|
||||||
|
action='version',
|
||||||
|
help='print program version and exit')
|
||||||
|
general.add_option(
|
||||||
|
'-U', '--update',
|
||||||
|
action='store_true', dest='update_self',
|
||||||
|
help='update this program to latest version. Make sure that you have sufficient permissions (run with sudo if needed)')
|
||||||
|
general.add_option(
|
||||||
|
'-i', '--ignore-errors',
|
||||||
|
action='store_true', dest='ignoreerrors', default=False,
|
||||||
|
help='continue on download errors, for example to skip unavailable videos in a playlist')
|
||||||
|
general.add_option(
|
||||||
|
'--abort-on-error',
|
||||||
|
action='store_false', dest='ignoreerrors',
|
||||||
|
help='Abort downloading of further videos (in the playlist or the command line) if an error occurs')
|
||||||
|
general.add_option(
|
||||||
|
'--dump-user-agent',
|
||||||
|
action='store_true', dest='dump_user_agent', default=False,
|
||||||
|
help='display the current browser identification')
|
||||||
|
general.add_option(
|
||||||
|
'--list-extractors',
|
||||||
|
action='store_true', dest='list_extractors', default=False,
|
||||||
|
help='List all supported extractors and the URLs they would handle')
|
||||||
|
general.add_option(
|
||||||
|
'--extractor-descriptions',
|
||||||
|
action='store_true', dest='list_extractor_descriptions', default=False,
|
||||||
|
help='Output descriptions of all supported extractors')
|
||||||
|
general.add_option(
|
||||||
|
'--proxy', dest='proxy',
|
||||||
|
default=None, metavar='URL',
|
||||||
help='Use the specified HTTP/HTTPS proxy. Pass in an empty string (--proxy "") for direct connection')
|
help='Use the specified HTTP/HTTPS proxy. Pass in an empty string (--proxy "") for direct connection')
|
||||||
general.add_option(
|
general.add_option(
|
||||||
'--socket-timeout', dest='socket_timeout',
|
'--socket-timeout',
|
||||||
type=float, default=None, help=u'Time to wait before giving up, in seconds')
|
dest='socket_timeout', type=float, default=None,
|
||||||
|
help='Time to wait before giving up, in seconds')
|
||||||
general.add_option(
|
general.add_option(
|
||||||
'--default-search',
|
'--default-search',
|
||||||
dest='default_search', metavar='PREFIX',
|
dest='default_search', metavar='PREFIX',
|
||||||
@ -158,6 +160,7 @@ def parseOpts(overrideArguments=None):
|
|||||||
action='store_true',
|
action='store_true',
|
||||||
help='Do not read configuration files. When given in the global configuration file /etc/youtube-dl.conf: do not read the user configuration in ~/.config/youtube-dl.conf (%APPDATA%/youtube-dl/config.txt on Windows)')
|
help='Do not read configuration files. When given in the global configuration file /etc/youtube-dl.conf: do not read the user configuration in ~/.config/youtube-dl.conf (%APPDATA%/youtube-dl/config.txt on Windows)')
|
||||||
|
|
||||||
|
selection = optparse.OptionGroup(parser, 'Video Selection')
|
||||||
selection.add_option(
|
selection.add_option(
|
||||||
'--playlist-start',
|
'--playlist-start',
|
||||||
dest='playliststart', metavar='NUMBER', default=1, type=int,
|
dest='playliststart', metavar='NUMBER', default=1, type=int,
|
||||||
@ -166,245 +169,371 @@ def parseOpts(overrideArguments=None):
|
|||||||
'--playlist-end',
|
'--playlist-end',
|
||||||
dest='playlistend', metavar='NUMBER', default=None, type=int,
|
dest='playlistend', metavar='NUMBER', default=None, type=int,
|
||||||
help='playlist video to end at (default is last)')
|
help='playlist video to end at (default is last)')
|
||||||
selection.add_option('--match-title', dest='matchtitle', metavar='REGEX',help='download only matching titles (regex or caseless sub-string)')
|
|
||||||
selection.add_option('--reject-title', dest='rejecttitle', metavar='REGEX',help='skip download for matching titles (regex or caseless sub-string)')
|
|
||||||
selection.add_option('--max-downloads', metavar='NUMBER',
|
|
||||||
dest='max_downloads', type=int, default=None,
|
|
||||||
help='Abort after downloading NUMBER files')
|
|
||||||
selection.add_option('--min-filesize', metavar='SIZE', dest='min_filesize', help="Do not download any videos smaller than SIZE (e.g. 50k or 44.6m)", default=None)
|
|
||||||
selection.add_option('--max-filesize', metavar='SIZE', dest='max_filesize', help="Do not download any videos larger than SIZE (e.g. 50k or 44.6m)", default=None)
|
|
||||||
selection.add_option('--date', metavar='DATE', dest='date', help='download only videos uploaded in this date', default=None)
|
|
||||||
selection.add_option(
|
selection.add_option(
|
||||||
'--datebefore', metavar='DATE', dest='datebefore', default=None,
|
'--match-title',
|
||||||
|
dest='matchtitle', metavar='REGEX',
|
||||||
|
help='download only matching titles (regex or caseless sub-string)')
|
||||||
|
selection.add_option(
|
||||||
|
'--reject-title',
|
||||||
|
dest='rejecttitle', metavar='REGEX',
|
||||||
|
help='skip download for matching titles (regex or caseless sub-string)')
|
||||||
|
selection.add_option(
|
||||||
|
'--max-downloads',
|
||||||
|
dest='max_downloads', metavar='NUMBER', type=int, default=None,
|
||||||
|
help='Abort after downloading NUMBER files')
|
||||||
|
selection.add_option(
|
||||||
|
'--min-filesize',
|
||||||
|
metavar='SIZE', dest='min_filesize', default=None,
|
||||||
|
help='Do not download any videos smaller than SIZE (e.g. 50k or 44.6m)')
|
||||||
|
selection.add_option(
|
||||||
|
'--max-filesize',
|
||||||
|
metavar='SIZE', dest='max_filesize', default=None,
|
||||||
|
help='Do not download any videos larger than SIZE (e.g. 50k or 44.6m)')
|
||||||
|
selection.add_option(
|
||||||
|
'--date',
|
||||||
|
metavar='DATE', dest='date', default=None,
|
||||||
|
help='download only videos uploaded in this date')
|
||||||
|
selection.add_option(
|
||||||
|
'--datebefore',
|
||||||
|
metavar='DATE', dest='datebefore', default=None,
|
||||||
help='download only videos uploaded on or before this date (i.e. inclusive)')
|
help='download only videos uploaded on or before this date (i.e. inclusive)')
|
||||||
selection.add_option(
|
selection.add_option(
|
||||||
'--dateafter', metavar='DATE', dest='dateafter', default=None,
|
'--dateafter',
|
||||||
|
metavar='DATE', dest='dateafter', default=None,
|
||||||
help='download only videos uploaded on or after this date (i.e. inclusive)')
|
help='download only videos uploaded on or after this date (i.e. inclusive)')
|
||||||
selection.add_option(
|
selection.add_option(
|
||||||
'--min-views', metavar='COUNT', dest='min_views',
|
'--min-views',
|
||||||
default=None, type=int,
|
metavar='COUNT', dest='min_views', default=None, type=int,
|
||||||
help="Do not download any videos with less than COUNT views",)
|
help='Do not download any videos with less than COUNT views',)
|
||||||
selection.add_option(
|
selection.add_option(
|
||||||
'--max-views', metavar='COUNT', dest='max_views',
|
'--max-views',
|
||||||
default=None, type=int,
|
metavar='COUNT', dest='max_views', default=None, type=int,
|
||||||
help="Do not download any videos with more than COUNT views",)
|
help='Do not download any videos with more than COUNT views')
|
||||||
selection.add_option('--no-playlist', action='store_true', dest='noplaylist', help='download only the currently playing video', default=False)
|
|
||||||
selection.add_option('--age-limit', metavar='YEARS', dest='age_limit',
|
|
||||||
help='download only videos suitable for the given age',
|
|
||||||
default=None, type=int)
|
|
||||||
selection.add_option('--download-archive', metavar='FILE',
|
|
||||||
dest='download_archive',
|
|
||||||
help='Download only videos not listed in the archive file. Record the IDs of all downloaded videos in it.')
|
|
||||||
selection.add_option(
|
selection.add_option(
|
||||||
'--include-ads', dest='include_ads',
|
'--no-playlist',
|
||||||
action='store_true',
|
action='store_true', dest='noplaylist', default=False,
|
||||||
|
help='download only the currently playing video')
|
||||||
|
selection.add_option(
|
||||||
|
'--age-limit',
|
||||||
|
metavar='YEARS', dest='age_limit', default=None, type=int,
|
||||||
|
help='download only videos suitable for the given age')
|
||||||
|
selection.add_option(
|
||||||
|
'--download-archive', metavar='FILE',
|
||||||
|
dest='download_archive',
|
||||||
|
help='Download only videos not listed in the archive file. Record the IDs of all downloaded videos in it.')
|
||||||
|
selection.add_option(
|
||||||
|
'--include-ads',
|
||||||
|
dest='include_ads', action='store_true',
|
||||||
help='Download advertisements as well (experimental)')
|
help='Download advertisements as well (experimental)')
|
||||||
selection.add_option(
|
|
||||||
'--youtube-include-dash-manifest', action='store_true',
|
|
||||||
dest='youtube_include_dash_manifest', default=False,
|
|
||||||
help='Try to download the DASH manifest on YouTube videos (experimental)')
|
|
||||||
|
|
||||||
authentication.add_option('-u', '--username',
|
authentication = optparse.OptionGroup(parser, 'Authentication Options')
|
||||||
dest='username', metavar='USERNAME', help='account username')
|
authentication.add_option(
|
||||||
authentication.add_option('-p', '--password',
|
'-u', '--username',
|
||||||
dest='password', metavar='PASSWORD', help='account password')
|
dest='username', metavar='USERNAME',
|
||||||
authentication.add_option('-2', '--twofactor',
|
help='login with this account ID')
|
||||||
dest='twofactor', metavar='TWOFACTOR', help='two-factor auth code')
|
authentication.add_option(
|
||||||
authentication.add_option('-n', '--netrc',
|
'-p', '--password',
|
||||||
action='store_true', dest='usenetrc', help='use .netrc authentication data', default=False)
|
dest='password', metavar='PASSWORD',
|
||||||
authentication.add_option('--video-password',
|
help='account password')
|
||||||
dest='videopassword', metavar='PASSWORD', help='video password (vimeo, smotri)')
|
authentication.add_option(
|
||||||
|
'-2', '--twofactor',
|
||||||
|
dest='twofactor', metavar='TWOFACTOR',
|
||||||
|
help='two-factor auth code')
|
||||||
|
authentication.add_option(
|
||||||
|
'-n', '--netrc',
|
||||||
|
action='store_true', dest='usenetrc', default=False,
|
||||||
|
help='use .netrc authentication data')
|
||||||
|
authentication.add_option(
|
||||||
|
'--video-password',
|
||||||
|
dest='videopassword', metavar='PASSWORD',
|
||||||
|
help='video password (vimeo, smotri)')
|
||||||
|
|
||||||
|
video_format = optparse.OptionGroup(parser, 'Video Format Options')
|
||||||
|
video_format.add_option(
|
||||||
|
'-f', '--format',
|
||||||
|
action='store', dest='format', metavar='FORMAT', default=None,
|
||||||
|
help='video format code, specify the order of preference using slashes: -f 22/17/18 . -f mp4 , -f m4a and -f flv are also supported. You can also use the special names "best", "bestvideo", "bestaudio", "worst", "worstvideo" and "worstaudio". By default, youtube-dl will pick the best quality. Use commas to download multiple audio formats, such as -f 136/137/mp4/bestvideo,140/m4a/bestaudio')
|
||||||
|
video_format.add_option(
|
||||||
|
'--all-formats',
|
||||||
|
action='store_const', dest='format', const='all',
|
||||||
|
help='download all available video formats')
|
||||||
|
video_format.add_option(
|
||||||
|
'--prefer-free-formats',
|
||||||
|
action='store_true', dest='prefer_free_formats', default=False,
|
||||||
|
help='prefer free video formats unless a specific one is requested')
|
||||||
|
video_format.add_option(
|
||||||
|
'--max-quality',
|
||||||
|
action='store', dest='format_limit', metavar='FORMAT',
|
||||||
|
help='highest quality format to download')
|
||||||
|
video_format.add_option(
|
||||||
|
'-F', '--list-formats',
|
||||||
|
action='store_true', dest='listformats',
|
||||||
|
help='list all available formats')
|
||||||
|
video_format.add_option(
|
||||||
|
'--youtube-include-dash-manifest',
|
||||||
|
action='store_true', dest='youtube_include_dash_manifest', default=True,
|
||||||
|
help=optparse.SUPPRESS_HELP)
|
||||||
|
video_format.add_option(
|
||||||
|
'--youtube-skip-dash-manifest',
|
||||||
|
action='store_false', dest='youtube_include_dash_manifest',
|
||||||
|
help='Do not download the DASH manifest on YouTube videos')
|
||||||
|
|
||||||
video_format.add_option('-f', '--format',
|
subtitles = optparse.OptionGroup(parser, 'Subtitle Options')
|
||||||
action='store', dest='format', metavar='FORMAT', default=None,
|
subtitles.add_option(
|
||||||
help='video format code, specify the order of preference using slashes: -f 22/17/18 . -f mp4 , -f m4a and -f flv are also supported. You can also use the special names "best", "bestvideo", "bestaudio", "worst", "worstvideo" and "worstaudio". By default, youtube-dl will pick the best quality. Use commas to download multiple audio formats, such as -f 136/137/mp4/bestvideo,140/m4a/bestaudio')
|
'--write-sub', '--write-srt',
|
||||||
video_format.add_option('--all-formats',
|
action='store_true', dest='writesubtitles', default=False,
|
||||||
action='store_const', dest='format', help='download all available video formats', const='all')
|
help='write subtitle file')
|
||||||
video_format.add_option('--prefer-free-formats',
|
subtitles.add_option(
|
||||||
action='store_true', dest='prefer_free_formats', default=False, help='prefer free video formats unless a specific one is requested')
|
'--write-auto-sub', '--write-automatic-sub',
|
||||||
video_format.add_option('--max-quality',
|
action='store_true', dest='writeautomaticsub', default=False,
|
||||||
action='store', dest='format_limit', metavar='FORMAT', help='highest quality format to download')
|
help='write automatic subtitle file (youtube only)')
|
||||||
video_format.add_option('-F', '--list-formats',
|
subtitles.add_option(
|
||||||
action='store_true', dest='listformats', help='list all available formats')
|
'--all-subs',
|
||||||
|
action='store_true', dest='allsubtitles', default=False,
|
||||||
|
help='downloads all the available subtitles of the video')
|
||||||
|
subtitles.add_option(
|
||||||
|
'--list-subs',
|
||||||
|
action='store_true', dest='listsubtitles', default=False,
|
||||||
|
help='lists all available subtitles for the video')
|
||||||
|
subtitles.add_option(
|
||||||
|
'--sub-format',
|
||||||
|
action='store', dest='subtitlesformat', metavar='FORMAT', default='srt',
|
||||||
|
help='subtitle format (default=srt) ([sbv/vtt] youtube only)')
|
||||||
|
subtitles.add_option(
|
||||||
|
'--sub-lang', '--sub-langs', '--srt-lang',
|
||||||
|
action='callback', dest='subtitleslangs', metavar='LANGS', type='str',
|
||||||
|
default=[], callback=_comma_separated_values_options_callback,
|
||||||
|
help='languages of the subtitles to download (optional) separated by commas, use IETF language tags like \'en,pt\'')
|
||||||
|
|
||||||
subtitles.add_option('--write-sub', '--write-srt',
|
downloader = optparse.OptionGroup(parser, 'Download Options')
|
||||||
action='store_true', dest='writesubtitles',
|
downloader.add_option(
|
||||||
help='write subtitle file', default=False)
|
'-r', '--rate-limit',
|
||||||
subtitles.add_option('--write-auto-sub', '--write-automatic-sub',
|
dest='ratelimit', metavar='LIMIT',
|
||||||
action='store_true', dest='writeautomaticsub',
|
help='maximum download rate in bytes per second (e.g. 50K or 4.2M)')
|
||||||
help='write automatic subtitle file (youtube only)', default=False)
|
downloader.add_option(
|
||||||
subtitles.add_option('--all-subs',
|
'-R', '--retries',
|
||||||
action='store_true', dest='allsubtitles',
|
dest='retries', metavar='RETRIES', default=10,
|
||||||
help='downloads all the available subtitles of the video', default=False)
|
help='number of retries (default is %default)')
|
||||||
subtitles.add_option('--list-subs',
|
downloader.add_option(
|
||||||
action='store_true', dest='listsubtitles',
|
'--buffer-size',
|
||||||
help='lists all available subtitles for the video', default=False)
|
dest='buffersize', metavar='SIZE', default='1024',
|
||||||
subtitles.add_option('--sub-format',
|
help='size of download buffer (e.g. 1024 or 16K) (default is %default)')
|
||||||
action='store', dest='subtitlesformat', metavar='FORMAT',
|
downloader.add_option(
|
||||||
help='subtitle format (default=srt) ([sbv/vtt] youtube only)', default='srt')
|
'--no-resize-buffer',
|
||||||
subtitles.add_option('--sub-lang', '--sub-langs', '--srt-lang',
|
action='store_true', dest='noresizebuffer', default=False,
|
||||||
action='callback', dest='subtitleslangs', metavar='LANGS', type='str',
|
help='do not automatically adjust the buffer size. By default, the buffer size is automatically resized from an initial value of SIZE.')
|
||||||
default=[], callback=_comma_separated_values_options_callback,
|
downloader.add_option(
|
||||||
help='languages of the subtitles to download (optional) separated by commas, use IETF language tags like \'en,pt\'')
|
'--test',
|
||||||
|
action='store_true', dest='test', default=False,
|
||||||
downloader.add_option('-r', '--rate-limit',
|
help=optparse.SUPPRESS_HELP)
|
||||||
dest='ratelimit', metavar='LIMIT', help='maximum download rate in bytes per second (e.g. 50K or 4.2M)')
|
|
||||||
downloader.add_option('-R', '--retries',
|
|
||||||
dest='retries', metavar='RETRIES', help='number of retries (default is %default)', default=10)
|
|
||||||
downloader.add_option('--buffer-size',
|
|
||||||
dest='buffersize', metavar='SIZE', help='size of download buffer (e.g. 1024 or 16K) (default is %default)', default="1024")
|
|
||||||
downloader.add_option('--no-resize-buffer',
|
|
||||||
action='store_true', dest='noresizebuffer',
|
|
||||||
help='do not automatically adjust the buffer size. By default, the buffer size is automatically resized from an initial value of SIZE.', default=False)
|
|
||||||
downloader.add_option('--test', action='store_true', dest='test', default=False, help=optparse.SUPPRESS_HELP)
|
|
||||||
|
|
||||||
|
workarounds = optparse.OptionGroup(parser, 'Workarounds')
|
||||||
workarounds.add_option(
|
workarounds.add_option(
|
||||||
'--encoding', dest='encoding', metavar='ENCODING',
|
'--encoding',
|
||||||
|
dest='encoding', metavar='ENCODING',
|
||||||
help='Force the specified encoding (experimental)')
|
help='Force the specified encoding (experimental)')
|
||||||
workarounds.add_option(
|
workarounds.add_option(
|
||||||
'--no-check-certificate', action='store_true',
|
'--no-check-certificate',
|
||||||
dest='no_check_certificate', default=False,
|
action='store_true', dest='no_check_certificate', default=False,
|
||||||
help='Suppress HTTPS certificate validation.')
|
help='Suppress HTTPS certificate validation.')
|
||||||
workarounds.add_option(
|
workarounds.add_option(
|
||||||
'--prefer-insecure', '--prefer-unsecure', action='store_true', dest='prefer_insecure',
|
'--prefer-insecure',
|
||||||
|
'--prefer-unsecure', action='store_true', dest='prefer_insecure',
|
||||||
help='Use an unencrypted connection to retrieve information about the video. (Currently supported only for YouTube)')
|
help='Use an unencrypted connection to retrieve information about the video. (Currently supported only for YouTube)')
|
||||||
workarounds.add_option(
|
workarounds.add_option(
|
||||||
'--user-agent', metavar='UA',
|
'--user-agent',
|
||||||
dest='user_agent', help='specify a custom user agent')
|
metavar='UA', dest='user_agent',
|
||||||
|
help='specify a custom user agent')
|
||||||
workarounds.add_option(
|
workarounds.add_option(
|
||||||
'--referer', metavar='REF',
|
'--referer',
|
||||||
dest='referer', default=None,
|
metavar='URL', dest='referer', default=None,
|
||||||
help='specify a custom referer, use if the video access is restricted to one domain',
|
help='specify a custom referer, use if the video access is restricted to one domain',
|
||||||
)
|
)
|
||||||
workarounds.add_option(
|
workarounds.add_option(
|
||||||
'--add-header', metavar='FIELD:VALUE',
|
'--add-header',
|
||||||
dest='headers', action='append',
|
metavar='FIELD:VALUE', dest='headers', action='append',
|
||||||
help='specify a custom HTTP header and its value, separated by a colon \':\'. You can use this option multiple times',
|
help='specify a custom HTTP header and its value, separated by a colon \':\'. You can use this option multiple times',
|
||||||
)
|
)
|
||||||
workarounds.add_option(
|
workarounds.add_option(
|
||||||
'--bidi-workaround', dest='bidi_workaround', action='store_true',
|
'--bidi-workaround',
|
||||||
help=u'Work around terminals that lack bidirectional text support. Requires bidiv or fribidi executable in PATH')
|
dest='bidi_workaround', action='store_true',
|
||||||
|
help='Work around terminals that lack bidirectional text support. Requires bidiv or fribidi executable in PATH')
|
||||||
|
|
||||||
verbosity.add_option('-q', '--quiet',
|
verbosity = optparse.OptionGroup(parser, 'Verbosity / Simulation Options')
|
||||||
action='store_true', dest='quiet', help='activates quiet mode', default=False)
|
verbosity.add_option(
|
||||||
|
'-q', '--quiet',
|
||||||
|
action='store_true', dest='quiet', default=False,
|
||||||
|
help='activates quiet mode')
|
||||||
verbosity.add_option(
|
verbosity.add_option(
|
||||||
'--no-warnings',
|
'--no-warnings',
|
||||||
dest='no_warnings', action='store_true', default=False,
|
dest='no_warnings', action='store_true', default=False,
|
||||||
help='Ignore warnings')
|
help='Ignore warnings')
|
||||||
verbosity.add_option('-s', '--simulate',
|
verbosity.add_option(
|
||||||
action='store_true', dest='simulate', help='do not download the video and do not write anything to disk', default=False)
|
'-s', '--simulate',
|
||||||
verbosity.add_option('--skip-download',
|
action='store_true', dest='simulate', default=False,
|
||||||
action='store_true', dest='skip_download', help='do not download the video', default=False)
|
help='do not download the video and do not write anything to disk',)
|
||||||
verbosity.add_option('-g', '--get-url',
|
verbosity.add_option(
|
||||||
action='store_true', dest='geturl', help='simulate, quiet but print URL', default=False)
|
'--skip-download',
|
||||||
verbosity.add_option('-e', '--get-title',
|
action='store_true', dest='skip_download', default=False,
|
||||||
action='store_true', dest='gettitle', help='simulate, quiet but print title', default=False)
|
help='do not download the video',)
|
||||||
verbosity.add_option('--get-id',
|
verbosity.add_option(
|
||||||
action='store_true', dest='getid', help='simulate, quiet but print id', default=False)
|
'-g', '--get-url',
|
||||||
verbosity.add_option('--get-thumbnail',
|
action='store_true', dest='geturl', default=False,
|
||||||
action='store_true', dest='getthumbnail',
|
help='simulate, quiet but print URL')
|
||||||
help='simulate, quiet but print thumbnail URL', default=False)
|
verbosity.add_option(
|
||||||
verbosity.add_option('--get-description',
|
'-e', '--get-title',
|
||||||
action='store_true', dest='getdescription',
|
action='store_true', dest='gettitle', default=False,
|
||||||
help='simulate, quiet but print video description', default=False)
|
help='simulate, quiet but print title')
|
||||||
verbosity.add_option('--get-duration',
|
verbosity.add_option(
|
||||||
action='store_true', dest='getduration',
|
'--get-id',
|
||||||
help='simulate, quiet but print video length', default=False)
|
action='store_true', dest='getid', default=False,
|
||||||
verbosity.add_option('--get-filename',
|
help='simulate, quiet but print id')
|
||||||
action='store_true', dest='getfilename',
|
verbosity.add_option(
|
||||||
help='simulate, quiet but print output filename', default=False)
|
'--get-thumbnail',
|
||||||
verbosity.add_option('--get-format',
|
action='store_true', dest='getthumbnail', default=False,
|
||||||
action='store_true', dest='getformat',
|
help='simulate, quiet but print thumbnail URL')
|
||||||
help='simulate, quiet but print output format', default=False)
|
verbosity.add_option(
|
||||||
verbosity.add_option('-j', '--dump-json',
|
'--get-description',
|
||||||
action='store_true', dest='dumpjson',
|
action='store_true', dest='getdescription', default=False,
|
||||||
help='simulate, quiet but print JSON information. See --output for a description of available keys.', default=False)
|
help='simulate, quiet but print video description')
|
||||||
verbosity.add_option('--newline',
|
verbosity.add_option(
|
||||||
action='store_true', dest='progress_with_newline', help='output progress bar as new lines', default=False)
|
'--get-duration',
|
||||||
verbosity.add_option('--no-progress',
|
action='store_true', dest='getduration', default=False,
|
||||||
action='store_true', dest='noprogress', help='do not print progress bar', default=False)
|
help='simulate, quiet but print video length')
|
||||||
verbosity.add_option('--console-title',
|
verbosity.add_option(
|
||||||
action='store_true', dest='consoletitle',
|
'--get-filename',
|
||||||
help='display progress in console titlebar', default=False)
|
action='store_true', dest='getfilename', default=False,
|
||||||
verbosity.add_option('-v', '--verbose',
|
help='simulate, quiet but print output filename')
|
||||||
action='store_true', dest='verbose', help='print various debugging information', default=False)
|
verbosity.add_option(
|
||||||
verbosity.add_option('--dump-intermediate-pages',
|
'--get-format',
|
||||||
action='store_true', dest='dump_intermediate_pages', default=False,
|
action='store_true', dest='getformat', default=False,
|
||||||
help='print downloaded pages to debug problems (very verbose)')
|
help='simulate, quiet but print output format')
|
||||||
verbosity.add_option('--write-pages',
|
verbosity.add_option(
|
||||||
action='store_true', dest='write_pages', default=False,
|
'-j', '--dump-json',
|
||||||
help='Write downloaded intermediary pages to files in the current directory to debug problems')
|
action='store_true', dest='dumpjson', default=False,
|
||||||
verbosity.add_option('--youtube-print-sig-code',
|
help='simulate, quiet but print JSON information. See --output for a description of available keys.')
|
||||||
action='store_true', dest='youtube_print_sig_code', default=False,
|
verbosity.add_option(
|
||||||
help=optparse.SUPPRESS_HELP)
|
'--newline',
|
||||||
verbosity.add_option('--print-traffic',
|
action='store_true', dest='progress_with_newline', default=False,
|
||||||
dest='debug_printtraffic', action='store_true', default=False,
|
help='output progress bar as new lines')
|
||||||
help='Display sent and read HTTP traffic')
|
verbosity.add_option(
|
||||||
|
'--no-progress',
|
||||||
|
action='store_true', dest='noprogress', default=False,
|
||||||
|
help='do not print progress bar')
|
||||||
|
verbosity.add_option(
|
||||||
|
'--console-title',
|
||||||
|
action='store_true', dest='consoletitle', default=False,
|
||||||
|
help='display progress in console titlebar')
|
||||||
|
verbosity.add_option(
|
||||||
|
'-v', '--verbose',
|
||||||
|
action='store_true', dest='verbose', default=False,
|
||||||
|
help='print various debugging information')
|
||||||
|
verbosity.add_option(
|
||||||
|
'--dump-intermediate-pages',
|
||||||
|
action='store_true', dest='dump_intermediate_pages', default=False,
|
||||||
|
help='print downloaded pages to debug problems (very verbose)')
|
||||||
|
verbosity.add_option(
|
||||||
|
'--write-pages',
|
||||||
|
action='store_true', dest='write_pages', default=False,
|
||||||
|
help='Write downloaded intermediary pages to files in the current directory to debug problems')
|
||||||
|
verbosity.add_option(
|
||||||
|
'--youtube-print-sig-code',
|
||||||
|
action='store_true', dest='youtube_print_sig_code', default=False,
|
||||||
|
help=optparse.SUPPRESS_HELP)
|
||||||
|
verbosity.add_option(
|
||||||
|
'--print-traffic',
|
||||||
|
dest='debug_printtraffic', action='store_true', default=False,
|
||||||
|
help='Display sent and read HTTP traffic')
|
||||||
|
|
||||||
|
filesystem = optparse.OptionGroup(parser, 'Filesystem Options')
|
||||||
filesystem.add_option('-a', '--batch-file',
|
filesystem.add_option(
|
||||||
dest='batchfile', metavar='FILE', help='file containing URLs to download (\'-\' for stdin)')
|
'-a', '--batch-file',
|
||||||
filesystem.add_option('--id',
|
dest='batchfile', metavar='FILE',
|
||||||
action='store_true', dest='useid', help='use only video ID in file name', default=False)
|
help='file containing URLs to download (\'-\' for stdin)')
|
||||||
filesystem.add_option('-A', '--auto-number',
|
filesystem.add_option(
|
||||||
action='store_true', dest='autonumber',
|
'--id', default=False,
|
||||||
help='number downloaded files starting from 00000', default=False)
|
action='store_true', dest='useid', help='use only video ID in file name')
|
||||||
filesystem.add_option('-o', '--output',
|
filesystem.add_option(
|
||||||
dest='outtmpl', metavar='TEMPLATE',
|
'-A', '--auto-number',
|
||||||
help=('output filename template. Use %(title)s to get the title, '
|
action='store_true', dest='autonumber', default=False,
|
||||||
'%(uploader)s for the uploader name, %(uploader_id)s for the uploader nickname if different, '
|
help='number downloaded files starting from 00000')
|
||||||
'%(autonumber)s to get an automatically incremented number, '
|
filesystem.add_option(
|
||||||
'%(ext)s for the filename extension, '
|
'-o', '--output',
|
||||||
'%(format)s for the format description (like "22 - 1280x720" or "HD"), '
|
dest='outtmpl', metavar='TEMPLATE',
|
||||||
'%(format_id)s for the unique id of the format (like Youtube\'s itags: "137"), '
|
help=('output filename template. Use %(title)s to get the title, '
|
||||||
'%(upload_date)s for the upload date (YYYYMMDD), '
|
'%(uploader)s for the uploader name, %(uploader_id)s for the uploader nickname if different, '
|
||||||
'%(extractor)s for the provider (youtube, metacafe, etc), '
|
'%(autonumber)s to get an automatically incremented number, '
|
||||||
'%(id)s for the video id, %(playlist)s for the playlist the video is in, '
|
'%(ext)s for the filename extension, '
|
||||||
'%(playlist_index)s for the position in the playlist and %% for a literal percent. '
|
'%(format)s for the format description (like "22 - 1280x720" or "HD"), '
|
||||||
'%(height)s and %(width)s for the width and height of the video format. '
|
'%(format_id)s for the unique id of the format (like Youtube\'s itags: "137"), '
|
||||||
'%(resolution)s for a textual description of the resolution of the video format. '
|
'%(upload_date)s for the upload date (YYYYMMDD), '
|
||||||
'Use - to output to stdout. Can also be used to download to a different directory, '
|
'%(extractor)s for the provider (youtube, metacafe, etc), '
|
||||||
'for example with -o \'/my/downloads/%(uploader)s/%(title)s-%(id)s.%(ext)s\' .'))
|
'%(id)s for the video id, %(playlist)s for the playlist the video is in, '
|
||||||
filesystem.add_option('--autonumber-size',
|
'%(playlist_index)s for the position in the playlist and %% for a literal percent. '
|
||||||
dest='autonumber_size', metavar='NUMBER',
|
'%(height)s and %(width)s for the width and height of the video format. '
|
||||||
help='Specifies the number of digits in %(autonumber)s when it is present in output filename template or --auto-number option is given')
|
'%(resolution)s for a textual description of the resolution of the video format. '
|
||||||
filesystem.add_option('--restrict-filenames',
|
'Use - to output to stdout. Can also be used to download to a different directory, '
|
||||||
action='store_true', dest='restrictfilenames',
|
'for example with -o \'/my/downloads/%(uploader)s/%(title)s-%(id)s.%(ext)s\' .'))
|
||||||
help='Restrict filenames to only ASCII characters, and avoid "&" and spaces in filenames', default=False)
|
filesystem.add_option(
|
||||||
filesystem.add_option('-t', '--title',
|
'--autonumber-size',
|
||||||
action='store_true', dest='usetitle', help='[deprecated] use title in file name (default)', default=False)
|
dest='autonumber_size', metavar='NUMBER',
|
||||||
filesystem.add_option('-l', '--literal',
|
help='Specifies the number of digits in %(autonumber)s when it is present in output filename template or --auto-number option is given')
|
||||||
action='store_true', dest='usetitle', help='[deprecated] alias of --title', default=False)
|
filesystem.add_option(
|
||||||
filesystem.add_option('-w', '--no-overwrites',
|
'--restrict-filenames',
|
||||||
action='store_true', dest='nooverwrites', help='do not overwrite files', default=False)
|
action='store_true', dest='restrictfilenames', default=False,
|
||||||
filesystem.add_option('-c', '--continue',
|
help='Restrict filenames to only ASCII characters, and avoid "&" and spaces in filenames')
|
||||||
action='store_true', dest='continue_dl', help='force resume of partially downloaded files. By default, youtube-dl will resume downloads if possible.', default=True)
|
filesystem.add_option(
|
||||||
filesystem.add_option('--no-continue',
|
'-t', '--title',
|
||||||
action='store_false', dest='continue_dl',
|
action='store_true', dest='usetitle', default=False,
|
||||||
help='do not resume partially downloaded files (restart from beginning)')
|
help='[deprecated] use title in file name (default)')
|
||||||
filesystem.add_option('--no-part',
|
filesystem.add_option(
|
||||||
action='store_true', dest='nopart', help='do not use .part files', default=False)
|
'-l', '--literal', default=False,
|
||||||
filesystem.add_option('--no-mtime',
|
action='store_true', dest='usetitle',
|
||||||
action='store_false', dest='updatetime',
|
help='[deprecated] alias of --title')
|
||||||
help='do not use the Last-modified header to set the file modification time', default=True)
|
filesystem.add_option(
|
||||||
filesystem.add_option('--write-description',
|
'-w', '--no-overwrites',
|
||||||
action='store_true', dest='writedescription',
|
action='store_true', dest='nooverwrites', default=False,
|
||||||
help='write video description to a .description file', default=False)
|
help='do not overwrite files')
|
||||||
filesystem.add_option('--write-info-json',
|
filesystem.add_option(
|
||||||
action='store_true', dest='writeinfojson',
|
'-c', '--continue',
|
||||||
help='write video metadata to a .info.json file', default=False)
|
action='store_true', dest='continue_dl', default=True,
|
||||||
filesystem.add_option('--write-annotations',
|
help='force resume of partially downloaded files. By default, youtube-dl will resume downloads if possible.')
|
||||||
action='store_true', dest='writeannotations',
|
filesystem.add_option(
|
||||||
help='write video annotations to a .annotation file', default=False)
|
'--no-continue',
|
||||||
filesystem.add_option('--write-thumbnail',
|
action='store_false', dest='continue_dl',
|
||||||
action='store_true', dest='writethumbnail',
|
help='do not resume partially downloaded files (restart from beginning)')
|
||||||
help='write thumbnail image to disk', default=False)
|
filesystem.add_option(
|
||||||
filesystem.add_option('--load-info',
|
'--no-part',
|
||||||
dest='load_info_filename', metavar='FILE',
|
action='store_true', dest='nopart', default=False,
|
||||||
help='json file containing the video information (created with the "--write-json" option)')
|
help='do not use .part files - write directly into output file')
|
||||||
filesystem.add_option('--cookies',
|
filesystem.add_option(
|
||||||
dest='cookiefile', metavar='FILE', help='file to read cookies from and dump cookie jar in')
|
'--no-mtime',
|
||||||
|
action='store_false', dest='updatetime', default=True,
|
||||||
|
help='do not use the Last-modified header to set the file modification time')
|
||||||
|
filesystem.add_option(
|
||||||
|
'--write-description',
|
||||||
|
action='store_true', dest='writedescription', default=False,
|
||||||
|
help='write video description to a .description file')
|
||||||
|
filesystem.add_option(
|
||||||
|
'--write-info-json',
|
||||||
|
action='store_true', dest='writeinfojson', default=False,
|
||||||
|
help='write video metadata to a .info.json file')
|
||||||
|
filesystem.add_option(
|
||||||
|
'--write-annotations',
|
||||||
|
action='store_true', dest='writeannotations', default=False,
|
||||||
|
help='write video annotations to a .annotation file')
|
||||||
|
filesystem.add_option(
|
||||||
|
'--write-thumbnail',
|
||||||
|
action='store_true', dest='writethumbnail', default=False,
|
||||||
|
help='write thumbnail image to disk')
|
||||||
|
filesystem.add_option(
|
||||||
|
'--load-info',
|
||||||
|
dest='load_info_filename', metavar='FILE',
|
||||||
|
help='json file containing the video information (created with the "--write-json" option)')
|
||||||
|
filesystem.add_option(
|
||||||
|
'--cookies',
|
||||||
|
dest='cookiefile', metavar='FILE',
|
||||||
|
help='file to read cookies from and dump cookie jar in')
|
||||||
filesystem.add_option(
|
filesystem.add_option(
|
||||||
'--cache-dir', dest='cachedir', default=None, metavar='DIR',
|
'--cache-dir', dest='cachedir', default=None, metavar='DIR',
|
||||||
help='Location in the filesystem where youtube-dl can store some downloaded information permanently. By default $XDG_CACHE_HOME/youtube-dl or ~/.cache/youtube-dl . At the moment, only YouTube player files (for videos with obfuscated signatures) are cached, but that may change.')
|
help='Location in the filesystem where youtube-dl can store some downloaded information permanently. By default $XDG_CACHE_HOME/youtube-dl or ~/.cache/youtube-dl . At the moment, only YouTube player files (for videos with obfuscated signatures) are cached, but that may change.')
|
||||||
@ -412,36 +541,61 @@ def parseOpts(overrideArguments=None):
|
|||||||
'--no-cache-dir', action='store_const', const=False, dest='cachedir',
|
'--no-cache-dir', action='store_const', const=False, dest='cachedir',
|
||||||
help='Disable filesystem caching')
|
help='Disable filesystem caching')
|
||||||
filesystem.add_option(
|
filesystem.add_option(
|
||||||
'--rm-cache-dir', action='store_true', dest='rm_cachedir',
|
'--rm-cache-dir',
|
||||||
|
action='store_true', dest='rm_cachedir',
|
||||||
help='Delete all filesystem cache files')
|
help='Delete all filesystem cache files')
|
||||||
|
|
||||||
|
postproc = optparse.OptionGroup(parser, 'Post-processing Options')
|
||||||
postproc.add_option('-x', '--extract-audio', action='store_true', dest='extractaudio', default=False,
|
postproc.add_option(
|
||||||
help='convert video files to audio-only files (requires ffmpeg or avconv and ffprobe or avprobe)')
|
'-x', '--extract-audio',
|
||||||
postproc.add_option('--audio-format', metavar='FORMAT', dest='audioformat', default='best',
|
action='store_true', dest='extractaudio', default=False,
|
||||||
help='"best", "aac", "vorbis", "mp3", "m4a", "opus", or "wav"; best by default')
|
help='convert video files to audio-only files (requires ffmpeg or avconv and ffprobe or avprobe)')
|
||||||
postproc.add_option('--audio-quality', metavar='QUALITY', dest='audioquality', default='5',
|
postproc.add_option(
|
||||||
help='ffmpeg/avconv audio quality specification, insert a value between 0 (better) and 9 (worse) for VBR or a specific bitrate like 128K (default 5)')
|
'--audio-format', metavar='FORMAT', dest='audioformat', default='best',
|
||||||
postproc.add_option('--recode-video', metavar='FORMAT', dest='recodevideo', default=None,
|
help='"best", "aac", "vorbis", "mp3", "m4a", "opus", or "wav"; "%default" by default')
|
||||||
help='Encode the video to another format if necessary (currently supported: mp4|flv|ogg|webm|mkv)')
|
postproc.add_option(
|
||||||
postproc.add_option('-k', '--keep-video', action='store_true', dest='keepvideo', default=False,
|
'--audio-quality', metavar='QUALITY',
|
||||||
help='keeps the video file on disk after the post-processing; the video is erased by default')
|
dest='audioquality', default='5',
|
||||||
postproc.add_option('--no-post-overwrites', action='store_true', dest='nopostoverwrites', default=False,
|
help='ffmpeg/avconv audio quality specification, insert a value between 0 (better) and 9 (worse) for VBR or a specific bitrate like 128K (default %default)')
|
||||||
help='do not overwrite post-processed files; the post-processed files are overwritten by default')
|
postproc.add_option(
|
||||||
postproc.add_option('--embed-subs', action='store_true', dest='embedsubtitles', default=False,
|
'--recode-video',
|
||||||
help='embed subtitles in the video (only for mp4 videos)')
|
metavar='FORMAT', dest='recodevideo', default=None,
|
||||||
postproc.add_option('--embed-thumbnail', action='store_true', dest='embedthumbnail', default=False,
|
help='Encode the video to another format if necessary (currently supported: mp4|flv|ogg|webm|mkv)')
|
||||||
help='embed thumbnail in the audio as cover art')
|
postproc.add_option(
|
||||||
postproc.add_option('--add-metadata', action='store_true', dest='addmetadata', default=False,
|
'-k', '--keep-video',
|
||||||
help='write metadata to the video file')
|
action='store_true', dest='keepvideo', default=False,
|
||||||
postproc.add_option('--xattrs', action='store_true', dest='xattrs', default=False,
|
help='keeps the video file on disk after the post-processing; the video is erased by default')
|
||||||
help='write metadata to the video file\'s xattrs (using dublin core and xdg standards)')
|
postproc.add_option(
|
||||||
postproc.add_option('--prefer-avconv', action='store_false', dest='prefer_ffmpeg',
|
'--no-post-overwrites',
|
||||||
|
action='store_true', dest='nopostoverwrites', default=False,
|
||||||
|
help='do not overwrite post-processed files; the post-processed files are overwritten by default')
|
||||||
|
postproc.add_option(
|
||||||
|
'--embed-subs',
|
||||||
|
action='store_true', dest='embedsubtitles', default=False,
|
||||||
|
help='embed subtitles in the video (only for mp4 videos)')
|
||||||
|
postproc.add_option(
|
||||||
|
'--embed-thumbnail',
|
||||||
|
action='store_true', dest='embedthumbnail', default=False,
|
||||||
|
help='embed thumbnail in the audio as cover art')
|
||||||
|
postproc.add_option(
|
||||||
|
'--add-metadata',
|
||||||
|
action='store_true', dest='addmetadata', default=False,
|
||||||
|
help='write metadata to the video file')
|
||||||
|
postproc.add_option(
|
||||||
|
'--xattrs',
|
||||||
|
action='store_true', dest='xattrs', default=False,
|
||||||
|
help='write metadata to the video file\'s xattrs (using dublin core and xdg standards)')
|
||||||
|
postproc.add_option(
|
||||||
|
'--prefer-avconv',
|
||||||
|
action='store_false', dest='prefer_ffmpeg',
|
||||||
help='Prefer avconv over ffmpeg for running the postprocessors (default)')
|
help='Prefer avconv over ffmpeg for running the postprocessors (default)')
|
||||||
postproc.add_option('--prefer-ffmpeg', action='store_true', dest='prefer_ffmpeg',
|
postproc.add_option(
|
||||||
|
'--prefer-ffmpeg',
|
||||||
|
action='store_true', dest='prefer_ffmpeg',
|
||||||
help='Prefer ffmpeg over avconv for running the postprocessors')
|
help='Prefer ffmpeg over avconv for running the postprocessors')
|
||||||
postproc.add_option(
|
postproc.add_option(
|
||||||
'--exec', metavar='CMD', dest='exec_cmd',
|
'--exec',
|
||||||
|
metavar='CMD', dest='exec_cmd',
|
||||||
help='Execute a command on the file after downloading, similar to find\'s -exec syntax. Example: --exec \'adb push {} /sdcard/Music/ && rm {}\'' )
|
help='Execute a command on the file after downloading, similar to find\'s -exec syntax. Example: --exec \'adb push {} /sdcard/Music/ && rm {}\'' )
|
||||||
|
|
||||||
parser.add_option_group(general)
|
parser.add_option_group(general)
|
||||||
@ -458,7 +612,7 @@ def parseOpts(overrideArguments=None):
|
|||||||
if overrideArguments is not None:
|
if overrideArguments is not None:
|
||||||
opts, args = parser.parse_args(overrideArguments)
|
opts, args = parser.parse_args(overrideArguments)
|
||||||
if opts.verbose:
|
if opts.verbose:
|
||||||
write_string(u'[debug] Override config: ' + repr(overrideArguments) + '\n')
|
write_string('[debug] Override config: ' + repr(overrideArguments) + '\n')
|
||||||
else:
|
else:
|
||||||
commandLineConf = sys.argv[1:]
|
commandLineConf = sys.argv[1:]
|
||||||
if '--ignore-config' in commandLineConf:
|
if '--ignore-config' in commandLineConf:
|
||||||
@ -474,8 +628,8 @@ def parseOpts(overrideArguments=None):
|
|||||||
|
|
||||||
opts, args = parser.parse_args(argv)
|
opts, args = parser.parse_args(argv)
|
||||||
if opts.verbose:
|
if opts.verbose:
|
||||||
write_string(u'[debug] System config: ' + repr(_hide_login_info(systemConf)) + '\n')
|
write_string('[debug] System config: ' + repr(_hide_login_info(systemConf)) + '\n')
|
||||||
write_string(u'[debug] User config: ' + repr(_hide_login_info(userConf)) + '\n')
|
write_string('[debug] User config: ' + repr(_hide_login_info(userConf)) + '\n')
|
||||||
write_string(u'[debug] Command-line args: ' + repr(_hide_login_info(commandLineConf)) + '\n')
|
write_string('[debug] Command-line args: ' + repr(_hide_login_info(commandLineConf)) + '\n')
|
||||||
|
|
||||||
return parser, opts, args
|
return parser, opts, args
|
||||||
|
@ -487,7 +487,7 @@ class FFmpegMetadataPP(FFmpegPostProcessor):
|
|||||||
class FFmpegMergerPP(FFmpegPostProcessor):
|
class FFmpegMergerPP(FFmpegPostProcessor):
|
||||||
def run(self, info):
|
def run(self, info):
|
||||||
filename = info['filepath']
|
filename = info['filepath']
|
||||||
args = ['-c', 'copy']
|
args = ['-c', 'copy', '-map', '0:v:0', '-map', '1:a:0', '-shortest']
|
||||||
self._downloader.to_screen(u'[ffmpeg] Merging formats into "%s"' % filename)
|
self._downloader.to_screen(u'[ffmpeg] Merging formats into "%s"' % filename)
|
||||||
self.run_ffmpeg_multiple_files(info['__files_to_merge'], filename, args)
|
self.run_ffmpeg_multiple_files(info['__files_to_merge'], filename, args)
|
||||||
return True, info
|
return True, info
|
||||||
|
@ -673,6 +673,8 @@ class ExtractorError(Exception):
|
|||||||
expected = True
|
expected = True
|
||||||
if video_id is not None:
|
if video_id is not None:
|
||||||
msg = video_id + ': ' + msg
|
msg = video_id + ': ' + msg
|
||||||
|
if cause:
|
||||||
|
msg += u' (caused by %r)' % cause
|
||||||
if not expected:
|
if not expected:
|
||||||
msg = msg + u'; please report this issue on https://yt-dl.org/bug . Be sure to call youtube-dl with the --verbose flag and include its complete output. Make sure you are using the latest version; type youtube-dl -U to update.'
|
msg = msg + u'; please report this issue on https://yt-dl.org/bug . Be sure to call youtube-dl with the --verbose flag and include its complete output. Make sure you are using the latest version; type youtube-dl -U to update.'
|
||||||
super(ExtractorError, self).__init__(msg)
|
super(ExtractorError, self).__init__(msg)
|
||||||
@ -799,6 +801,12 @@ class YoutubeDLHandler(compat_urllib_request.HTTPHandler):
|
|||||||
del req.headers['User-agent']
|
del req.headers['User-agent']
|
||||||
req.headers['User-agent'] = req.headers['Youtubedl-user-agent']
|
req.headers['User-agent'] = req.headers['Youtubedl-user-agent']
|
||||||
del req.headers['Youtubedl-user-agent']
|
del req.headers['Youtubedl-user-agent']
|
||||||
|
|
||||||
|
if sys.version_info < (2, 7) and '#' in req.get_full_url():
|
||||||
|
# Python 2.6 is brain-dead when it comes to fragments
|
||||||
|
req._Request__original = req._Request__original.partition('#')[0]
|
||||||
|
req._Request__r_type = req._Request__r_type.partition('#')[0]
|
||||||
|
|
||||||
return req
|
return req
|
||||||
|
|
||||||
def http_response(self, req, resp):
|
def http_response(self, req, resp):
|
||||||
@ -884,7 +892,9 @@ def unified_strdate(date_str):
|
|||||||
'%d/%m/%Y',
|
'%d/%m/%Y',
|
||||||
'%d/%m/%y',
|
'%d/%m/%y',
|
||||||
'%Y/%m/%d %H:%M:%S',
|
'%Y/%m/%d %H:%M:%S',
|
||||||
|
'%d/%m/%Y %H:%M:%S',
|
||||||
'%Y-%m-%d %H:%M:%S',
|
'%Y-%m-%d %H:%M:%S',
|
||||||
|
'%Y-%m-%d %H:%M:%S.%f',
|
||||||
'%d.%m.%Y %H:%M',
|
'%d.%m.%Y %H:%M',
|
||||||
'%d.%m.%Y %H.%M',
|
'%d.%m.%Y %H.%M',
|
||||||
'%Y-%m-%dT%H:%M:%SZ',
|
'%Y-%m-%dT%H:%M:%SZ',
|
||||||
@ -1384,14 +1394,16 @@ def check_executable(exe, args=[]):
|
|||||||
|
|
||||||
|
|
||||||
class PagedList(object):
|
class PagedList(object):
|
||||||
def __init__(self, pagefunc, pagesize):
|
|
||||||
self._pagefunc = pagefunc
|
|
||||||
self._pagesize = pagesize
|
|
||||||
|
|
||||||
def __len__(self):
|
def __len__(self):
|
||||||
# This is only useful for tests
|
# This is only useful for tests
|
||||||
return len(self.getslice())
|
return len(self.getslice())
|
||||||
|
|
||||||
|
|
||||||
|
class OnDemandPagedList(PagedList):
|
||||||
|
def __init__(self, pagefunc, pagesize):
|
||||||
|
self._pagefunc = pagefunc
|
||||||
|
self._pagesize = pagesize
|
||||||
|
|
||||||
def getslice(self, start=0, end=None):
|
def getslice(self, start=0, end=None):
|
||||||
res = []
|
res = []
|
||||||
for pagenum in itertools.count(start // self._pagesize):
|
for pagenum in itertools.count(start // self._pagesize):
|
||||||
@ -1430,6 +1442,35 @@ class PagedList(object):
|
|||||||
return res
|
return res
|
||||||
|
|
||||||
|
|
||||||
|
class InAdvancePagedList(PagedList):
|
||||||
|
def __init__(self, pagefunc, pagecount, pagesize):
|
||||||
|
self._pagefunc = pagefunc
|
||||||
|
self._pagecount = pagecount
|
||||||
|
self._pagesize = pagesize
|
||||||
|
|
||||||
|
def getslice(self, start=0, end=None):
|
||||||
|
res = []
|
||||||
|
start_page = start // self._pagesize
|
||||||
|
end_page = (
|
||||||
|
self._pagecount if end is None else (end // self._pagesize + 1))
|
||||||
|
skip_elems = start - start_page * self._pagesize
|
||||||
|
only_more = None if end is None else end - start
|
||||||
|
for pagenum in range(start_page, end_page):
|
||||||
|
page = list(self._pagefunc(pagenum))
|
||||||
|
if skip_elems:
|
||||||
|
page = page[skip_elems:]
|
||||||
|
skip_elems = None
|
||||||
|
if only_more is not None:
|
||||||
|
if len(page) < only_more:
|
||||||
|
only_more -= len(page)
|
||||||
|
else:
|
||||||
|
page = page[:only_more]
|
||||||
|
res.extend(page)
|
||||||
|
break
|
||||||
|
res.extend(page)
|
||||||
|
return res
|
||||||
|
|
||||||
|
|
||||||
def uppercase_escape(s):
|
def uppercase_escape(s):
|
||||||
unicode_escape = codecs.getdecoder('unicode_escape')
|
unicode_escape = codecs.getdecoder('unicode_escape')
|
||||||
return re.sub(
|
return re.sub(
|
||||||
@ -1534,33 +1575,37 @@ US_RATINGS = {
|
|||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
def parse_age_limit(s):
|
||||||
|
if s is None:
|
||||||
|
return None
|
||||||
|
m = re.match(r'^(?P<age>\d{1,2})\+?$', s)
|
||||||
|
return int(m.group('age')) if m else US_RATINGS.get(s, None)
|
||||||
|
|
||||||
|
|
||||||
def strip_jsonp(code):
|
def strip_jsonp(code):
|
||||||
return re.sub(r'(?s)^[a-zA-Z0-9_]+\s*\(\s*(.*)\);?\s*?\s*$', r'\1', code)
|
return re.sub(r'(?s)^[a-zA-Z0-9_]+\s*\(\s*(.*)\);?\s*?\s*$', r'\1', code)
|
||||||
|
|
||||||
|
|
||||||
def js_to_json(code):
|
def js_to_json(code):
|
||||||
def fix_kv(m):
|
def fix_kv(m):
|
||||||
key = m.group(2)
|
v = m.group(0)
|
||||||
if key.startswith("'"):
|
if v in ('true', 'false', 'null'):
|
||||||
assert key.endswith("'")
|
return v
|
||||||
assert '"' not in key
|
if v.startswith('"'):
|
||||||
key = '"%s"' % key[1:-1]
|
return v
|
||||||
elif not key.startswith('"'):
|
if v.startswith("'"):
|
||||||
key = '"%s"' % key
|
v = v[1:-1]
|
||||||
|
v = re.sub(r"\\\\|\\'|\"", lambda m: {
|
||||||
value = m.group(4)
|
'\\\\': '\\\\',
|
||||||
if value.startswith("'"):
|
"\\'": "'",
|
||||||
assert value.endswith("'")
|
'"': '\\"',
|
||||||
assert '"' not in value
|
}[m.group(0)], v)
|
||||||
value = '"%s"' % value[1:-1]
|
return '"%s"' % v
|
||||||
|
|
||||||
return m.group(1) + key + m.group(3) + value
|
|
||||||
|
|
||||||
res = re.sub(r'''(?x)
|
res = re.sub(r'''(?x)
|
||||||
([{,]\s*)
|
"(?:[^"\\]*(?:\\\\|\\")?)*"|
|
||||||
("[^"]*"|\'[^\']*\'|[a-z0-9A-Z]+)
|
'(?:[^'\\]*(?:\\\\|\\')?)*'|
|
||||||
(:\s*)
|
[a-zA-Z_][a-zA-Z_0-9]*
|
||||||
([0-9.]+|true|false|"[^"]*"|\'[^\']*\'|\[|\{)
|
|
||||||
''', fix_kv, code)
|
''', fix_kv, code)
|
||||||
res = re.sub(r',(\s*\])', lambda m: m.group(1), res)
|
res = re.sub(r',(\s*\])', lambda m: m.group(1), res)
|
||||||
return res
|
return res
|
||||||
|
@ -1,2 +1,2 @@
|
|||||||
|
|
||||||
__version__ = '2014.09.24.1'
|
__version__ = '2014.10.13'
|
||||||
|
Reference in New Issue
Block a user