Compare commits: 2014.11.02...2014.11.23 (176 commits)
Commit SHA1s:

b9042def9d aa79ac0c82 88125905cf dd60be2bf9 119b3caa46 49f0da7ae1 2cead7e7bc 2c64b8ba63
42e12102a9 6127693ed9 71069d2157 f3391db889 9b32eca3ce ec06f0f610 e6c9c8f6ee 85b9275517
dfd5313afd be53e2a737 a1c68b9ef2 4d46c1c68c d6f714f321 8569f3d629 fed5d03260 6adeffa7c6
b244b5c3f9 f42c190769 c9bf41145f 5239075bb6 84437adfa3 732ea2f09b aff2f4f4f5 3b9f631c41
3ba098a6a5 1394646a0a 61ee5aeb73 07e378fa18 e07e931375 480b7c32a9 f56875f271 92120217eb
37eddd3143 0857baade3 23ad44b57b f48d3e9bbc fbf94a7815 1921b24551 28e614de5c cd9ad1d7e8
162f54eca6 33a266f4ba 6b592d93a2 4686ae4b64 8d05f2c16a a4bb83956c eb5376044c 3cbcff8a2d
e983cf5277 0ab1ca5501 4baafa229d 7f3e33a147 b7558d9881 a0f59cdcb4 a4bc433619 b6b70730bf
6a68bb574a 0cf166ad4f 2707b50ffe 939fe70de0 89c15fe0b3 ec5f601670 8caa0c9779 e2548b5b25
bbefcf04bf c7b0add86f a0155d93d9 00d9ef0b70 0cc8888038 c735450e07 71f8c7ce7a 5fee0eeac0
eb4157fd17 69ede8ef81 609a61e3e6 bf951c5e29 af63fed7d8 68d1d41c03 3deed1e91a 11b28e93d3
c3d582985f 4c0924bb24 3fa5bb3802 c47ec62b83 e4bdb37ec6 3e6e4999ca 0e15e725a0 437f68d868
d91d124081 2d42905b68 cbe71cb41d 894dd8682e 9e05d039e0 bbd5f2de5e 73689dafbf 4b50ba0989
5ccaddf5b1 0b201a3134 ffe38646ca b703ab4d7f c6afed48ff 732c848c14 9d2a4dae90 7009a9047a
498942f187 28465df1ff ef89dba58f 13ba3a6461 8f6ec4bbe6 c295490830 eb4cb42a02 7a8cbc72b2
2774852c2f bbcc21efd1 60526d6bcb 1d4df56d09 a1cf99d03a 3c6af203cc 1a92e086a7 519c73f267
a6dae6c09c f866e474f3 8bb9b97c97 d6fdc38682 c2b61af548 2fdbf27ad8 3898c8a7b2 29ed169cd6
b868c972d1 9908e03528 1fe8fb8c20 5d63b0aa93 4164f0117e 37aab27808 6110bbbfdd cde9b380e6
dab647a7b6 a316a83d2b 81b22aee8b a80c96eab0 20436c30c9 3828505646 11fba1751d 12ea2f30cf
9c3e870393 44789f2457 711ede6e1b a32f253112 94bd361318 acd40f64ed 766306450d e7642ab572
bdf9701729 b5af6fcdad 278143df5b fdca55fe34 4f195f55f0 ac35c26686 982a58d049 42f7d2f588
39f0a2a6b7 ecc0c5ee01 451948b28c baa708036c 8c25f81bee 4c83c96795 9580711841 ccdd0ffb80
AUTHORS (3 additions)

@@ -80,3 +80,6 @@ Damon Timm
 winwon
 Xavier Beynon
 Gabriel Schubiner
+xantares
+Jan Matějka
+Mauroy Sébastien
README.md (31 changes)

@@ -131,17 +131,19 @@ which means you can modify it, redistribute it or use it however you like.
 %(upload_date)s for the upload date
 (YYYYMMDD), %(extractor)s for the provider
 (youtube, metacafe, etc), %(id)s for the
-video id, %(playlist)s for the playlist the
+video id, %(playlist_title)s,
+%(playlist_id)s, or %(playlist)s (=title if
+present, ID otherwise) for the playlist the
 video is in, %(playlist_index)s for the
-position in the playlist and %% for a
-literal percent. %(height)s and %(width)s
-for the width and height of the video
-format. %(resolution)s for a textual
+position in the playlist. %(height)s and
+%(width)s for the width and height of the
+video format. %(resolution)s for a textual
 description of the resolution of the video
-format. Use - to output to stdout. Can also
-be used to download to a different
-directory, for example with -o '/my/downloa
-ds/%(uploader)s/%(title)s-%(id)s.%(ext)s' .
+format. %% for a literal percent. Use - to
+output to stdout. Can also be used to
+download to a different directory, for
+example with -o '/my/downloads/%(uploader)s
+/%(title)s-%(id)s.%(ext)s' .
 --autonumber-size NUMBER    Specifies the number of digits in
 %(autonumber)s when it is present in output
 filename template or --auto-number option

@@ -239,8 +241,13 @@ which means you can modify it, redistribute it or use it however you like.
 "worst", "worstvideo" and "worstaudio". By
 default, youtube-dl will pick the best
 quality. Use commas to download multiple
 audio formats, such as -f
-136/137/mp4/bestvideo,140/m4a/bestaudio
+136/137/mp4/bestvideo,140/m4a/bestaudio.
+You can merge the video and audio of two
+formats into a single file using -f <video-
+format>+<audio-format> (requires ffmpeg or
+avconv), for example -f
+bestvideo+bestaudio.
 --all-formats               download all available video formats
 --prefer-free-formats       prefer free video formats unless a specific
 one is requested

@@ -500,7 +507,7 @@ If you want to add support for a new site, you can follow this quick list (assum
 6. Run `python test/test_download.py TestDownload.test_YourExtractor`. This *should fail* at first, but you can continually re-run it until you're done. If you decide to add more than one test, then rename ``_TEST`` to ``_TESTS`` and make it into a list of dictionaries. The tests will be then be named `TestDownload.test_YourExtractor`, `TestDownload.test_YourExtractor_1`, `TestDownload.test_YourExtractor_2`, etc.
 7. Have a look at [`youtube_dl/common/extractor/common.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py) for possible helper methods and a [detailed description of what your extractor should return](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py#L38). Add tests and code for as many as you want.
 8. If you can, check the code with [pyflakes](https://pypi.python.org/pypi/pyflakes) (a good idea) and [pep8](https://pypi.python.org/pypi/pep8) (optional, ignore E501).
-9. When the tests pass, [add](https://www.kernel.org/pub/software/scm/git/docs/git-add.html) the new files and [commit](https://www.kernel.org/pub/software/scm/git/docs/git-commit.html) them and [push](https://www.kernel.org/pub/software/scm/git/docs/git-push.html) the result, like this:
+9. When the tests pass, [add](http://git-scm.com/docs/git-add) the new files and [commit](http://git-scm.com/docs/git-commit) them and [push](http://git-scm.com/docs/git-push) the result, like this:
 
         $ git add youtube_dl/extractor/__init__.py
         $ git add youtube_dl/extractor/yourextractor.py
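The README changes above document two features added in this range: the new `%(playlist_title)s`/`%(playlist_id)s` output-template fields and `-f <video-format>+<audio-format>` merging. A minimal sketch of both via the embedding API (the playlist URL is a placeholder; merging requires ffmpeg or avconv on PATH):

```python
from youtube_dl import YoutubeDL

ydl = YoutubeDL({
    # New template fields from this range: %(playlist_title)s, %(playlist_id)s
    'outtmpl': '%(playlist_title)s/%(playlist_index)s - %(title)s.%(ext)s',
    # Download best video and best audio, then merge them into one file
    'format': 'bestvideo+bestaudio',
})
ydl.download(['https://www.youtube.com/playlist?list=PLACEHOLDER'])
```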
test/helper.py

@@ -57,7 +57,7 @@ class FakeYDL(YoutubeDL):
         # Different instances of the downloader can't share the same dictionary
         # some test set the "sublang" parameter, which would break the md5 checks.
         params = get_params(override=override)
-        super(FakeYDL, self).__init__(params)
+        super(FakeYDL, self).__init__(params, auto_init=False)
         self.result = []
 
     def to_screen(self, s, skip_eol=None):

@@ -145,7 +145,8 @@ def expect_info_dict(self, expected_dict, got_dict):
        info_dict_str = ''.join(
            '    %s: %s,\n' % (_repr(k), _repr(v))
            for k, v in test_info_dict.items())
-       write_string('\n"info_dict": {\n' + info_dict_str + '}\n', out=sys.stderr)
+       write_string(
+           '\n\'info_dict\': {\n' + info_dict_str + '}\n', out=sys.stderr)
        self.assertFalse(
            missing_keys,
            'Missing keys in test definition: %s' % (
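The `auto_init=False` argument passed above is new in this range: it skips the automatic registration of the default info extractors and post-processors, so a test harness can set up exactly what it needs. A minimal sketch of the pattern (the same one test_download.py uses below):

```python
from youtube_dl import YoutubeDL

# Skip the automatic setup, then opt back in explicitly.
ydl = YoutubeDL({'skip_download': True}, auto_init=False)
ydl.add_default_info_extractors()
```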
test/swftests/ConstArrayAccess.as (new file, 18 lines)

@@ -0,0 +1,18 @@
+// input: []
+// output: 4
+
+package {
+public class ConstArrayAccess {
+    private static const x:int = 2;
+    private static const ar:Array = ["42", "3411"];
+
+    public static function main():int{
+        var c:ConstArrayAccess = new ConstArrayAccess();
+        return c.f();
+    }
+
+    public function f(): int {
+        return ar[1].length;
+    }
+}
+}

test/swftests/ConstantInt.as (new file, 12 lines)

@@ -0,0 +1,12 @@
+// input: []
+// output: 2
+
+package {
+public class ConstantInt {
+    private static const x:int = 2;
+
+    public static function main():int{
+        return x;
+    }
+}
+}

test/swftests/DictCall.as (new file, 10 lines)

@@ -0,0 +1,10 @@
+// input: [{"x": 1, "y": 2}]
+// output: 3
+
+package {
+public class DictCall {
+    public static function main(d:Object):int{
+        return d.x + d.y;
+    }
+}
+}

test/swftests/EqualsOperator.as (new file, 10 lines)

@@ -0,0 +1,10 @@
+// input: []
+// output: false
+
+package {
+public class EqualsOperator {
+    public static function main():Boolean{
+        return 1 == 2;
+    }
+}
+}

test/swftests/MemberAssignment.as (new file, 22 lines)

@@ -0,0 +1,22 @@
+// input: [1]
+// output: 2
+
+package {
+public class MemberAssignment {
+    public var v:int;
+
+    public function g():int {
+        return this.v;
+    }
+
+    public function f(a:int):int{
+        this.v = a;
+        return this.v + this.g();
+    }
+
+    public static function main(a:int): int {
+        var v:MemberAssignment = new MemberAssignment();
+        return v.f(a);
+    }
+}
+}

test/swftests/NeOperator.as (new file, 24 lines)

@@ -0,0 +1,24 @@
+// input: []
+// output: 123
+
+package {
+public class NeOperator {
+    public static function main(): int {
+        var res:int = 0;
+        if (1 != 2) {
+            res += 3;
+        } else {
+            res += 4;
+        }
+        if (2 != 2) {
+            res += 10;
+        } else {
+            res += 20;
+        }
+        if (9 == 9) {
+            res += 100;
+        }
+        return res;
+    }
+}
+}

test/swftests/PrivateVoidCall.as (new file, 22 lines)

@@ -0,0 +1,22 @@
+// input: []
+// output: 9
+
+package {
+public class PrivateVoidCall {
+    public static function main():int{
+        var f:OtherClass = new OtherClass();
+        f.func();
+        return 9;
+    }
+}
+}
+
+class OtherClass {
+    private function pf():void {
+        ;
+    }
+
+    public function func():void {
+        this.pf();
+    }
+}

test/swftests/StringBasics.as (new file, 11 lines)

@@ -0,0 +1,11 @@
+// input: []
+// output: 3
+
+package {
+public class StringBasics {
+    public static function main():int{
+        var s:String = "abc";
+        return s.length;
+    }
+}
+}

test/swftests/StringCharCodeAt.as (new file, 11 lines)

@@ -0,0 +1,11 @@
+// input: []
+// output: 9897
+
+package {
+public class StringCharCodeAt {
+    public static function main():int{
+        var s:String = "abc";
+        return s.charCodeAt(1) * 100 + s.charCodeAt();
+    }
+}
+}

test/swftests/StringConversion.as (new file, 11 lines)

@@ -0,0 +1,11 @@
+// input: []
+// output: 2
+
+package {
+public class StringConversion {
+    public static function main():int{
+        var s:String = String(99);
+        return s.length;
+    }
+}
+}
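Each of the new ActionScript files above follows the swftests convention: a `// input:` comment holding a JSON list of arguments for `main()`, and a `// output:` comment holding the expected return value as JSON. A rough sketch of parsing that header (a hypothetical helper for illustration; the real harness lives in test/test_swfinterp.py):

```python
import json
import re

def parse_swf_test_header(source):
    # Hypothetical illustration of the '// input:' / '// output:' convention.
    input_args = json.loads(re.search(r'//\s*input:\s*(.*)', source).group(1))
    expected = json.loads(re.search(r'//\s*output:\s*(.*)', source).group(1))
    return input_args, expected

args, expected = parse_swf_test_header('// input: []\n// output: 4\n')
assert args == [] and expected == 4
```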
test/test_age_restriction.py

@@ -1,4 +1,5 @@
 #!/usr/bin/env python
+from __future__ import unicode_literals
 
 # Allow direct execution
 import os

@@ -19,7 +20,7 @@ def _download_restricted(url, filename, age):
         'age_limit': age,
         'skip_download': True,
         'writeinfojson': True,
-        "outtmpl": "%(id)s.%(ext)s",
+        'outtmpl': '%(id)s.%(ext)s',
     }
     ydl = YoutubeDL(params)
     ydl.add_default_info_extractors()
test/test_compat.py (new file, 46 lines)

@@ -0,0 +1,46 @@
+#!/usr/bin/env python
+# coding: utf-8
+
+from __future__ import unicode_literals
+
+# Allow direct execution
+import os
+import sys
+import unittest
+sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+
+
+from youtube_dl.utils import get_filesystem_encoding
+from youtube_dl.compat import (
+    compat_getenv,
+    compat_expanduser,
+)
+
+
+class TestCompat(unittest.TestCase):
+    def test_compat_getenv(self):
+        test_str = 'тест'
+        os.environ['YOUTUBE-DL-TEST'] = (
+            test_str if sys.version_info >= (3, 0)
+            else test_str.encode(get_filesystem_encoding()))
+        self.assertEqual(compat_getenv('YOUTUBE-DL-TEST'), test_str)
+
+    def test_compat_expanduser(self):
+        old_home = os.environ.get('HOME')
+        test_str = 'C:\Documents and Settings\тест\Application Data'
+        os.environ['HOME'] = (
+            test_str if sys.version_info >= (3, 0)
+            else test_str.encode(get_filesystem_encoding()))
+        self.assertEqual(compat_expanduser('~'), test_str)
+        os.environ['HOME'] = old_home
+
+    def test_all_present(self):
+        import youtube_dl.compat
+        all_names = youtube_dl.compat.__all__
+        present_names = set(filter(
+            lambda c: '_' in c and not c.startswith('_'),
+            dir(youtube_dl.compat))) - set(['unicode_literals'])
+        self.assertEqual(all_names, sorted(present_names))
+
+if __name__ == '__main__':
+    unittest.main()
test/test_download.py

@@ -1,5 +1,7 @@
 #!/usr/bin/env python
 
+from __future__ import unicode_literals
+
 # Allow direct execution
 import os
 import sys

@@ -23,10 +25,12 @@ import json
 import socket
 
 import youtube_dl.YoutubeDL
-from youtube_dl.utils import (
+from youtube_dl.compat import (
     compat_http_client,
     compat_urllib_error,
     compat_HTTPError,
+)
+from youtube_dl.utils import (
     DownloadError,
     ExtractorError,
     format_bytes,

@@ -94,7 +98,7 @@ def generator(test_case):
     params.setdefault('extract_flat', True)
     params.setdefault('skip_download', True)
 
-    ydl = YoutubeDL(params)
+    ydl = YoutubeDL(params, auto_init=False)
     ydl.add_default_info_extractors()
     finished_hook_called = set()
     def _hook(status):

@@ -208,9 +212,9 @@ for n, test_case in enumerate(defs):
     tname = 'test_' + str(test_case['name'])
     i = 1
     while hasattr(TestDownload, tname):
-        tname = 'test_' + str(test_case['name']) + '_' + str(i)
+        tname = 'test_%s_%d' % (test_case['name'], i)
         i += 1
-    test_method.__name__ = tname
+    test_method.__name__ = str(tname)
     setattr(TestDownload, test_method.__name__, test_method)
     del test_method
test/test_execution.py

@@ -1,3 +1,6 @@
+#!/usr/bin/env python
+from __future__ import unicode_literals
+
 import unittest
 
 import sys

@@ -6,17 +9,19 @@ import subprocess
 
 rootDir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
 
+
 try:
     _DEV_NULL = subprocess.DEVNULL
 except AttributeError:
     _DEV_NULL = open(os.devnull, 'wb')
 
+
 class TestExecution(unittest.TestCase):
     def test_import(self):
         subprocess.check_call([sys.executable, '-c', 'import youtube_dl'], cwd=rootDir)
 
     def test_module_exec(self):
-        if sys.version_info >= (2,7):  # Python 2.6 doesn't support package execution
+        if sys.version_info >= (2, 7):  # Python 2.6 doesn't support package execution
             subprocess.check_call([sys.executable, '-m', 'youtube_dl', '--version'], cwd=rootDir, stdout=_DEV_NULL)
 
     def test_main_exec(self):
test/test_subtitles.py

@@ -1,4 +1,5 @@
 #!/usr/bin/env python
+from __future__ import unicode_literals
 
 # Allow direct execution
 import os

@@ -74,7 +75,7 @@ class TestYoutubeSubtitles(BaseTestSubtitles):
         self.assertEqual(md5(subtitles['en']), '3cb210999d3e021bd6c7f0ea751eab06')
 
     def test_youtube_list_subtitles(self):
-        self.DL.expect_warning(u'Video doesn\'t have automatic captions')
+        self.DL.expect_warning('Video doesn\'t have automatic captions')
         self.DL.params['listsubtitles'] = True
         info_dict = self.getInfoDict()
         self.assertEqual(info_dict, None)

@@ -87,7 +88,7 @@ class TestYoutubeSubtitles(BaseTestSubtitles):
         self.assertTrue(subtitles['it'] is not None)
 
     def test_youtube_nosubtitles(self):
-        self.DL.expect_warning(u'video doesn\'t have subtitles')
+        self.DL.expect_warning('video doesn\'t have subtitles')
         self.url = 'n5BB19UTcdA'
         self.DL.params['writesubtitles'] = True
         self.DL.params['allsubtitles'] = True

@@ -101,7 +102,7 @@ class TestYoutubeSubtitles(BaseTestSubtitles):
         self.DL.params['subtitleslangs'] = langs
         subtitles = self.getSubtitles()
         for lang in langs:
-            self.assertTrue(subtitles.get(lang) is not None, u'Subtitles for \'%s\' not extracted' % lang)
+            self.assertTrue(subtitles.get(lang) is not None, 'Subtitles for \'%s\' not extracted' % lang)
 
 
 class TestDailymotionSubtitles(BaseTestSubtitles):

@@ -130,20 +131,20 @@ class TestDailymotionSubtitles(BaseTestSubtitles):
         self.assertEqual(len(subtitles.keys()), 5)
 
     def test_list_subtitles(self):
-        self.DL.expect_warning(u'Automatic Captions not supported by this server')
+        self.DL.expect_warning('Automatic Captions not supported by this server')
         self.DL.params['listsubtitles'] = True
         info_dict = self.getInfoDict()
         self.assertEqual(info_dict, None)
 
     def test_automatic_captions(self):
-        self.DL.expect_warning(u'Automatic Captions not supported by this server')
+        self.DL.expect_warning('Automatic Captions not supported by this server')
         self.DL.params['writeautomaticsub'] = True
         self.DL.params['subtitleslang'] = ['en']
         subtitles = self.getSubtitles()
         self.assertTrue(len(subtitles.keys()) == 0)
 
     def test_nosubtitles(self):
-        self.DL.expect_warning(u'video doesn\'t have subtitles')
+        self.DL.expect_warning('video doesn\'t have subtitles')
         self.url = 'http://www.dailymotion.com/video/x12u166_le-zapping-tele-star-du-08-aout-2013_tv'
         self.DL.params['writesubtitles'] = True
         self.DL.params['allsubtitles'] = True

@@ -156,7 +157,7 @@ class TestDailymotionSubtitles(BaseTestSubtitles):
         self.DL.params['subtitleslangs'] = langs
         subtitles = self.getSubtitles()
         for lang in langs:
-            self.assertTrue(subtitles.get(lang) is not None, u'Subtitles for \'%s\' not extracted' % lang)
+            self.assertTrue(subtitles.get(lang) is not None, 'Subtitles for \'%s\' not extracted' % lang)
 
 
 class TestTedSubtitles(BaseTestSubtitles):

@@ -185,13 +186,13 @@ class TestTedSubtitles(BaseTestSubtitles):
         self.assertTrue(len(subtitles.keys()) >= 28)
 
     def test_list_subtitles(self):
-        self.DL.expect_warning(u'Automatic Captions not supported by this server')
+        self.DL.expect_warning('Automatic Captions not supported by this server')
         self.DL.params['listsubtitles'] = True
         info_dict = self.getInfoDict()
         self.assertEqual(info_dict, None)
 
     def test_automatic_captions(self):
-        self.DL.expect_warning(u'Automatic Captions not supported by this server')
+        self.DL.expect_warning('Automatic Captions not supported by this server')
         self.DL.params['writeautomaticsub'] = True
         self.DL.params['subtitleslang'] = ['en']
         subtitles = self.getSubtitles()

@@ -203,7 +204,7 @@ class TestTedSubtitles(BaseTestSubtitles):
         self.DL.params['subtitleslangs'] = langs
         subtitles = self.getSubtitles()
         for lang in langs:
-            self.assertTrue(subtitles.get(lang) is not None, u'Subtitles for \'%s\' not extracted' % lang)
+            self.assertTrue(subtitles.get(lang) is not None, 'Subtitles for \'%s\' not extracted' % lang)
 
 
 class TestBlipTVSubtitles(BaseTestSubtitles):

@@ -211,13 +212,13 @@ class TestBlipTVSubtitles(BaseTestSubtitles):
     IE = BlipTVIE
 
     def test_list_subtitles(self):
-        self.DL.expect_warning(u'Automatic Captions not supported by this server')
+        self.DL.expect_warning('Automatic Captions not supported by this server')
        self.DL.params['listsubtitles'] = True
        info_dict = self.getInfoDict()
        self.assertEqual(info_dict, None)
 
     def test_allsubtitles(self):
-        self.DL.expect_warning(u'Automatic Captions not supported by this server')
+        self.DL.expect_warning('Automatic Captions not supported by this server')
         self.DL.params['writesubtitles'] = True
         self.DL.params['allsubtitles'] = True
         subtitles = self.getSubtitles()

@@ -251,20 +252,20 @@ class TestVimeoSubtitles(BaseTestSubtitles):
         self.assertEqual(set(subtitles.keys()), set(['de', 'en', 'es', 'fr']))
 
     def test_list_subtitles(self):
-        self.DL.expect_warning(u'Automatic Captions not supported by this server')
+        self.DL.expect_warning('Automatic Captions not supported by this server')
         self.DL.params['listsubtitles'] = True
         info_dict = self.getInfoDict()
         self.assertEqual(info_dict, None)
 
     def test_automatic_captions(self):
-        self.DL.expect_warning(u'Automatic Captions not supported by this server')
+        self.DL.expect_warning('Automatic Captions not supported by this server')
         self.DL.params['writeautomaticsub'] = True
         self.DL.params['subtitleslang'] = ['en']
         subtitles = self.getSubtitles()
         self.assertTrue(len(subtitles.keys()) == 0)
 
     def test_nosubtitles(self):
-        self.DL.expect_warning(u'video doesn\'t have subtitles')
+        self.DL.expect_warning('video doesn\'t have subtitles')
         self.url = 'http://vimeo.com/56015672'
         self.DL.params['writesubtitles'] = True
         self.DL.params['allsubtitles'] = True

@@ -277,7 +278,7 @@ class TestVimeoSubtitles(BaseTestSubtitles):
         self.DL.params['subtitleslangs'] = langs
         subtitles = self.getSubtitles()
         for lang in langs:
-            self.assertTrue(subtitles.get(lang) is not None, u'Subtitles for \'%s\' not extracted' % lang)
+            self.assertTrue(subtitles.get(lang) is not None, 'Subtitles for \'%s\' not extracted' % lang)
 
 
 class TestWallaSubtitles(BaseTestSubtitles):

@@ -285,13 +286,13 @@ class TestWallaSubtitles(BaseTestSubtitles):
     IE = WallaIE
 
     def test_list_subtitles(self):
-        self.DL.expect_warning(u'Automatic Captions not supported by this server')
+        self.DL.expect_warning('Automatic Captions not supported by this server')
         self.DL.params['listsubtitles'] = True
         info_dict = self.getInfoDict()
         self.assertEqual(info_dict, None)
 
     def test_allsubtitles(self):
-        self.DL.expect_warning(u'Automatic Captions not supported by this server')
+        self.DL.expect_warning('Automatic Captions not supported by this server')
         self.DL.params['writesubtitles'] = True
         self.DL.params['allsubtitles'] = True
         subtitles = self.getSubtitles()

@@ -299,7 +300,7 @@ class TestWallaSubtitles(BaseTestSubtitles):
         self.assertEqual(md5(subtitles['heb']), 'e758c5d7cb982f6bef14f377ec7a3920')
 
     def test_nosubtitles(self):
-        self.DL.expect_warning(u'video doesn\'t have subtitles')
+        self.DL.expect_warning('video doesn\'t have subtitles')
         self.url = 'http://vod.walla.co.il/movie/2642630/one-direction-all-for-one'
         self.DL.params['writesubtitles'] = True
         self.DL.params['allsubtitles'] = True
test/test_swfinterp.py

@@ -1,4 +1,5 @@
 #!/usr/bin/env python
+from __future__ import unicode_literals
 
 # Allow direct execution
 import os

@@ -37,7 +38,9 @@ def _make_testfunc(testfile):
             or os.path.getmtime(swf_file) < os.path.getmtime(as_file)):
         # Recompile
         try:
-            subprocess.check_call(['mxmlc', '-output', swf_file, as_file])
+            subprocess.check_call([
+                'mxmlc', '-output', swf_file,
+                '-static-link-runtime-shared-libraries', as_file])
         except OSError as ose:
             if ose.errno == errno.ENOENT:
                 print('mxmlc not found! Skipping test.')
test/test_utils.py

@@ -16,11 +16,11 @@ import json
 import xml.etree.ElementTree
 
 from youtube_dl.utils import (
+    clean_html,
     DateRange,
     encodeFilename,
     find_xpath_attr,
     fix_xml_ampersands,
-    get_meta_content,
     orderedSet,
     OnDemandPagedList,
     InAdvancePagedList,

@@ -46,8 +46,7 @@ from youtube_dl.utils import (
     escape_url,
     js_to_json,
     get_filesystem_encoding,
-    compat_getenv,
-    compat_expanduser,
+    intlist_to_bytes,
 )
 
 

@@ -157,17 +156,6 @@ class TestUtil(unittest.TestCase):
         self.assertEqual(find_xpath_attr(doc, './/node', 'x', 'a'), doc[1])
         self.assertEqual(find_xpath_attr(doc, './/node', 'y', 'c'), doc[2])
 
-    def test_meta_parser(self):
-        testhtml = '''
-        <head>
-            <meta name="description" content="foo & bar">
-            <meta content='Plato' name='author'/>
-        </head>
-        '''
-        get_meta = lambda name: get_meta_content(name, testhtml)
-        self.assertEqual(get_meta('description'), 'foo & bar')
-        self.assertEqual(get_meta('author'), 'Plato')
-
     def test_xpath_with_ns(self):
         testxml = '''<root xmlns:media="http://example.com/">
             <media:song>

@@ -230,6 +218,7 @@ class TestUtil(unittest.TestCase):
         self.assertEqual(parse_duration('0m0s'), 0)
         self.assertEqual(parse_duration('0s'), 0)
         self.assertEqual(parse_duration('01:02:03.05'), 3723.05)
+        self.assertEqual(parse_duration('T30M38S'), 1838)
 
     def test_fix_xml_ampersands(self):
         self.assertEqual(

@@ -296,6 +285,10 @@ class TestUtil(unittest.TestCase):
         d = json.loads(stripped)
         self.assertEqual(d, [{"id": "532cb", "x": 3}])
 
+        stripped = strip_jsonp('parseMetadata({"STATUS":"OK"})\n\n\n//epc')
+        d = json.loads(stripped)
+        self.assertEqual(d, {'STATUS': 'OK'})
+
     def test_uppercase_escape(self):
         self.assertEqual(uppercase_escape('aä'), 'aä')
         self.assertEqual(uppercase_escape('\\U0001d550'), '𝕐')

@@ -359,17 +352,14 @@ class TestUtil(unittest.TestCase):
         on = js_to_json('{"abc": true}')
         self.assertEqual(json.loads(on), {'abc': True})
 
-    def test_compat_getenv(self):
-        test_str = 'тест'
-        os.environ['YOUTUBE-DL-TEST'] = (test_str if sys.version_info >= (3, 0)
-                                         else test_str.encode(get_filesystem_encoding()))
-        self.assertEqual(compat_getenv('YOUTUBE-DL-TEST'), test_str)
-
-    def test_compat_expanduser(self):
-        test_str = 'C:\Documents and Settings\тест\Application Data'
-        os.environ['HOME'] = (test_str if sys.version_info >= (3, 0)
-                              else test_str.encode(get_filesystem_encoding()))
-        self.assertEqual(compat_expanduser('~'), test_str)
+    def test_clean_html(self):
+        self.assertEqual(clean_html('a:\nb'), 'a: b')
+        self.assertEqual(clean_html('a:\n "b"'), 'a: "b"')
+
+    def test_intlist_to_bytes(self):
+        self.assertEqual(
+            intlist_to_bytes([0, 1, 127, 128, 255]),
+            b'\x00\x01\x7f\x80\xff')
 
 if __name__ == '__main__':
     unittest.main()
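The new `strip_jsonp` assertion above covers payloads with trailing comments after the callback wrapper. For reference, the behavior being tested:

```python
import json
from youtube_dl.utils import strip_jsonp

# strip_jsonp removes the JSONP callback wrapper (and trailing junk)
# so the payload parses as plain JSON.
stripped = strip_jsonp('parseMetadata({"STATUS":"OK"})\n\n\n//epc')
assert json.loads(stripped) == {'STATUS': 'OK'}
```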
test/test_youtube_signature.py

@@ -14,7 +14,7 @@ import re
 import string
 
 from youtube_dl.extractor import YoutubeIE
-from youtube_dl.utils import compat_str, compat_urlretrieve
+from youtube_dl.compat import compat_str, compat_urlretrieve
 
 _TESTS = [
     (
youtube_dl/YoutubeDL.py

@@ -22,13 +22,15 @@ import traceback
 if os.name == 'nt':
     import ctypes
 
-from .utils import (
+from .compat import (
     compat_cookiejar,
     compat_expanduser,
     compat_http_client,
     compat_str,
     compat_urllib_error,
     compat_urllib_request,
+)
+from .utils import (
     escape_url,
     ContentTooShortError,
     date_from_str,

@@ -62,6 +64,7 @@ from .utils import (
 from .cache import Cache
 from .extractor import get_info_extractor, gen_extractors
 from .downloader import get_suitable_downloader
+from .downloader.rtmp import rtmpdump_version
 from .postprocessor import FFmpegMergerPP, FFmpegPostProcessor
 from .version import __version__
 

@@ -621,7 +624,7 @@ class YoutubeDL(object):
 
             return self.process_ie_result(
                 new_result, download=download, extra_info=extra_info)
-        elif result_type == 'playlist':
+        elif result_type == 'playlist' or result_type == 'multi_video':
             # We process each entry in the playlist
             playlist = ie_result.get('title', None) or ie_result.get('id', None)
             self.to_screen('[download] Downloading playlist: %s' % playlist)

@@ -655,6 +658,8 @@ class YoutubeDL(object):
             extra = {
                 'n_entries': n_entries,
                 'playlist': playlist,
+                'playlist_id': ie_result.get('id'),
+                'playlist_title': ie_result.get('title'),
                 'playlist_index': i + playliststart,
                 'extractor': ie_result['extractor'],
                 'webpage_url': ie_result['webpage_url'],

@@ -674,6 +679,9 @@ class YoutubeDL(object):
             ie_result['entries'] = playlist_results
             return ie_result
         elif result_type == 'compat_list':
+            self.report_warning(
+                'Extractor %s returned a compat_list result. '
+                'It needs to be updated.' % ie_result.get('extractor'))
             def _fixup(r):
                 self.add_extra_info(r,
                     {

@@ -833,6 +841,13 @@ class YoutubeDL(object):
                 formats_info = (self.select_format(format_1, formats),
                     self.select_format(format_2, formats))
                 if all(formats_info):
+                    # The first format must contain the video and the
+                    # second the audio
+                    if formats_info[0].get('vcodec') == 'none':
+                        self.report_error('The first format must '
+                            'contain the video, try using '
+                            '"-f %s+%s"' % (format_2, format_1))
+                        return
                     selected_format = {
                         'requested_formats': formats_info,
                         'format': rf,

@@ -989,7 +1004,7 @@ class YoutubeDL(object):
             else:
                 self.to_screen('[info] Writing video description metadata as JSON to: ' + infofn)
                 try:
-                    write_json_file(info_dict, encodeFilename(infofn))
+                    write_json_file(info_dict, infofn)
                 except (OSError, IOError):
                     self.report_error('Cannot write metadata to JSON file ' + infofn)
                     return

@@ -1294,11 +1309,13 @@ class YoutubeDL(object):
             self.report_warning(
                 'Your Python is broken! Update to a newer and supported version')
 
+        stdout_encoding = getattr(
+            sys.stdout, 'encoding', 'missing (%s)' % type(sys.stdout).__name__)
         encoding_str = (
             '[debug] Encodings: locale %s, fs %s, out %s, pref %s\n' % (
                 locale.getpreferredencoding(),
                 sys.getfilesystemencoding(),
-                sys.stdout.encoding,
+                stdout_encoding,
                 self.get_encoding()))
         write_string(encoding_str, encoding=None)
 

@@ -1321,6 +1338,7 @@ class YoutubeDL(object):
             platform.python_version(), platform_name()))
 
         exe_versions = FFmpegPostProcessor.get_versions()
+        exe_versions['rtmpdump'] = rtmpdump_version()
         exe_str = ', '.join(
             '%s %s' % (exe, v)
             for exe, v in sorted(exe_versions.items())
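The `process_ie_result` change above means an extractor may now return a `multi_video` result, which is processed like a playlist. A sketch of the shape such a result takes (all values are placeholders):

```python
# Illustrative only: a 'multi_video' info dict with URL-type entries.
ie_result = {
    '_type': 'multi_video',
    'id': 'example-id',
    'title': 'Example multi-part video',
    'entries': [
        {'_type': 'url', 'url': 'https://example.com/part1'},
        {'_type': 'url', 'url': 'https://example.com/part2'},
    ],
}
```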
@ -1,6 +1,8 @@
|
|||||||
#!/usr/bin/env python
|
#!/usr/bin/env python
|
||||||
# -*- coding: utf-8 -*-
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
__license__ = 'Public Domain'
|
__license__ = 'Public Domain'
|
||||||
|
|
||||||
import codecs
|
import codecs
|
||||||
@ -13,10 +15,13 @@ import sys
|
|||||||
from .options import (
|
from .options import (
|
||||||
parseOpts,
|
parseOpts,
|
||||||
)
|
)
|
||||||
from .utils import (
|
from .compat import (
|
||||||
compat_expanduser,
|
compat_expanduser,
|
||||||
compat_getpass,
|
compat_getpass,
|
||||||
compat_print,
|
compat_print,
|
||||||
|
workaround_optparse_bug9161,
|
||||||
|
)
|
||||||
|
from .utils import (
|
||||||
DateRange,
|
DateRange,
|
||||||
DEFAULT_OUTTMPL,
|
DEFAULT_OUTTMPL,
|
||||||
decodeOption,
|
decodeOption,
|
||||||
@ -53,7 +58,9 @@ def _real_main(argv=None):
|
|||||||
# https://github.com/rg3/youtube-dl/issues/820
|
# https://github.com/rg3/youtube-dl/issues/820
|
||||||
codecs.register(lambda name: codecs.lookup('utf-8') if name == 'cp65001' else None)
|
codecs.register(lambda name: codecs.lookup('utf-8') if name == 'cp65001' else None)
|
||||||
|
|
||||||
setproctitle(u'youtube-dl')
|
workaround_optparse_bug9161()
|
||||||
|
|
||||||
|
setproctitle('youtube-dl')
|
||||||
|
|
||||||
parser, opts, args = parseOpts(argv)
|
parser, opts, args = parseOpts(argv)
|
||||||
|
|
||||||
@ -69,10 +76,10 @@ def _real_main(argv=None):
|
|||||||
if opts.headers is not None:
|
if opts.headers is not None:
|
||||||
for h in opts.headers:
|
for h in opts.headers:
|
||||||
if h.find(':', 1) < 0:
|
if h.find(':', 1) < 0:
|
||||||
parser.error(u'wrong header formatting, it should be key:value, not "%s"'%h)
|
parser.error('wrong header formatting, it should be key:value, not "%s"'%h)
|
||||||
key, value = h.split(':', 2)
|
key, value = h.split(':', 2)
|
||||||
if opts.verbose:
|
if opts.verbose:
|
||||||
write_string(u'[debug] Adding header from command line option %s:%s\n'%(key, value))
|
write_string('[debug] Adding header from command line option %s:%s\n'%(key, value))
|
||||||
std_headers[key] = value
|
std_headers[key] = value
|
||||||
|
|
||||||
# Dump user agent
|
# Dump user agent
|
||||||
@ -90,9 +97,9 @@ def _real_main(argv=None):
|
|||||||
batchfd = io.open(opts.batchfile, 'r', encoding='utf-8', errors='ignore')
|
batchfd = io.open(opts.batchfile, 'r', encoding='utf-8', errors='ignore')
|
||||||
batch_urls = read_batch_urls(batchfd)
|
batch_urls = read_batch_urls(batchfd)
|
||||||
if opts.verbose:
|
if opts.verbose:
|
||||||
write_string(u'[debug] Batch file urls: ' + repr(batch_urls) + u'\n')
|
write_string('[debug] Batch file urls: ' + repr(batch_urls) + '\n')
|
||||||
except IOError:
|
except IOError:
|
||||||
sys.exit(u'ERROR: batch file could not be read')
|
sys.exit('ERROR: batch file could not be read')
|
||||||
all_urls = batch_urls + args
|
all_urls = batch_urls + args
|
||||||
all_urls = [url.strip() for url in all_urls]
|
all_urls = [url.strip() for url in all_urls]
|
||||||
_enc = preferredencoding()
|
_enc = preferredencoding()
|
||||||
@ -105,7 +112,7 @@ def _real_main(argv=None):
|
|||||||
compat_print(ie.IE_NAME + (' (CURRENTLY BROKEN)' if not ie._WORKING else ''))
|
compat_print(ie.IE_NAME + (' (CURRENTLY BROKEN)' if not ie._WORKING else ''))
|
||||||
matchedUrls = [url for url in all_urls if ie.suitable(url)]
|
matchedUrls = [url for url in all_urls if ie.suitable(url)]
|
||||||
for mu in matchedUrls:
|
for mu in matchedUrls:
|
||||||
compat_print(u' ' + mu)
|
compat_print(' ' + mu)
|
||||||
sys.exit(0)
|
sys.exit(0)
|
||||||
if opts.list_extractor_descriptions:
|
if opts.list_extractor_descriptions:
|
||||||
for ie in sorted(extractors, key=lambda ie: ie.IE_NAME.lower()):
|
for ie in sorted(extractors, key=lambda ie: ie.IE_NAME.lower()):
|
||||||
@ -115,63 +122,63 @@ def _real_main(argv=None):
|
|||||||
if desc is False:
|
if desc is False:
|
||||||
continue
|
continue
|
||||||
if hasattr(ie, 'SEARCH_KEY'):
|
if hasattr(ie, 'SEARCH_KEY'):
|
||||||
_SEARCHES = (u'cute kittens', u'slithering pythons', u'falling cat', u'angry poodle', u'purple fish', u'running tortoise', u'sleeping bunny')
|
_SEARCHES = ('cute kittens', 'slithering pythons', 'falling cat', 'angry poodle', 'purple fish', 'running tortoise', 'sleeping bunny')
|
||||||
_COUNTS = (u'', u'5', u'10', u'all')
|
_COUNTS = ('', '5', '10', 'all')
|
||||||
desc += u' (Example: "%s%s:%s" )' % (ie.SEARCH_KEY, random.choice(_COUNTS), random.choice(_SEARCHES))
|
desc += ' (Example: "%s%s:%s" )' % (ie.SEARCH_KEY, random.choice(_COUNTS), random.choice(_SEARCHES))
|
||||||
compat_print(desc)
|
compat_print(desc)
|
||||||
sys.exit(0)
|
sys.exit(0)
|
||||||
|
|
||||||
|
|
||||||
# Conflicting, missing and erroneous options
|
# Conflicting, missing and erroneous options
|
||||||
if opts.usenetrc and (opts.username is not None or opts.password is not None):
|
if opts.usenetrc and (opts.username is not None or opts.password is not None):
|
||||||
parser.error(u'using .netrc conflicts with giving username/password')
|
parser.error('using .netrc conflicts with giving username/password')
|
||||||
if opts.password is not None and opts.username is None:
|
if opts.password is not None and opts.username is None:
|
||||||
parser.error(u'account username missing\n')
|
parser.error('account username missing\n')
|
||||||
if opts.outtmpl is not None and (opts.usetitle or opts.autonumber or opts.useid):
|
if opts.outtmpl is not None and (opts.usetitle or opts.autonumber or opts.useid):
|
||||||
parser.error(u'using output template conflicts with using title, video ID or auto number')
|
parser.error('using output template conflicts with using title, video ID or auto number')
|
||||||
if opts.usetitle and opts.useid:
|
if opts.usetitle and opts.useid:
|
||||||
parser.error(u'using title conflicts with using video ID')
|
parser.error('using title conflicts with using video ID')
|
||||||
if opts.username is not None and opts.password is None:
|
if opts.username is not None and opts.password is None:
|
||||||
opts.password = compat_getpass(u'Type account password and press [Return]: ')
|
opts.password = compat_getpass('Type account password and press [Return]: ')
|
||||||
if opts.ratelimit is not None:
|
if opts.ratelimit is not None:
|
||||||
numeric_limit = FileDownloader.parse_bytes(opts.ratelimit)
|
numeric_limit = FileDownloader.parse_bytes(opts.ratelimit)
|
||||||
if numeric_limit is None:
|
if numeric_limit is None:
|
||||||
parser.error(u'invalid rate limit specified')
|
parser.error('invalid rate limit specified')
|
||||||
opts.ratelimit = numeric_limit
|
opts.ratelimit = numeric_limit
|
||||||
if opts.min_filesize is not None:
|
if opts.min_filesize is not None:
|
||||||
numeric_limit = FileDownloader.parse_bytes(opts.min_filesize)
|
numeric_limit = FileDownloader.parse_bytes(opts.min_filesize)
|
||||||
if numeric_limit is None:
|
if numeric_limit is None:
|
||||||
parser.error(u'invalid min_filesize specified')
|
parser.error('invalid min_filesize specified')
|
||||||
opts.min_filesize = numeric_limit
|
opts.min_filesize = numeric_limit
|
||||||
if opts.max_filesize is not None:
|
if opts.max_filesize is not None:
|
||||||
numeric_limit = FileDownloader.parse_bytes(opts.max_filesize)
|
numeric_limit = FileDownloader.parse_bytes(opts.max_filesize)
|
||||||
if numeric_limit is None:
|
if numeric_limit is None:
|
||||||
parser.error(u'invalid max_filesize specified')
|
parser.error('invalid max_filesize specified')
|
||||||
opts.max_filesize = numeric_limit
|
opts.max_filesize = numeric_limit
|
||||||
if opts.retries is not None:
|
if opts.retries is not None:
|
||||||
try:
|
try:
|
||||||
opts.retries = int(opts.retries)
|
opts.retries = int(opts.retries)
|
||||||
except (TypeError, ValueError):
|
except (TypeError, ValueError):
|
||||||
parser.error(u'invalid retry count specified')
|
parser.error('invalid retry count specified')
|
||||||
if opts.buffersize is not None:
|
if opts.buffersize is not None:
|
||||||
numeric_buffersize = FileDownloader.parse_bytes(opts.buffersize)
|
numeric_buffersize = FileDownloader.parse_bytes(opts.buffersize)
|
||||||
if numeric_buffersize is None:
|
if numeric_buffersize is None:
|
||||||
parser.error(u'invalid buffer size specified')
|
parser.error('invalid buffer size specified')
|
||||||
opts.buffersize = numeric_buffersize
|
opts.buffersize = numeric_buffersize
|
||||||
if opts.playliststart <= 0:
|
if opts.playliststart <= 0:
|
||||||
-        raise ValueError(u'Playlist start must be positive')
+        raise ValueError('Playlist start must be positive')
     if opts.playlistend not in (-1, None) and opts.playlistend < opts.playliststart:
-        raise ValueError(u'Playlist end must be greater than playlist start')
+        raise ValueError('Playlist end must be greater than playlist start')
     if opts.extractaudio:
         if opts.audioformat not in ['best', 'aac', 'mp3', 'm4a', 'opus', 'vorbis', 'wav']:
-            parser.error(u'invalid audio format specified')
+            parser.error('invalid audio format specified')
     if opts.audioquality:
         opts.audioquality = opts.audioquality.strip('k').strip('K')
         if not opts.audioquality.isdigit():
-            parser.error(u'invalid audio quality specified')
+            parser.error('invalid audio quality specified')
     if opts.recodevideo is not None:
         if opts.recodevideo not in ['mp4', 'flv', 'webm', 'ogg', 'mkv']:
-            parser.error(u'invalid video recode format specified')
+            parser.error('invalid video recode format specified')
     if opts.date is not None:
         date = DateRange.day(opts.date)
     else:
@@ -191,17 +198,17 @@ def _real_main(argv=None):
     if opts.outtmpl is not None:
         opts.outtmpl = opts.outtmpl.decode(preferredencoding())
     outtmpl = ((opts.outtmpl is not None and opts.outtmpl)
-               or (opts.format == '-1' and opts.usetitle and u'%(title)s-%(id)s-%(format)s.%(ext)s')
-               or (opts.format == '-1' and u'%(id)s-%(format)s.%(ext)s')
-               or (opts.usetitle and opts.autonumber and u'%(autonumber)s-%(title)s-%(id)s.%(ext)s')
-               or (opts.usetitle and u'%(title)s-%(id)s.%(ext)s')
-               or (opts.useid and u'%(id)s.%(ext)s')
-               or (opts.autonumber and u'%(autonumber)s-%(id)s.%(ext)s')
+               or (opts.format == '-1' and opts.usetitle and '%(title)s-%(id)s-%(format)s.%(ext)s')
+               or (opts.format == '-1' and '%(id)s-%(format)s.%(ext)s')
+               or (opts.usetitle and opts.autonumber and '%(autonumber)s-%(title)s-%(id)s.%(ext)s')
+               or (opts.usetitle and '%(title)s-%(id)s.%(ext)s')
+               or (opts.useid and '%(id)s.%(ext)s')
+               or (opts.autonumber and '%(autonumber)s-%(id)s.%(ext)s')
                or DEFAULT_OUTTMPL)
     if not os.path.splitext(outtmpl)[1] and opts.extractaudio:
-        parser.error(u'Cannot download a video and extract audio into the same'
-                     u' file! Use "{0}.%(ext)s" instead of "{0}" as the output'
-                     u' template'.format(outtmpl))
+        parser.error('Cannot download a video and extract audio into the same'
+                     ' file! Use "{0}.%(ext)s" instead of "{0}" as the output'
+                     ' template'.format(outtmpl))

     any_printing = opts.geturl or opts.gettitle or opts.getid or opts.getthumbnail or opts.getdescription or opts.getfilename or opts.getformat or opts.getduration or opts.dumpjson or opts.dump_single_json
     download_archive_fn = compat_expanduser(opts.download_archive) if opts.download_archive is not None else opts.download_archive
@@ -328,7 +335,7 @@ def _real_main(argv=None):
     # Maybe do nothing
     if (len(all_urls) < 1) and (opts.load_info_filename is None):
         if not (opts.update_self or opts.rm_cachedir):
-            parser.error(u'you must provide at least one URL')
+            parser.error('you must provide at least one URL')
         else:
             sys.exit()

@@ -338,7 +345,7 @@ def _real_main(argv=None):
         else:
             retcode = ydl.download(all_urls)
     except MaxDownloadsReached:
-        ydl.to_screen(u'--max-download limit reached, aborting.')
+        ydl.to_screen('--max-download limit reached, aborting.')
         retcode = 101

     sys.exit(retcode)
@@ -350,6 +357,6 @@ def main(argv=None):
     except DownloadError:
         sys.exit(1)
     except SameFileError:
-        sys.exit(u'ERROR: fixed output name but more than one file to download')
+        sys.exit('ERROR: fixed output name but more than one file to download')
     except KeyboardInterrupt:
-        sys.exit(u'\nERROR: Interrupted by user')
+        sys.exit('\nERROR: Interrupted by user')
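The output-template selection above is a chain of short-circuiting `or` expressions: the first truthy candidate wins, so an explicit --output template beats the title/id-derived templates, which in turn beat DEFAULT_OUTTMPL. A minimal standalone sketch of the same idiom (the `Opts` stand-in below is illustrative, not youtube-dl's real options object):

    # First-truthy-wins template selection, mirroring the chain above.
    DEFAULT_OUTTMPL = '%(title)s-%(id)s.%(ext)s'

    def pick_outtmpl(opts):
        return ((opts.outtmpl is not None and opts.outtmpl)
                or (opts.usetitle and '%(title)s-%(id)s.%(ext)s')
                or (opts.useid and '%(id)s.%(ext)s')
                or DEFAULT_OUTTMPL)

    class Opts(object):
        outtmpl = None
        usetitle = False
        useid = True

    print(pick_outtmpl(Opts()))  # -> '%(id)s.%(ext)s'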
@@ -8,10 +8,8 @@ import re
 import shutil
 import traceback

-from .utils import (
-    compat_expanduser,
-    write_json_file,
-)
+from .compat import compat_expanduser, compat_getenv
+from .utils import write_json_file


 class Cache(object):
@@ -21,7 +19,7 @@ class Cache(object):
     def _get_root_dir(self):
         res = self._ydl.params.get('cachedir')
         if res is None:
-            cache_root = os.environ.get('XDG_CACHE_HOME', '~/.cache')
+            cache_root = compat_getenv('XDG_CACHE_HOME', '~/.cache')
             res = os.path.join(cache_root, 'youtube-dl')
         return compat_expanduser(res)
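The cache.py change swaps os.environ.get for compat_getenv so that XDG_CACHE_HOME is decoded with the filesystem encoding on Python 2. The resolution order it implements, sketched with plain stdlib calls (the 'youtube-dl' subdirectory name is from the diff; the rest is a simplification):

    import os

    def cache_root_dir(cachedir=None):
        # An explicit --cache-dir wins; otherwise fall back to the XDG spec,
        # then to ~/.cache, expanding '~' only at the very end.
        res = cachedir
        if res is None:
            res = os.path.join(
                os.environ.get('XDG_CACHE_HOME', '~/.cache'), 'youtube-dl')
        return os.path.expanduser(res)

    print(cache_root_dir())           # e.g. /home/user/.cache/youtube-dl
    print(cache_root_dir('/tmp/yc'))  # /tmp/yc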
youtube_dl/compat.py (new file, 350 lines)
@@ -0,0 +1,350 @@
+from __future__ import unicode_literals
+
+import getpass
+import optparse
+import os
+import subprocess
+import sys
+
+
+try:
+    import urllib.request as compat_urllib_request
+except ImportError:  # Python 2
+    import urllib2 as compat_urllib_request
+
+try:
+    import urllib.error as compat_urllib_error
+except ImportError:  # Python 2
+    import urllib2 as compat_urllib_error
+
+try:
+    import urllib.parse as compat_urllib_parse
+except ImportError:  # Python 2
+    import urllib as compat_urllib_parse
+
+try:
+    from urllib.parse import urlparse as compat_urllib_parse_urlparse
+except ImportError:  # Python 2
+    from urlparse import urlparse as compat_urllib_parse_urlparse
+
+try:
+    import urllib.parse as compat_urlparse
+except ImportError:  # Python 2
+    import urlparse as compat_urlparse
+
+try:
+    import http.cookiejar as compat_cookiejar
+except ImportError:  # Python 2
+    import cookielib as compat_cookiejar
+
+try:
+    import html.entities as compat_html_entities
+except ImportError:  # Python 2
+    import htmlentitydefs as compat_html_entities
+
+try:
+    import html.parser as compat_html_parser
+except ImportError:  # Python 2
+    import HTMLParser as compat_html_parser
+
+try:
+    import http.client as compat_http_client
+except ImportError:  # Python 2
+    import httplib as compat_http_client
+
+try:
+    from urllib.error import HTTPError as compat_HTTPError
+except ImportError:  # Python 2
+    from urllib2 import HTTPError as compat_HTTPError
+
+try:
+    from urllib.request import urlretrieve as compat_urlretrieve
+except ImportError:  # Python 2
+    from urllib import urlretrieve as compat_urlretrieve
+
+
+try:
+    from subprocess import DEVNULL
+    compat_subprocess_get_DEVNULL = lambda: DEVNULL
+except ImportError:
+    compat_subprocess_get_DEVNULL = lambda: open(os.path.devnull, 'w')
+
+try:
+    from urllib.parse import unquote as compat_urllib_parse_unquote
+except ImportError:
+    def compat_urllib_parse_unquote(string, encoding='utf-8', errors='replace'):
+        if string == '':
+            return string
+        res = string.split('%')
+        if len(res) == 1:
+            return string
+        if encoding is None:
+            encoding = 'utf-8'
+        if errors is None:
+            errors = 'replace'
+        # pct_sequence: contiguous sequence of percent-encoded bytes, decoded
+        pct_sequence = b''
+        string = res[0]
+        for item in res[1:]:
+            try:
+                if not item:
+                    raise ValueError
+                pct_sequence += item[:2].decode('hex')
+                rest = item[2:]
+                if not rest:
+                    # This segment was just a single percent-encoded character.
+                    # May be part of a sequence of code units, so delay decoding.
+                    # (Stored in pct_sequence).
+                    continue
+            except ValueError:
+                rest = '%' + item
+            # Encountered non-percent-encoded characters. Flush the current
+            # pct_sequence.
+            string += pct_sequence.decode(encoding, errors) + rest
+            pct_sequence = b''
+        if pct_sequence:
+            # Flush the final pct_sequence
+            string += pct_sequence.decode(encoding, errors)
+        return string
+
+
+try:
+    from urllib.parse import parse_qs as compat_parse_qs
+except ImportError:  # Python 2
+    # HACK: The following is the correct parse_qs implementation from cpython 3's stdlib.
+    # Python 2's version is apparently totally broken
+
+    def _parse_qsl(qs, keep_blank_values=False, strict_parsing=False,
+                   encoding='utf-8', errors='replace'):
+        qs, _coerce_result = qs, unicode
+        pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
+        r = []
+        for name_value in pairs:
+            if not name_value and not strict_parsing:
+                continue
+            nv = name_value.split('=', 1)
+            if len(nv) != 2:
+                if strict_parsing:
+                    raise ValueError("bad query field: %r" % (name_value,))
+                # Handle case of a control-name with no equal sign
+                if keep_blank_values:
+                    nv.append('')
+                else:
+                    continue
+            if len(nv[1]) or keep_blank_values:
+                name = nv[0].replace('+', ' ')
+                name = compat_urllib_parse_unquote(
+                    name, encoding=encoding, errors=errors)
+                name = _coerce_result(name)
+                value = nv[1].replace('+', ' ')
+                value = compat_urllib_parse_unquote(
+                    value, encoding=encoding, errors=errors)
+                value = _coerce_result(value)
+                r.append((name, value))
+        return r
+
+    def compat_parse_qs(qs, keep_blank_values=False, strict_parsing=False,
+                        encoding='utf-8', errors='replace'):
+        parsed_result = {}
+        pairs = _parse_qsl(qs, keep_blank_values, strict_parsing,
+                           encoding=encoding, errors=errors)
+        for name, value in pairs:
+            if name in parsed_result:
+                parsed_result[name].append(value)
+            else:
+                parsed_result[name] = [value]
+        return parsed_result
+
+try:
+    compat_str = unicode  # Python 2
+except NameError:
+    compat_str = str
+
+try:
+    compat_chr = unichr  # Python 2
+except NameError:
+    compat_chr = chr
+
+try:
+    from xml.etree.ElementTree import ParseError as compat_xml_parse_error
+except ImportError:  # Python 2.6
+    from xml.parsers.expat import ExpatError as compat_xml_parse_error
+
+try:
+    from shlex import quote as shlex_quote
+except ImportError:  # Python < 3.3
+    def shlex_quote(s):
+        return "'" + s.replace("'", "'\"'\"'") + "'"
+
+
+def compat_ord(c):
+    if type(c) is int: return c
+    else: return ord(c)
+
+
+if sys.version_info >= (3, 0):
+    compat_getenv = os.getenv
+    compat_expanduser = os.path.expanduser
+else:
+    # Environment variables should be decoded with filesystem encoding.
+    # Otherwise it will fail if any non-ASCII characters present (see #3854 #3217 #2918)
+
+    def compat_getenv(key, default=None):
+        from .utils import get_filesystem_encoding
+        env = os.getenv(key, default)
+        if env:
+            env = env.decode(get_filesystem_encoding())
+        return env
+
+    # HACK: The default implementations of os.path.expanduser from cpython do not decode
+    # environment variables with filesystem encoding. We will work around this by
+    # providing adjusted implementations.
+    # The following are os.path.expanduser implementations from cpython 2.7.8 stdlib
+    # for different platforms with correct environment variables decoding.
+
+    if os.name == 'posix':
+        def compat_expanduser(path):
+            """Expand ~ and ~user constructions.  If user or $HOME is unknown,
+            do nothing."""
+            if not path.startswith('~'):
+                return path
+            i = path.find('/', 1)
+            if i < 0:
+                i = len(path)
+            if i == 1:
+                if 'HOME' not in os.environ:
+                    import pwd
+                    userhome = pwd.getpwuid(os.getuid()).pw_dir
+                else:
+                    userhome = compat_getenv('HOME')
+            else:
+                import pwd
+                try:
+                    pwent = pwd.getpwnam(path[1:i])
+                except KeyError:
+                    return path
+                userhome = pwent.pw_dir
+            userhome = userhome.rstrip('/')
+            return (userhome + path[i:]) or '/'
+    elif os.name == 'nt' or os.name == 'ce':
+        def compat_expanduser(path):
+            """Expand ~ and ~user constructs.
+
+            If user or $HOME is unknown, do nothing."""
+            if path[:1] != '~':
+                return path
+            i, n = 1, len(path)
+            while i < n and path[i] not in '/\\':
+                i = i + 1
+
+            if 'HOME' in os.environ:
+                userhome = compat_getenv('HOME')
+            elif 'USERPROFILE' in os.environ:
+                userhome = compat_getenv('USERPROFILE')
+            elif not 'HOMEPATH' in os.environ:
+                return path
+            else:
+                try:
+                    drive = compat_getenv('HOMEDRIVE')
+                except KeyError:
+                    drive = ''
+                userhome = os.path.join(drive, compat_getenv('HOMEPATH'))
+
+            if i != 1:  # ~user
+                userhome = os.path.join(os.path.dirname(userhome), path[1:i])
+
+            return userhome + path[i:]
+    else:
+        compat_expanduser = os.path.expanduser
+
+
+if sys.version_info < (3, 0):
+    def compat_print(s):
+        from .utils import preferredencoding
+        print(s.encode(preferredencoding(), 'xmlcharrefreplace'))
+else:
+    def compat_print(s):
+        assert type(s) == type(u'')
+        print(s)
+
+
+try:
+    subprocess_check_output = subprocess.check_output
+except AttributeError:
+    def subprocess_check_output(*args, **kwargs):
+        assert 'input' not in kwargs
+        p = subprocess.Popen(*args, stdout=subprocess.PIPE, **kwargs)
+        output, _ = p.communicate()
+        ret = p.poll()
+        if ret:
+            raise subprocess.CalledProcessError(ret, p.args, output=output)
+        return output
+
+if sys.version_info < (3, 0) and sys.platform == 'win32':
+    def compat_getpass(prompt, *args, **kwargs):
+        if isinstance(prompt, compat_str):
+            from .utils import preferredencoding
+            prompt = prompt.encode(preferredencoding())
+        return getpass.getpass(prompt, *args, **kwargs)
+else:
+    compat_getpass = getpass.getpass
+
+# Old 2.6 and 2.7 releases require kwargs to be bytes
+try:
+    (lambda x: x)(**{'x': 0})
+except TypeError:
+    def compat_kwargs(kwargs):
+        return dict((bytes(k), v) for k, v in kwargs.items())
+else:
+    compat_kwargs = lambda kwargs: kwargs
+
+
+# Fix https://github.com/rg3/youtube-dl/issues/4223
+# See http://bugs.python.org/issue9161 for what is broken
+def workaround_optparse_bug9161():
+    op = optparse.OptionParser()
+    og = optparse.OptionGroup(op, 'foo')
+    try:
+        og.add_option('-t')
+    except TypeError:
+        real_add_option = optparse.OptionGroup.add_option
+
+        def _compat_add_option(self, *args, **kwargs):
+            enc = lambda v: (
+                v.encode('ascii', 'replace') if isinstance(v, compat_str)
+                else v)
+            bargs = [enc(a) for a in args]
+            bkwargs = dict(
+                (k, enc(v)) for k, v in kwargs.items())
+            return real_add_option(self, *bargs, **bkwargs)
+        optparse.OptionGroup.add_option = _compat_add_option
+
+
+__all__ = [
+    'compat_HTTPError',
+    'compat_chr',
+    'compat_cookiejar',
+    'compat_expanduser',
+    'compat_getenv',
+    'compat_getpass',
+    'compat_html_entities',
+    'compat_html_parser',
+    'compat_http_client',
+    'compat_kwargs',
+    'compat_ord',
+    'compat_parse_qs',
+    'compat_print',
+    'compat_str',
+    'compat_subprocess_get_DEVNULL',
+    'compat_urllib_error',
+    'compat_urllib_parse',
+    'compat_urllib_parse_unquote',
+    'compat_urllib_parse_urlparse',
+    'compat_urllib_request',
+    'compat_urlparse',
+    'compat_urlretrieve',
+    'compat_xml_parse_error',
+    'shlex_quote',
+    'subprocess_check_output',
+    'workaround_optparse_bug9161',
+]
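The hand-rolled compat_parse_qs above exists because Python 2's urlparse.parse_qs mishandles encodings; the behaviour it replicates is the cpython 3 one, so the expected semantics can be checked against the Python 3 stdlib directly:

    from urllib.parse import parse_qs, unquote

    # Repeated keys accumulate into a list, '+' becomes a space, and
    # percent-escapes decode with the given encoding.
    print(parse_qs('v=abc&v=def&t=1m30s'))  # {'v': ['abc', 'def'], 't': ['1m30s']}
    print(unquote('caf%C3%A9'))             # 'café'
    print(parse_qs('a=&b=1'))               # {'b': ['1']} (blank values dropped)
    print(parse_qs('a=&b=1', keep_blank_values=True))  # {'a': [''], 'b': ['1']}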
@@ -1,3 +1,5 @@
+from __future__ import unicode_literals
+
 import os
 import re
 import sys
@@ -159,14 +161,14 @@ class FileDownloader(object):

     def temp_name(self, filename):
         """Returns a temporary filename for the given filename."""
-        if self.params.get('nopart', False) or filename == u'-' or \
+        if self.params.get('nopart', False) or filename == '-' or \
                 (os.path.exists(encodeFilename(filename)) and not os.path.isfile(encodeFilename(filename))):
             return filename
-        return filename + u'.part'
+        return filename + '.part'

     def undo_temp_name(self, filename):
-        if filename.endswith(u'.part'):
-            return filename[:-len(u'.part')]
+        if filename.endswith('.part'):
+            return filename[:-len('.part')]
         return filename

     def try_rename(self, old_filename, new_filename):
@@ -175,7 +177,7 @@ class FileDownloader(object):
                 return
             os.rename(encodeFilename(old_filename), encodeFilename(new_filename))
         except (IOError, OSError) as err:
-            self.report_error(u'unable to rename file: %s' % compat_str(err))
+            self.report_error('unable to rename file: %s' % compat_str(err))

     def try_utime(self, filename, last_modified_hdr):
         """Try to set the last-modified time of the given file."""
@@ -200,10 +202,10 @@ class FileDownloader(object):

     def report_destination(self, filename):
         """Report destination filename."""
-        self.to_screen(u'[download] Destination: ' + filename)
+        self.to_screen('[download] Destination: ' + filename)

     def _report_progress_status(self, msg, is_last_line=False):
-        fullmsg = u'[download] ' + msg
+        fullmsg = '[download] ' + msg
         if self.params.get('progress_with_newline', False):
             self.to_screen(fullmsg)
         else:
@@ -211,13 +213,13 @@ class FileDownloader(object):
                 prev_len = getattr(self, '_report_progress_prev_line_length',
                                    0)
                 if prev_len > len(fullmsg):
-                    fullmsg += u' ' * (prev_len - len(fullmsg))
+                    fullmsg += ' ' * (prev_len - len(fullmsg))
                 self._report_progress_prev_line_length = len(fullmsg)
-                clear_line = u'\r'
+                clear_line = '\r'
             else:
-                clear_line = (u'\r\x1b[K' if sys.stderr.isatty() else u'\r')
+                clear_line = ('\r\x1b[K' if sys.stderr.isatty() else '\r')
             self.to_screen(clear_line + fullmsg, skip_eol=not is_last_line)
-        self.to_console_title(u'youtube-dl ' + msg)
+        self.to_console_title('youtube-dl ' + msg)

     def report_progress(self, percent, data_len_str, speed, eta):
         """Report download progress."""
@@ -233,7 +235,7 @@ class FileDownloader(object):
             percent_str = 'Unknown %'
         speed_str = self.format_speed(speed)

-        msg = (u'%s of %s at %s ETA %s' %
+        msg = ('%s of %s at %s ETA %s' %
                (percent_str, data_len_str, speed_str, eta_str))
         self._report_progress_status(msg)

@@ -243,37 +245,37 @@ class FileDownloader(object):
         downloaded_str = format_bytes(downloaded_data_len)
         speed_str = self.format_speed(speed)
         elapsed_str = FileDownloader.format_seconds(elapsed)
-        msg = u'%s at %s (%s)' % (downloaded_str, speed_str, elapsed_str)
+        msg = '%s at %s (%s)' % (downloaded_str, speed_str, elapsed_str)
         self._report_progress_status(msg)

     def report_finish(self, data_len_str, tot_time):
         """Report download finished."""
         if self.params.get('noprogress', False):
-            self.to_screen(u'[download] Download completed')
+            self.to_screen('[download] Download completed')
         else:
             self._report_progress_status(
-                (u'100%% of %s in %s' %
+                ('100%% of %s in %s' %
                  (data_len_str, self.format_seconds(tot_time))),
                 is_last_line=True)

     def report_resuming_byte(self, resume_len):
         """Report attempt to resume at given byte."""
-        self.to_screen(u'[download] Resuming download at byte %s' % resume_len)
+        self.to_screen('[download] Resuming download at byte %s' % resume_len)

     def report_retry(self, count, retries):
         """Report retry in case of HTTP error 5xx"""
-        self.to_screen(u'[download] Got server HTTP error. Retrying (attempt %d of %d)...' % (count, retries))
+        self.to_screen('[download] Got server HTTP error. Retrying (attempt %d of %d)...' % (count, retries))

     def report_file_already_downloaded(self, file_name):
         """Report file has already been fully downloaded."""
         try:
-            self.to_screen(u'[download] %s has already been downloaded' % file_name)
+            self.to_screen('[download] %s has already been downloaded' % file_name)
         except UnicodeEncodeError:
-            self.to_screen(u'[download] The file has already been downloaded')
+            self.to_screen('[download] The file has already been downloaded')

     def report_unable_to_resume(self):
         """Report it was impossible to resume download."""
-        self.to_screen(u'[download] Unable to resume')
+        self.to_screen('[download] Unable to resume')

     def download(self, filename, info_dict):
         """Download to a filename using the info from info_dict
@@ -293,7 +295,7 @@ class FileDownloader(object):

     def real_download(self, filename, info_dict):
         """Real download process. Redefine in subclasses."""
-        raise NotImplementedError(u'This method must be implemented by subclasses')
+        raise NotImplementedError('This method must be implemented by subclasses')

     def _hook_progress(self, status):
         for ph in self._progress_hooks:
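The _report_progress_status lines above keep the same single-line progress behaviour: pad with spaces so a shorter message fully overwrites a longer previous one, and prefer the ANSI erase-line sequence on a real terminal. A self-contained sketch of that terminal trick (messages here are made up):

    import sys
    import time

    prev_len = 0
    for msg in ['12.0% of 10.00MiB at 1.21MiB/s ETA 00:07',
                '100% of 10.00MiB in 00:08']:
        line = '[download] ' + msg
        if prev_len > len(line):
            # Blank out leftovers of the previous, longer line.
            line += ' ' * (prev_len - len(line))
        prev_len = len(line)
        clear_line = '\r\x1b[K' if sys.stderr.isatty() else '\r'
        sys.stderr.write(clear_line + line)
        sys.stderr.flush()
        time.sleep(0.5)
    sys.stderr.write('\n')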
@@ -12,9 +12,15 @@ from ..utils import (
     compat_str,
     encodeFilename,
     format_bytes,
+    get_exe_version,
 )


+def rtmpdump_version():
+    return get_exe_version(
+        'rtmpdump', ['--help'], r'(?i)RTMPDump\s*v?([0-9a-zA-Z._-]+)')
+
+
 class RtmpFD(FileDownloader):
     def real_download(self, filename, info_dict):
         def run_rtmpdump(args):
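get_exe_version (added to youtube_dl/utils.py in this range) runs an external binary and pulls a version string out of its output with a regex. A standalone sketch of roughly what rtmpdump_version does; this is a simplification, not youtube-dl's exact helper:

    import re
    import subprocess

    def exe_version(exe, args, version_re):
        # Run e.g. `rtmpdump --help` and grep the banner for a version
        # string; return None if the binary is missing.
        try:
            out = subprocess.Popen(
                [exe] + args, stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT).communicate()[0]
        except OSError:
            return None
        m = re.search(version_re, out.decode('ascii', 'ignore'))
        return m.group(1) if m else None

    print(exe_version('rtmpdump', ['--help'],
                      r'(?i)RTMPDump\s*v?([0-9a-zA-Z._-]+)'))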
@@ -115,6 +115,7 @@ from .fktv import (
     FKTVPosteckeIE,
 )
 from .flickr import FlickrIE
+from .folketinget import FolketingetIE
 from .fourtube import FourTubeIE
 from .franceculture import FranceCultureIE
 from .franceinter import FranceInterIE
@@ -127,6 +128,7 @@ from .francetv import (
 )
 from .freesound import FreesoundIE
 from .freespeech import FreespeechIE
+from .freevideo import FreeVideoIE
 from .funnyordie import FunnyOrDieIE
 from .gamekings import GamekingsIE
 from .gameone import (
@@ -141,6 +143,7 @@ from .generic import GenericIE
 from .glide import GlideIE
 from .globo import GloboIE
 from .godtube import GodTubeIE
+from .goldenmoustache import GoldenMoustacheIE
 from .golem import GolemIE
 from .googleplus import GooglePlusIE
 from .googlesearch import GoogleSearchIE
@@ -323,6 +326,7 @@ from .sbs import SBSIE
 from .scivee import SciVeeIE
 from .screencast import ScreencastIE
 from .servingsys import ServingSysIE
+from .sexu import SexuIE
 from .sexykarma import SexyKarmaIE
 from .shared import SharedIE
 from .sharesix import ShareSixIE
@@ -376,6 +380,7 @@ from .teachingchannel import TeachingChannelIE
 from .teamcoco import TeamcocoIE
 from .techtalks import TechTalksIE
 from .ted import TEDIE
+from .telebruxelles import TeleBruxellesIE
 from .telecinco import TelecincoIE
 from .telemb import TeleMBIE
 from .tenplay import TenPlayIE
@@ -421,6 +426,7 @@ from .vesti import VestiIE
 from .vevo import VevoIE
 from .vgtv import VGTVIE
 from .vh1 import VH1IE
+from .vice import ViceIE
 from .viddler import ViddlerIE
 from .videobam import VideoBamIE
 from .videodetective import VideoDetectiveIE
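Each added import above (FolketingetIE, FreeVideoIE, GoldenMoustacheIE, SexuIE, TeleBruxellesIE, ViceIE) registers a new extractor simply by making the class visible in youtube_dl/extractor/__init__.py; the module then collects every name ending in 'IE' from its own namespace. A sketch of that convention under the assumption (roughly how the real module builds its list from globals()):

    # Sketch: an import-only registry. Importing a class whose name ends
    # in 'IE' into this namespace is enough to register it.
    class FolketingetIE(object):
        pass

    class GoldenMoustacheIE(object):
        pass

    _ALL_CLASSES = [
        klass for name, klass in sorted(globals().items())
        if name.endswith('IE')
    ]
    print([k.__name__ for k in _ALL_CLASSES])
    # ['FolketingetIE', 'GoldenMoustacheIE']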
@@ -11,13 +11,13 @@ class ABCIE(InfoExtractor):
     _VALID_URL = r'http://www\.abc\.net\.au/news/[^/]+/[^/]+/(?P<id>\d+)'

     _TEST = {
-        'url': 'http://www.abc.net.au/news/2014-07-25/bringing-asylum-seekers-to-australia-would-give/5624716',
-        'md5': 'dad6f8ad011a70d9ddf887ce6d5d0742',
+        'url': 'http://www.abc.net.au/news/2014-11-05/australia-to-staff-ebola-treatment-centre-in-sierra-leone/5868334',
+        'md5': 'cb3dd03b18455a661071ee1e28344d9f',
         'info_dict': {
-            'id': '5624716',
+            'id': '5868334',
             'ext': 'mp4',
-            'title': 'Bringing asylum seekers to Australia would give them right to asylum claims: professor',
-            'description': 'md5:ba36fa5e27e5c9251fd929d339aea4af',
+            'title': 'Australia to help staff Ebola treatment centre in Sierra Leone',
+            'description': 'md5:809ad29c67a05f54eb41f2a105693a67',
         },
     }
@@ -3,12 +3,13 @@ from __future__ import unicode_literals
 import re

 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
     compat_HTTPError,
     compat_str,
     compat_urllib_parse,
     compat_urllib_parse_urlparse,
+)
+from ..utils import (
     ExtractorError,
 )
@@ -22,7 +22,7 @@ class AllocineIE(InfoExtractor):
             'id': '19546517',
             'ext': 'mp4',
             'title': 'Astérix - Le Domaine des Dieux Teaser VF',
-            'description': 'md5:4a754271d9c6f16c72629a8a993ee884',
+            'description': 'md5:abcd09ce503c6560512c14ebfdb720d2',
             'thumbnail': 're:http://.*\.jpg',
         },
     }, {
@@ -5,13 +5,12 @@ import re

 from .common import InfoExtractor
 from ..utils import (
-    ExtractorError,
     find_xpath_attr,
     unified_strdate,
-    determine_ext,
     get_element_by_id,
     get_element_by_attribute,
     int_or_none,
+    qualities,
 )

 # There are different sources of video in arte.tv, the extraction process
@@ -102,79 +101,54 @@ class ArteTVPlus7IE(InfoExtractor):
             'upload_date': unified_strdate(upload_date_str),
             'thumbnail': player_info.get('programImage') or player_info.get('VTU', {}).get('IUR'),
         }

-        all_formats = []
-        for format_id, format_dict in player_info['VSR'].items():
-            fmt = dict(format_dict)
-            fmt['format_id'] = format_id
-            all_formats.append(fmt)
-        # Some formats use the m3u8 protocol
-        all_formats = list(filter(lambda f: f.get('videoFormat') != 'M3U8', all_formats))
-        def _match_lang(f):
-            if f.get('versionCode') is None:
-                return True
-            # Return true if that format is in the language of the url
-            if lang == 'fr':
-                l = 'F'
-            elif lang == 'de':
-                l = 'A'
-            else:
-                l = lang
-            regexes = [r'VO?%s' % l, r'VO?.-ST%s' % l]
-            return any(re.match(r, f['versionCode']) for r in regexes)
-        # Some formats may not be in the same language as the url
-        # TODO: Might want not to drop videos that does not match requested language
-        # but to process those formats with lower precedence
-        formats = filter(_match_lang, all_formats)
-        formats = list(formats)  # in python3 filter returns an iterator
-        if not formats:
-            # Some videos are only available in the 'Originalversion'
-            # they aren't tagged as being in French or German
-            # Sometimes there are neither videos of requested lang code
-            # nor original version videos available
-            # For such cases we just take all_formats as is
-            formats = all_formats
-        if not formats:
-            raise ExtractorError('The formats list is empty')
-
-        if re.match(r'[A-Z]Q', formats[0]['quality']) is not None:
-            def sort_key(f):
-                return ['HQ', 'MQ', 'EQ', 'SQ'].index(f['quality'])
-        else:
-            def sort_key(f):
-                versionCode = f.get('versionCode')
-                if versionCode is None:
-                    versionCode = ''
-                return (
-                    # Sort first by quality
-                    int(f.get('height', -1)),
-                    int(f.get('bitrate', -1)),
-                    # The original version with subtitles has lower relevance
-                    re.match(r'VO-ST(F|A)', versionCode) is None,
-                    # The version with sourds/mal subtitles has also lower relevance
-                    re.match(r'VO?(F|A)-STM\1', versionCode) is None,
-                    # Prefer http downloads over m3u8
-                    0 if f['url'].endswith('m3u8') else 1,
-                )
-            formats = sorted(formats, key=sort_key)
-        def _format(format_info):
-            info = {
-                'format_id': format_info['format_id'],
-                'format_note': '%s, %s' % (format_info.get('versionCode'), format_info.get('versionLibelle')),
-                'width': int_or_none(format_info.get('width')),
-                'height': int_or_none(format_info.get('height')),
-                'tbr': int_or_none(format_info.get('bitrate')),
-            }
-            if format_info['mediaType'] == 'rtmp':
-                info['url'] = format_info['streamer']
-                info['play_path'] = 'mp4:' + format_info['url']
-                info['ext'] = 'flv'
-            else:
-                info['url'] = format_info['url']
-                info['ext'] = determine_ext(info['url'])
-            return info
-        info_dict['formats'] = [_format(f) for f in formats]
+        qfunc = qualities(['HQ', 'MQ', 'EQ', 'SQ'])
+
+        formats = []
+        for format_id, format_dict in player_info['VSR'].items():
+            f = dict(format_dict)
+            versionCode = f.get('versionCode')
+            langcode = {
+                'fr': 'F',
+                'de': 'A',
+            }.get(lang, lang)
+            lang_rexs = [r'VO?%s' % langcode, r'VO?.-ST%s' % langcode]
+            lang_pref = (
+                None if versionCode is None else (
+                    10 if any(re.match(r, versionCode) for r in lang_rexs)
+                    else -10))
+            source_pref = 0
+            if versionCode is not None:
+                # The original version with subtitles has lower relevance
+                if re.match(r'VO-ST(F|A)', versionCode):
+                    source_pref -= 10
+                # The version with sourds/mal subtitles has also lower relevance
+                elif re.match(r'VO?(F|A)-STM\1', versionCode):
+                    source_pref -= 9
+            format = {
+                'format_id': format_id,
+                'preference': -10 if f.get('videoFormat') == 'M3U8' else None,
+                'language_preference': lang_pref,
+                'format_note': '%s, %s' % (f.get('versionCode'), f.get('versionLibelle')),
+                'width': int_or_none(f.get('width')),
+                'height': int_or_none(f.get('height')),
+                'tbr': int_or_none(f.get('bitrate')),
+                'quality': qfunc(f['quality']),
+                'source_preference': source_pref,
+            }
+
+            if f.get('mediaType') == 'rtmp':
+                format['url'] = f['streamer']
+                format['play_path'] = 'mp4:' + f['url']
+                format['ext'] = 'flv'
+            else:
+                format['url'] = f['url']
+
+            formats.append(format)
+
+        self._sort_formats(formats)
+
+        info_dict['formats'] = formats
         return info_dict
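The arte.tv refactor above stops filtering and hand-sorting formats: every format is kept and annotated with language_preference, quality and source_preference, and the ordering is left to _sort_formats. The net effect can be sketched with a plain sort key (a simplification of youtube-dl's comparator; the format_ids below are made up):

    # Worst first, best last: the subtitled original outranks the wrong
    # language, and the native dub outranks both.
    formats = [
        {'format_id': 'HQ_VF',     'language_preference': 10,  'height': 720, 'source_preference': 0},
        {'format_id': 'HQ_VO-STF', 'language_preference': 10,  'height': 720, 'source_preference': -10},
        {'format_id': 'HQ_VA',     'language_preference': -10, 'height': 720, 'source_preference': 0},
    ]
    formats.sort(key=lambda f: (
        f.get('language_preference') or 0,
        f['height'],
        f.get('source_preference') or 0,
    ))
    print([f['format_id'] for f in formats])
    # ['HQ_VA', 'HQ_VO-STF', 'HQ_VF']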
@@ -110,20 +110,25 @@ class BandcampAlbumIE(InfoExtractor):
         'url': 'http://blazo.bandcamp.com/album/jazz-format-mixtape-vol-1',
         'playlist': [
             {
-                'file': '1353101989.mp3',
                 'md5': '39bc1eded3476e927c724321ddf116cf',
                 'info_dict': {
+                    'id': '1353101989',
+                    'ext': 'mp3',
                     'title': 'Intro',
                 }
             },
             {
-                'file': '38097443.mp3',
                 'md5': '1a2c32e2691474643e912cc6cd4bffaa',
                 'info_dict': {
+                    'id': '38097443',
+                    'ext': 'mp3',
                     'title': 'Kero One - Keep It Alive (Blazo remix)',
                 }
             },
         ],
+        'info_dict': {
+            'title': 'Jazz Format Mixtape vol.1',
+        },
         'params': {
             'playlistend': 2
         },
@@ -71,11 +71,12 @@ class BlipTVIE(SubtitlesInfoExtractor):
         mobj = re.match(self._VALID_URL, url)
         lookup_id = mobj.group('lookup_id')

-        # See https://github.com/rg3/youtube-dl/issues/857
+        # See https://github.com/rg3/youtube-dl/issues/857 and
+        # https://github.com/rg3/youtube-dl/issues/4197
         if lookup_id:
             info_page = self._download_webpage(
                 'http://blip.tv/play/%s.x?p=1' % lookup_id, lookup_id, 'Resolving lookup id')
-            video_id = self._search_regex(r'data-episode-id="([0-9]+)', info_page, 'video_id')
+            video_id = self._search_regex(r'config\.id\s*=\s*"([0-9]+)', info_page, 'video_id')
         else:
             video_id = mobj.group('id')

@@ -165,9 +166,17 @@ class BlipTVIE(SubtitlesInfoExtractor):


 class BlipTVUserIE(InfoExtractor):
-    _VALID_URL = r'(?:(?:(?:https?://)?(?:\w+\.)?blip\.tv/)|bliptvuser:)(?!api\.swf)([^/]+)/*$'
+    _VALID_URL = r'(?:(?:https?://(?:\w+\.)?blip\.tv/)|bliptvuser:)(?!api\.swf)([^/]+)/*$'
     _PAGE_SIZE = 12
     IE_NAME = 'blip.tv:user'
+    _TEST = {
+        'url': 'http://blip.tv/actone',
+        'info_dict': {
+            'id': 'actone',
+            'title': 'Act One: The Series',
+        },
+        'playlist_count': 5,
+    }

     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
@@ -178,6 +187,7 @@ class BlipTVUserIE(InfoExtractor):
         page = self._download_webpage(url, username, 'Downloading user page')
         mobj = re.search(r'data-users-id="([^"]+)"', page)
         page_base = page_base % mobj.group(1)
+        title = self._og_search_title(page)

         # Download video ids using BlipTV Ajax calls. Result size per
         # query is limited (currently to 12 videos) so we need to query
@@ -214,4 +224,5 @@ class BlipTVUserIE(InfoExtractor):

         urls = ['http://blip.tv/%s' % video_id for video_id in video_ids]
         url_entries = [self.url_result(vurl, 'BlipTV') for vurl in urls]
-        return [self.playlist_result(url_entries, playlist_title=username)]
+        return self.playlist_result(
+            url_entries, playlist_title=title, playlist_id=username)
@@ -14,6 +14,7 @@ from ..utils import (
     compat_str,
     compat_urllib_request,
     compat_parse_qs,
+    compat_urllib_parse_urlparse,

     determine_ext,
     ExtractorError,
@@ -23,7 +24,7 @@ from ..utils import (


 class BrightcoveIE(InfoExtractor):
-    _VALID_URL = r'https?://.*brightcove\.com/(services|viewer).*\?(?P<query>.*)'
+    _VALID_URL = r'https?://.*brightcove\.com/(services|viewer).*?\?(?P<query>.*)'
     _FEDERATED_URL_TEMPLATE = 'http://c.brightcove.com/services/viewer/htmlFederated?%s'

     _TESTS = [
@@ -110,6 +111,8 @@ class BrightcoveIE(InfoExtractor):
             lambda m: m.group(1) + '/>', object_str)
         # Fix up some stupid XML, see https://github.com/rg3/youtube-dl/issues/1608
         object_str = object_str.replace('<--', '<!--')
+        # remove namespace to simplify extraction
+        object_str = re.sub(r'(<object[^>]*)(xmlns=".*?")', r'\1', object_str)
         object_str = fix_xml_ampersands(object_str)

         object_doc = xml.etree.ElementTree.fromstring(object_str.encode('utf-8'))
@@ -218,7 +221,7 @@ class BrightcoveIE(InfoExtractor):
         webpage = self._download_webpage(req, video_id)

         error_msg = self._html_search_regex(
-            r"<h1>We're sorry.</h1>\s*<p>(.*?)</p>", webpage,
+            r"<h1>We're sorry.</h1>([\s\n]*<p>.*?</p>)+", webpage,
             'error message', default=None)
         if error_msg is not None:
             raise ExtractorError(
@@ -260,11 +263,19 @@ class BrightcoveIE(InfoExtractor):
         formats = []
         for rend in renditions:
             url = rend['defaultURL']
+            if not url:
+                continue
             if rend['remote']:
-                # This type of renditions are served through akamaihd.net,
-                # but they don't use f4m manifests
-                url = url.replace('control/', '') + '?&v=3.3.0&fp=13&r=FEEFJ&g=RTSJIMBMPFPB'
-                ext = 'flv'
+                url_comp = compat_urllib_parse_urlparse(url)
+                if url_comp.path.endswith('.m3u8'):
+                    formats.extend(
+                        self._extract_m3u8_formats(url, info['id'], 'mp4'))
+                    continue
+                elif 'akamaihd.net' in url_comp.netloc:
+                    # This type of renditions are served through
+                    # akamaihd.net, but they don't use f4m manifests
+                    url = url.replace('control/', '') + '?&v=3.3.0&fp=13&r=FEEFJ&g=RTSJIMBMPFPB'
+                    ext = 'flv'
             else:
                 ext = determine_ext(url)
             size = rend.get('size')
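The Brightcove rendition loop now branches on the parsed URL instead of assuming every remote rendition is an Akamai flv: .m3u8 paths go through the HLS extractor, while akamaihd.net hosts keep the old query-string hack. The dispatch logic in isolation (URLs below are made up for illustration):

    from urllib.parse import urlparse  # Python 3 spelling of compat_urllib_parse_urlparse

    def classify_rendition(url, remote=True):
        # Mirror of the branch above: empty URLs are skipped, m3u8 paths
        # are HLS manifests, akamaihd.net gets the legacy flv query hack.
        if not url:
            return 'skip'
        if remote:
            parts = urlparse(url)
            if parts.path.endswith('.m3u8'):
                return 'hls'
            elif 'akamaihd.net' in parts.netloc:
                return 'akamai-flv'
        return 'direct'

    print(classify_rendition('http://host/path/master.m3u8'))         # hls
    print(classify_rendition('http://x.akamaihd.net/control/a.flv'))  # akamai-flv
    print(classify_rendition('http://cdn.example.com/video.mp4'))     # direct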
@@ -10,12 +10,12 @@ from ..utils import ExtractorError
 class BYUtvIE(InfoExtractor):
     _VALID_URL = r'^https?://(?:www\.)?byutv.org/watch/[0-9a-f-]+/(?P<video_id>[^/?#]+)'
     _TEST = {
-        'url': 'http://www.byutv.org/watch/44e80f7b-e3ba-43ba-8c51-b1fd96c94a79/granite-flats-talking',
+        'url': 'http://www.byutv.org/watch/6587b9a3-89d2-42a6-a7f7-fd2f81840a7d/studio-c-season-5-episode-5',
         'info_dict': {
-            'id': 'granite-flats-talking',
+            'id': 'studio-c-season-5-episode-5',
             'ext': 'mp4',
-            'description': 'md5:4e9a7ce60f209a33eca0ac65b4918e1c',
-            'title': 'Talking',
+            'description': 'md5:5438d33774b6bdc662f9485a340401cc',
+            'title': 'Season 5 Episode 5',
             'thumbnail': 're:^https?://.*promo.*'
         },
         'params': {
@@ -27,7 +27,7 @@ class Channel9IE(InfoExtractor):
             'title': 'Developer Kick-Off Session: Stuff We Love',
             'description': 'md5:c08d72240b7c87fcecafe2692f80e35f',
             'duration': 4576,
-            'thumbnail': 'http://media.ch9.ms/ch9/9d51/03902f2d-fc97-4d3c-b195-0bfe15a19d51/KOS002_220.jpg',
+            'thumbnail': 'http://video.ch9.ms/ch9/9d51/03902f2d-fc97-4d3c-b195-0bfe15a19d51/KOS002_220.jpg',
             'session_code': 'KOS002',
             'session_day': 'Day 1',
             'session_room': 'Arena 1A',
@@ -43,7 +43,7 @@ class Channel9IE(InfoExtractor):
             'title': 'Self-service BI with Power BI - nuclear testing',
             'description': 'md5:d1e6ecaafa7fb52a2cacdf9599829f5b',
             'duration': 1540,
-            'thumbnail': 'http://media.ch9.ms/ch9/87e1/0300391f-a455-4c72-bec3-4422f19287e1/selfservicenuk_512.jpg',
+            'thumbnail': 'http://video.ch9.ms/ch9/87e1/0300391f-a455-4c72-bec3-4422f19287e1/selfservicenuk_512.jpg',
             'authors': [ 'Mike Wilmot' ],
         },
     }
@@ -115,7 +115,7 @@ class Channel9IE(InfoExtractor):
         return self._html_search_meta('description', html, 'description')

     def _extract_duration(self, html):
-        m = re.search(r'data-video_duration="(?P<hours>\d{2}):(?P<minutes>\d{2}):(?P<seconds>\d{2})"', html)
+        m = re.search(r'"length": *"(?P<hours>\d{2}):(?P<minutes>\d{2}):(?P<seconds>\d{2})"', html)
         return ((int(m.group('hours')) * 60 * 60) + (int(m.group('minutes')) * 60) + int(m.group('seconds'))) if m else None

     def _extract_slides(self, html):
@@ -258,16 +258,17 @@ class Channel9IE(InfoExtractor):
         webpage = self._download_webpage(url, content_path, 'Downloading web page')

-        page_type_m = re.search(r'<meta name="Search.PageType" content="(?P<pagetype>[^"]+)"/>', webpage)
-        if page_type_m is None:
-            raise ExtractorError('Search.PageType not found, don\'t know how to process this page', expected=True)
-
-        page_type = page_type_m.group('pagetype')
-        if page_type == 'List':  # List page, may contain list of 'item'-like objects
-            return self._extract_list(content_path)
-        elif page_type == 'Entry.Item':  # Any 'item'-like page, may contain downloadable content
-            return self._extract_entry_item(webpage, content_path)
-        elif page_type == 'Session':  # Event session page, may contain downloadable content
-            return self._extract_session(webpage, content_path)
-        else:
-            raise ExtractorError('Unexpected Search.PageType %s' % page_type, expected=True)
+        page_type_m = re.search(r'<meta name="WT.entryid" content="(?P<pagetype>[^:]+)[^"]+"/>', webpage)
+        if page_type_m is not None:
+            page_type = page_type_m.group('pagetype')
+            if page_type == 'Entry':  # Any 'item'-like page, may contain downloadable content
+                return self._extract_entry_item(webpage, content_path)
+            elif page_type == 'Session':  # Event session page, may contain downloadable content
+                return self._extract_session(webpage, content_path)
+            elif page_type == 'Event':
+                return self._extract_list(content_path)
+            else:
+                raise ExtractorError('Unexpected WT.entryid %s' % page_type, expected=True)
+        else:  # Assuming list
+            return self._extract_list(content_path)
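The new duration source is a JSON-ish "length" field rather than a data attribute, but the HH:MM:SS arithmetic is unchanged. In isolation, using the first test's expected duration of 4576 seconds:

    import re

    def extract_duration(html):
        # Same regex and arithmetic as _extract_duration above.
        m = re.search(
            r'"length": *"(?P<hours>\d{2}):(?P<minutes>\d{2}):(?P<seconds>\d{2})"',
            html)
        if not m:
            return None
        return (int(m.group('hours')) * 60 * 60
                + int(m.group('minutes')) * 60
                + int(m.group('seconds')))

    print(extract_duration('{"length": "01:16:16"}'))  # 4576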
@@ -42,11 +42,12 @@ class CinemassacreIE(InfoExtractor):

         webpage = self._download_webpage(url, display_id)
         video_date = mobj.group('date_Y') + mobj.group('date_m') + mobj.group('date_d')
-        mobj = re.search(r'src="(?P<embed_url>http://player\.screenwavemedia\.com/play/[a-zA-Z]+\.php\?[^"]*\bid=(?:Cinemassacre-)?(?P<video_id>.+?))"', webpage)
+        mobj = re.search(r'src="(?P<embed_url>http://player\.screenwavemedia\.com/play/[a-zA-Z]+\.php\?[^"]*\bid=(?P<full_video_id>(?:Cinemassacre-)?(?P<video_id>.+?)))"', webpage)
         if not mobj:
             raise ExtractorError('Can\'t extract embed url and video id')
         playerdata_url = mobj.group('embed_url')
         video_id = mobj.group('video_id')
+        full_video_id = mobj.group('full_video_id')

         video_title = self._html_search_regex(
             r'<title>(?P<title>.+?)\|', webpage, 'title')
@@ -60,37 +61,52 @@ class CinemassacreIE(InfoExtractor):
         vidurl = self._search_regex(
             r'\'vidurl\'\s*:\s*"([^\']+)"', playerdata, 'vidurl').replace('\\/', '/')

-        videolist_url = self._search_regex(
-            r"file\s*:\s*'(http.+?/jwplayer\.smil)'", playerdata, 'jwplayer.smil')
-        videolist = self._download_xml(videolist_url, video_id, 'Downloading videolist XML')
-
-        formats = []
-        baseurl = vidurl[:vidurl.rfind('/')+1]
-        for video in videolist.findall('.//video'):
-            src = video.get('src')
-            if not src:
-                continue
-            file_ = src.partition(':')[-1]
-            width = int_or_none(video.get('width'))
-            height = int_or_none(video.get('height'))
-            bitrate = int_or_none(video.get('system-bitrate'))
-            format = {
-                'url': baseurl + file_,
-                'format_id': src.rpartition('.')[0].rpartition('_')[-1],
-            }
-            if width or height:
-                format.update({
-                    'tbr': bitrate // 1000 if bitrate else None,
-                    'width': width,
-                    'height': height,
-                })
-            else:
-                format.update({
-                    'abr': bitrate // 1000 if bitrate else None,
-                    'vcodec': 'none',
-                })
-            formats.append(format)
-        self._sort_formats(formats)
+        videolist_url = None
+
+        mobj = re.search(r"'videoserver'\s*:\s*'(?P<videoserver>[^']+)'", playerdata)
+        if mobj:
+            videoserver = mobj.group('videoserver')
+            mobj = re.search(r'\'vidid\'\s*:\s*"(?P<vidid>[^\']+)"', playerdata)
+            vidid = mobj.group('vidid') if mobj else full_video_id
+            videolist_url = 'http://%s/vod/smil:%s.smil/jwplayer.smil' % (videoserver, vidid)
+        else:
+            mobj = re.search(r"file\s*:\s*'(?P<smil>http.+?/jwplayer\.smil)'", playerdata)
+            if mobj:
+                videolist_url = mobj.group('smil')
+
+        if videolist_url:
+            videolist = self._download_xml(videolist_url, video_id, 'Downloading videolist XML')
+            formats = []
+            baseurl = vidurl[:vidurl.rfind('/')+1]
+            for video in videolist.findall('.//video'):
+                src = video.get('src')
+                if not src:
+                    continue
+                file_ = src.partition(':')[-1]
+                width = int_or_none(video.get('width'))
+                height = int_or_none(video.get('height'))
+                bitrate = int_or_none(video.get('system-bitrate'))
+                format = {
+                    'url': baseurl + file_,
+                    'format_id': src.rpartition('.')[0].rpartition('_')[-1],
+                }
+                if width or height:
+                    format.update({
+                        'tbr': bitrate // 1000 if bitrate else None,
+                        'width': width,
+                        'height': height,
+                    })
+                else:
+                    format.update({
+                        'abr': bitrate // 1000 if bitrate else None,
+                        'vcodec': 'none',
+                    })
+                formats.append(format)
+            self._sort_formats(formats)
+        else:
+            formats = [{
+                'url': vidurl,
+            }]

         return {
             'id': video_id,
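The fallback chain above first builds the jwplayer.smil URL from the player's 'videoserver' and 'vidid' fields, then falls back to a literal smil URL in the page, then to the bare vidurl. The URL-construction step on its own (the hostname and id in the sample playerdata are invented for illustration):

    import re

    playerdata = ("'videoserver': 'video.example-cdn.com', "
                  "'vidid': \"Cinemassacre-555\"")

    m = re.search(r"'videoserver'\s*:\s*'(?P<videoserver>[^']+)'", playerdata)
    videoserver = m.group('videoserver')
    m = re.search(r'\'vidid\'\s*:\s*"(?P<vidid>[^\']+)"', playerdata)
    vidid = m.group('vidid')
    videolist_url = 'http://%s/vod/smil:%s.smil/jwplayer.smil' % (videoserver, vidid)
    print(videolist_url)
    # http://video.example-cdn.com/vod/smil:Cinemassacre-555.smil/jwplayer.smil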
@@ -4,14 +4,16 @@ from __future__ import unicode_literals
 import re

 from .common import InfoExtractor
-from ..utils import (
-    ExtractorError,
+from ..compat import (
     compat_parse_qs,
     compat_urllib_parse,
-    remove_end,
-    HEADRequest,
     compat_HTTPError,
 )
+from ..utils import (
+    ExtractorError,
+    HEADRequest,
+    remove_end,
+)


 class CloudyIE(InfoExtractor):
@@ -16,9 +16,10 @@ class CNNIE(InfoExtractor):

    _TESTS = [{
        'url': 'http://edition.cnn.com/video/?/video/sports/2013/06/09/nadal-1-on-1.cnn',
-        'file': 'sports_2013_06_09_nadal-1-on-1.cnn.mp4',
        'md5': '3e6121ea48df7e2259fe73a0628605c4',
        'info_dict': {
+            'id': 'sports_2013_06_09_nadal-1-on-1.cnn',
+            'ext': 'mp4',
            'title': 'Nadal wins 8th French Open title',
            'description': 'World Sport\'s Amanda Davies chats with 2013 French Open champion Rafael Nadal.',
            'duration': 135,
@@ -27,9 +28,10 @@ class CNNIE(InfoExtractor):
    },
    {
        "url": "http://edition.cnn.com/video/?/video/us/2013/08/21/sot-student-gives-epic-speech.georgia-institute-of-technology&utm_source=feedburner&utm_medium=feed&utm_campaign=Feed%3A+rss%2Fcnn_topstories+%28RSS%3A+Top+Stories%29",
-        "file": "us_2013_08_21_sot-student-gives-epic-speech.georgia-institute-of-technology.mp4",
        "md5": "b5cc60c60a3477d185af8f19a2a26f4e",
        "info_dict": {
+            'id': 'us/2013/08/21/sot-student-gives-epic-speech.georgia-institute-of-technology',
+            'ext': 'mp4',
            "title": "Student's epic speech stuns new freshmen",
            "description": "A Georgia Tech student welcomes the incoming freshmen with an epic speech backed by music from \"2001: A Space Odyssey.\"",
            "upload_date": "20130821",
@@ -2,7 +2,6 @@ from __future__ import unicode_literals

 import re

-from .common import InfoExtractor
 from .mtv import MTVServicesInfoExtractor
 from ..utils import (
     compat_str,
@@ -31,7 +30,7 @@ class ComedyCentralIE(MTVServicesInfoExtractor):
     }


-class ComedyCentralShowsIE(InfoExtractor):
+class ComedyCentralShowsIE(MTVServicesInfoExtractor):
     IE_DESC = 'The Daily Show / The Colbert Report'
     # urls can be abbreviations like :thedailyshow or :colbert
     # urls for episodes like:
@@ -109,18 +108,8 @@ class ComedyCentralShowsIE(InfoExtractor):
         '400': (384, 216),
     }

-    @staticmethod
-    def _transform_rtmp_url(rtmp_video_url):
-        m = re.match(r'^rtmpe?://.*?/(?P<finalid>gsp\.comedystor/.*)$', rtmp_video_url)
-        if not m:
-            raise ExtractorError('Cannot transform RTMP url')
-        base = 'http://mtvnmobile.vo.llnwd.net/kip0/_pxn=1+_pxI0=Ripod-h264+_pxL0=undefined+_pxM0=+_pxK=18639+_pxE=mp4/44620/mtvnorigin/'
-        return base + m.group('finalid')
-
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url, re.VERBOSE)
-        if mobj is None:
-            raise ExtractorError('Invalid URL: %s' % url)
+        mobj = re.match(self._VALID_URL, url)

         if mobj.group('shortname'):
             if mobj.group('shortname') in ('tds', 'thedailyshow'):
@@ -212,9 +201,6 @@ class ComedyCentralShowsIE(InfoExtractor):
                 'ext': self._video_extensions.get(format, 'mp4'),
                 'height': h,
                 'width': w,
-
-                'format_note': 'HTTP 400 at the moment (patches welcome!)',
-                'preference': -100,
             })
             formats.append({
                 'format_id': 'rtmp-%s' % format,
youtube_dl/extractor/common.py
@@ -12,13 +12,14 @@ import sys
 import time
 import xml.etree.ElementTree
 
-from ..utils import (
+from ..compat import (
     compat_http_client,
     compat_urllib_error,
     compat_urllib_parse_urlparse,
     compat_urlparse,
     compat_str,
+)
+from ..utils import (
     clean_html,
     compiled_regex_type,
     ExtractorError,
@@ -42,7 +43,11 @@ class InfoExtractor(object):
     information possibly downloading the video to the file system, among
     other possible outcomes.
 
-    The dictionaries must include the following fields:
+    The type field determines the type of the result.
+    By far the most common value (and the default if _type is missing) is
+    "video", which indicates a single video.
+
+    For a video, the dictionaries must include the following fields:
 
     id:             Video identifier.
     title:          Video title, unescaped.
@@ -86,6 +91,11 @@ class InfoExtractor(object):
                                  by this field, regardless of all other values.
                                  -1 for default (order by other properties),
                                  -2 or smaller for less than default.
+                    * language_preference  Is this in the correct requested
+                                 language?
+                                 10 if it's what the URL is about,
+                                 -1 for default (don't know),
+                                 -10 otherwise, other values reserved for now.
                     * quality   Order number of the video quality of this
                                  format, irrespective of the file format.
                                  -1 for default (order by other properties),
@@ -145,6 +155,38 @@ class InfoExtractor(object):
 
     Unless mentioned otherwise, None is equivalent to absence of information.
 
+
+    _type "playlist" indicates multiple videos.
+    There must be a key "entries", which is a list or a PagedList object, each
+    element of which is a valid dictionary under this specification.
+
+    Additionally, playlists can have "title" and "id" attributes with the same
+    semantics as videos (see above).
+
+
+    _type "multi_video" indicates that there are multiple videos that
+    form a single show, for example multiple acts of an opera or TV episode.
+    It must have an entries key like a playlist and contain all the keys
+    required for a video at the same time.
+
+
+    _type "url" indicates that the video must be extracted from another
+    location, possibly by a different extractor. Its only required key is:
+    "url" - the next URL to extract.
+
+    Additionally, it may have properties believed to be identical to the
+    resolved entity, for example "title" if the title of the referred video is
+    known ahead of time.
+
+
+    _type "url_transparent" entities have the same specification as "url", but
+    indicate that the given additional information is more precise than the one
+    associated with the resolved URL.
+    This is useful when a site employs a video service that hosts the video and
+    its technical metadata, but that video service does not embed a useful
+    title, description etc.
+
+
     Subclasses of this one should re-define the _real_initialize() and
     _real_extract() methods and define a _VALID_URL regexp.
     Probably, they should also be added to the list of extractors.
@@ -403,7 +445,7 @@ class InfoExtractor(object):
             video_info['title'] = playlist_title
         return video_info
 
-    def _search_regex(self, pattern, string, name, default=_NO_DEFAULT, fatal=True, flags=0):
+    def _search_regex(self, pattern, string, name, default=_NO_DEFAULT, fatal=True, flags=0, group=None):
         """
         Perform a regex search on the given string, using a single or a list of
         patterns returning the first matching group.
@@ -424,8 +466,11 @@ class InfoExtractor(object):
             _name = name
 
         if mobj:
-            # return the first matching group
-            return next(g for g in mobj.groups() if g is not None)
+            if group is None:
+                # return the first matching group
+                return next(g for g in mobj.groups() if g is not None)
+            else:
+                return mobj.group(group)
         elif default is not _NO_DEFAULT:
             return default
         elif fatal:
@@ -435,11 +480,11 @@ class InfoExtractor(object):
                 'please report this issue on http://yt-dl.org/bug' % _name)
             return None
 
-    def _html_search_regex(self, pattern, string, name, default=_NO_DEFAULT, fatal=True, flags=0):
+    def _html_search_regex(self, pattern, string, name, default=_NO_DEFAULT, fatal=True, flags=0, group=None):
         """
         Like _search_regex, but strips HTML tags and unescapes entities.
         """
-        res = self._search_regex(pattern, string, name, default, fatal, flags)
+        res = self._search_regex(pattern, string, name, default, fatal, flags, group)
         if res:
             return clean_html(res).strip()
         else:
@@ -533,9 +578,9 @@ class InfoExtractor(object):
             display_name = name
         return self._html_search_regex(
             r'''(?ix)<meta
-                    (?=[^>]+(?:itemprop|name|property)=["\']?%s["\']?)
-                    [^>]+content=["\']([^"\']+)["\']''' % re.escape(name),
-            html, display_name, fatal=fatal, **kwargs)
+                    (?=[^>]+(?:itemprop|name|property)=(["\']?)%s\1)
+                    [^>]+content=(["\'])(?P<content>.*?)\1''' % re.escape(name),
+            html, display_name, fatal=fatal, group='content', **kwargs)
 
     def _dc_search_uploader(self, html):
         return self._html_search_meta('dc.creator', html, 'uploader')
@@ -611,6 +656,7 @@ class InfoExtractor(object):
 
         return (
             preference,
+            f.get('language_preference') if f.get('language_preference') is not None else -1,
             f.get('quality') if f.get('quality') is not None else -1,
             f.get('height') if f.get('height') is not None else -1,
             f.get('width') if f.get('width') is not None else -1,
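The docstring additions above define the result taxonomy, and the new `group` parameter is what the rewritten `_html_search_meta` relies on. A minimal sketch with hypothetical values (not code from the repository):

```python
import re

# Hypothetical playlist-typed result of the shape documented above.
playlist_result = {
    '_type': 'playlist',
    'id': 'example-playlist',   # same semantics as a video id
    'title': 'Example playlist',
    'entries': [
        {'_type': 'url', 'url': 'http://example.com/video/1'},
        {'_type': 'url', 'url': 'http://example.com/video/2'},
    ],
}

# The new group= parameter returns a named group instead of the first
# non-None one, which lets _html_search_meta pick out (?P<content>...):
mobj = re.search(r'content=(["\'])(?P<content>.*?)\1', 'content="some value"')
assert mobj.group('content') == 'some value'
```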
youtube_dl/extractor/crunchyroll.py
@@ -17,7 +17,6 @@ from ..utils import (
     bytes_to_intlist,
     intlist_to_bytes,
     unified_strdate,
-    clean_html,
     urlencode_postdata,
 )
 from ..aes import (
@@ -265,8 +264,6 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
             if not lang_code:
                 continue
             sub_root = xml.etree.ElementTree.fromstring(subtitle)
-            if not sub_root:
-                subtitles[lang_code] = ''
             if sub_format == 'ass':
                 subtitles[lang_code] = self._convert_subtitles_to_ass(sub_root)
             else:
youtube_dl/extractor/dailymotion.py
@@ -94,7 +94,7 @@ class DailymotionIE(DailymotionBaseInfoExtractor, SubtitlesInfoExtractor):
 
         # It may just embed a vevo video:
         m_vevo = re.search(
-            r'<link rel="video_src" href="[^"]*?vevo.com[^"]*?videoId=(?P<id>[\w]*)',
+            r'<link rel="video_src" href="[^"]*?vevo.com[^"]*?video=(?P<id>[\w]*)',
            webpage)
         if m_vevo is not None:
             vevo_id = m_vevo.group('id')
youtube_dl/extractor/dropbox.py
@@ -5,7 +5,8 @@ import os.path
 import re
 
 from .common import InfoExtractor
-from ..utils import compat_urllib_parse_unquote, url_basename
+from ..compat import compat_urllib_parse_unquote
+from ..utils import url_basename
 
 
 class DropboxIE(InfoExtractor):
youtube_dl/extractor/eporner.py
@@ -20,7 +20,7 @@ class EpornerIE(InfoExtractor):
             'display_id': 'Infamous-Tiffany-Teen-Strip-Tease-Video',
             'ext': 'mp4',
             'title': 'Infamous Tiffany Teen Strip Tease Video',
-            'duration': 194,
+            'duration': 1838,
             'view_count': int,
             'age_limit': 18,
         }
@@ -57,9 +57,7 @@ class EpornerIE(InfoExtractor):
             formats.append(fmt)
         self._sort_formats(formats)
 
-        duration = parse_duration(self._search_regex(
-            r'class="mbtim">([0-9:]+)</div>', webpage, 'duration',
-            fatal=False))
+        duration = parse_duration(self._html_search_meta('duration', webpage))
         view_count = str_to_int(self._search_regex(
             r'id="cinemaviews">\s*([0-9,]+)\s*<small>views',
             webpage, 'view count', fatal=False))
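The duration now comes from a `<meta>` value fed through `parse_duration` instead of a page-specific regex, and the test expectation changes accordingly: 1838 seconds is 30:38, not the 194 (3:14) the old regex produced. A quick illustration, assuming clock-style inputs:

```python
from youtube_dl.utils import parse_duration

# 1838 seconds is 30:38, matching the corrected 'duration' test value;
# 194 seconds (3:14) was the old, wrong expectation.
assert parse_duration('30:38') == 1838
assert parse_duration('3:14') == 194
```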
youtube_dl/extractor/facebook.py
@@ -5,12 +5,14 @@ import re
 import socket
 
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
     compat_http_client,
     compat_str,
     compat_urllib_error,
     compat_urllib_parse,
     compat_urllib_request,
+)
+from ..utils import (
     urlencode_postdata,
     ExtractorError,
     limit_length,
youtube_dl/extractor/folketinget.py (new file, 75 lines)
@@ -0,0 +1,75 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..compat import compat_parse_qs
+from ..utils import (
+    int_or_none,
+    parse_duration,
+    parse_iso8601,
+    xpath_text,
+)
+
+
+class FolketingetIE(InfoExtractor):
+    IE_DESC = 'Folketinget (ft.dk; Danish parliament)'
+    _VALID_URL = r'https?://(?:www\.)?ft\.dk/webtv/video/[^?#]*?\.(?P<id>[0-9]+)\.aspx'
+    _TEST = {
+        'url': 'http://www.ft.dk/webtv/video/20141/eru/td.1165642.aspx?as=1#player',
+        'info_dict': {
+            'id': '1165642',
+            'ext': 'mp4',
+            'title': 'Åbent samråd i Erhvervsudvalget',
+            'description': 'Åbent samråd med erhvervs- og vækstministeren om regeringens politik på teleområdet',
+            'view_count': int,
+            'width': 768,
+            'height': 432,
+            'tbr': 928000,
+            'timestamp': 1416493800,
+            'upload_date': '20141120',
+            'duration': 3960,
+        },
+        'params': {
+            'skip_download': 'rtmpdump required',
+        }
+    }
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        webpage = self._download_webpage(url, video_id)
+
+        title = self._og_search_title(webpage)
+        description = self._html_search_regex(
+            r'(?s)<div class="video-item-agenda"[^>]*>(.*?)<',
+            webpage, 'description', fatal=False)
+
+        player_params = compat_parse_qs(self._search_regex(
+            r'<embed src="http://ft\.arkena\.tv/flash/ftplayer\.swf\?([^"]+)"',
+            webpage, 'player params'))
+        xml_url = player_params['xml'][0]
+        doc = self._download_xml(xml_url, video_id)
+
+        timestamp = parse_iso8601(xpath_text(doc, './/date'))
+        duration = parse_duration(xpath_text(doc, './/duration'))
+        width = int_or_none(xpath_text(doc, './/width'))
+        height = int_or_none(xpath_text(doc, './/height'))
+        view_count = int_or_none(xpath_text(doc, './/views'))
+
+        formats = [{
+            'format_id': n.attrib['bitrate'],
+            'url': xpath_text(n, './url', fatal=True),
+            'tbr': int_or_none(n.attrib['bitrate']),
+        } for n in doc.findall('.//streams/stream')]
+        self._sort_formats(formats)
+
+        return {
+            'id': video_id,
+            'title': title,
+            'formats': formats,
+            'description': description,
+            'timestamp': timestamp,
+            'width': width,
+            'height': height,
+            'duration': duration,
+            'view_count': view_count,
+        }
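One detail worth noting in the new extractor: `compat_parse_qs` maps every key to a list of values, which is why the code indexes `player_params['xml'][0]`. A small sketch with a hypothetical query string:

```python
from youtube_dl.compat import compat_parse_qs

# compat_parse_qs returns {key: [value, ...]} and percent-decodes values,
# hence player_params['xml'][0] above (hypothetical query string):
params = compat_parse_qs('xml=http%3A%2F%2Fft.arkena.tv%2Fclip.xml&autoplay=1')
assert params['xml'] == ['http://ft.arkena.tv/clip.xml']
assert params['autoplay'] == ['1']
```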
youtube_dl/extractor/freevideo.py (new file, 38 lines)
@@ -0,0 +1,38 @@
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..utils import ExtractorError
+
+
+class FreeVideoIE(InfoExtractor):
+    _VALID_URL = r'^http://www.freevideo.cz/vase-videa/(?P<id>[^.]+)\.html(?:$|[?#])'
+
+    _TEST = {
+        'url': 'http://www.freevideo.cz/vase-videa/vysukany-zadecek-22033.html',
+        'info_dict': {
+            'id': 'vysukany-zadecek-22033',
+            'ext': 'mp4',
+            "title": "vysukany-zadecek-22033",
+            "age_limit": 18,
+        },
+        'skip': 'Blocked outside .cz',
+    }
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        webpage, handle = self._download_webpage_handle(url, video_id)
+        if '//www.czechav.com/' in handle.geturl():
+            raise ExtractorError(
+                'Access to freevideo is blocked from your location',
+                expected=True)
+
+        video_url = self._search_regex(
+            r'\s+url: "(http://[a-z0-9-]+.cdn.freevideo.cz/stream/.*?/video.mp4)"',
+            webpage, 'video URL')
+
+        return {
+            'id': video_id,
+            'url': video_url,
+            'title': video_id,
+            'age_limit': 18,
+        }
youtube_dl/extractor/funnyordie.py
@@ -21,7 +21,6 @@ class FunnyOrDieIE(InfoExtractor):
         },
     }, {
         'url': 'http://www.funnyordie.com/embed/e402820827',
-        'md5': '29f4c5e5a61ca39dfd7e8348a75d0aad',
         'info_dict': {
             'id': 'e402820827',
             'ext': 'mp4',
youtube_dl/extractor/gamespot.py
@@ -8,12 +8,11 @@ from ..utils import (
     compat_urllib_parse,
     compat_urlparse,
     unescapeHTML,
-    get_meta_content,
 )
 
 
 class GameSpotIE(InfoExtractor):
-    _VALID_URL = r'(?:http://)?(?:www\.)?gamespot\.com/.*-(?P<page_id>\d+)/?'
+    _VALID_URL = r'(?:http://)?(?:www\.)?gamespot\.com/.*-(?P<id>\d+)/?'
     _TEST = {
         'url': 'http://www.gamespot.com/videos/arma-3-community-guide-sitrep-i/2300-6410818/',
         'md5': 'b2a30deaa8654fcccd43713a6b6a4825',
@@ -26,10 +25,10 @@ class GameSpotIE(InfoExtractor):
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        page_id = mobj.group('page_id')
+        page_id = self._match_id(url)
         webpage = self._download_webpage(url, page_id)
-        data_video_json = self._search_regex(r'data-video=["\'](.*?)["\']', webpage, 'data video')
+        data_video_json = self._search_regex(
+            r'data-video=["\'](.*?)["\']', webpage, 'data video')
         data_video = json.loads(unescapeHTML(data_video_json))
 
         # Transform the manifest url to a link to the mp4 files
@@ -41,7 +40,8 @@ class GameSpotIE(InfoExtractor):
         http_path = f4m_path[1:].split('/', 1)[1]
         http_template = re.sub(QUALITIES_RE, r'%s', http_path)
         http_template = http_template.replace('.csmil/manifest.f4m', '')
-        http_template = compat_urlparse.urljoin('http://video.gamespotcdn.com/', http_template)
+        http_template = compat_urlparse.urljoin(
+            'http://video.gamespotcdn.com/', http_template)
         formats = []
         for q in qualities:
             formats.append({
@@ -52,8 +52,9 @@ class GameSpotIE(InfoExtractor):
 
         return {
             'id': data_video['guid'],
+            'display_id': page_id,
             'title': compat_urllib_parse.unquote(data_video['title']),
             'formats': formats,
-            'description': get_meta_content('description', webpage),
+            'description': self._html_search_meta('description', webpage),
             'thumbnail': self._og_search_thumbnail(webpage),
         }
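The `_match_id` shorthand only works because the `_VALID_URL` group was renamed from `page_id` to `id`. Roughly, it stands in for the `re.match` boilerplate it replaces; a sketch, not the actual `InfoExtractor` implementation:

```python
import re

# Roughly what self._match_id(url) does, assuming a (?P<id>...) group:
def match_id(valid_url, url):
    return re.match(valid_url, url).group('id')

_VALID_URL = r'(?:http://)?(?:www\.)?gamespot\.com/.*-(?P<id>\d+)/?'
url = 'http://www.gamespot.com/videos/arma-3-community-guide-sitrep-i/2300-6410818/'
assert match_id(_VALID_URL, url) == '6410818'
```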
youtube_dl/extractor/generic.py
@@ -7,11 +7,12 @@ import re
 
 from .common import InfoExtractor
 from .youtube import YoutubeIE
-from ..utils import (
+from ..compat import (
     compat_urllib_parse,
     compat_urlparse,
     compat_xml_parse_error,
+)
+from ..utils import (
     determine_ext,
     ExtractorError,
     float_or_none,
@@ -99,6 +100,22 @@ class GenericIE(InfoExtractor):
                 'uploader': 'Championat',
             },
         },
+        {
+            # https://github.com/rg3/youtube-dl/issues/3541
+            'add_ie': ['Brightcove'],
+            'url': 'http://www.kijk.nl/sbs6/leermijvrouwenkennen/videos/jqMiXKAYan2S/aflevering-1',
+            'info_dict': {
+                'id': '3866516442001',
+                'ext': 'mp4',
+                'title': 'Leer mij vrouwen kennen: Aflevering 1',
+                'description': 'Leer mij vrouwen kennen: Aflevering 1',
+                'uploader': 'SBS Broadcasting',
+            },
+            'skip': 'Restricted to Netherlands',
+            'params': {
+                'skip_download': True,  # m3u8 download
+            },
+        },
         # Direct link to a video
         {
             'url': 'http://media.w3.org/2010/05/sintel/trailer.mp4',
@@ -417,7 +434,17 @@ class GenericIE(InfoExtractor):
                 'title': 'Chet Chat 171 - Oct 29, 2014',
                 'upload_date': '20141029',
             }
-        }
+        },
+        # Livestream embed
+        {
+            'url': 'http://www.esa.int/Our_Activities/Space_Science/Rosetta/Philae_comet_touch-down_webcast',
+            'info_dict': {
+                'id': '67864563',
+                'ext': 'flv',
+                'upload_date': '20141112',
+                'title': 'Rosetta #CometLanding webcast HL 10',
+            }
+        },
     ]
 
     def report_following_redirect(self, new_url):
@@ -559,6 +586,7 @@ class GenericIE(InfoExtractor):
         return {
             'id': video_id,
             'title': os.path.splitext(url_basename(url))[0],
+            'direct': True,
             'formats': [{
                 'format_id': m.group('format_id'),
                 'url': url,
@@ -898,6 +926,12 @@ class GenericIE(InfoExtractor):
         if mobj is not None:
             return self.url_result(self._proto_relative_url(mobj.group('url'), scheme='http:'), 'CondeNast')
 
+        mobj = re.search(
+            r'<iframe[^>]+src="(?P<url>https?://new\.livestream\.com/[^"]+/player[^"]+)"',
+            webpage)
+        if mobj is not None:
+            return self.url_result(mobj.group('url'), 'Livestream')
+
         def check_video(vurl):
             vpath = compat_urlparse.urlparse(vurl).path
             vext = determine_ext(vpath)
@@ -945,7 +979,7 @@ class GenericIE(InfoExtractor):
         found = filter_video(re.findall(r'<meta.*?property="og:video".*?content="(.*?)"', webpage))
         if not found:
             # HTML5 video
-            found = re.findall(r'(?s)<video[^<]*(?:>.*?<source[^>]*)?\s+src="([^"]+)"', webpage)
+            found = re.findall(r'(?s)<video[^<]*(?:>.*?<source[^>]*)?\s+src=["\'](.*?)["\']', webpage)
         if not found:
             found = re.search(
                 r'(?i)<meta\s+(?=(?:[a-z-]+="[^"]+"\s+)*http-equiv="refresh")'
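The new Livestream embed detection is a plain regex pass over the page source before the generic fallbacks run. Checking the pattern against a hypothetical embed snippet of the kind the new ESA test page contains:

```python
import re

LIVESTREAM_RE = r'<iframe[^>]+src="(?P<url>https?://new\.livestream\.com/[^"]+/player[^"]+)"'

# Hypothetical embed markup:
html = ('<iframe src="https://new.livestream.com/accounts/362/events/'
        '3557232/videos/67864563/player?autoPlay=false" width="640"></iframe>')
mobj = re.search(LIVESTREAM_RE, html)
assert mobj.group('url').endswith('/player?autoPlay=false')
```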
youtube_dl/extractor/globo.py
@@ -5,13 +5,15 @@ import random
 import math
 
 from .common import InfoExtractor
-from ..utils import (
-    ExtractorError,
-    float_or_none,
+from ..compat import (
     compat_str,
     compat_chr,
     compat_ord,
 )
+from ..utils import (
+    ExtractorError,
+    float_or_none,
+)
 
 
 class GloboIE(InfoExtractor):
youtube_dl/extractor/goldenmoustache.py (new file, 57 lines)
@@ -0,0 +1,57 @@
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..utils import (
+    int_or_none,
+)
+
+
+class GoldenMoustacheIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?goldenmoustache\.com/(?P<display_id>[\w-]+)-(?P<id>\d+)'
+    _TESTS = [{
+        'url': 'http://www.goldenmoustache.com/suricate-le-poker-3700/',
+        'md5': '0f904432fa07da5054d6c8beb5efb51a',
+        'info_dict': {
+            'id': '3700',
+            'ext': 'mp4',
+            'title': 'Suricate - Le Poker',
+            'description': 'md5:3d1f242f44f8c8cb0a106f1fd08e5dc9',
+            'thumbnail': 're:^https?://.*\.jpg$',
+            'view_count': int,
+        }
+    }, {
+        'url': 'http://www.goldenmoustache.com/le-lab-tout-effacer-mc-fly-et-carlito-55249/',
+        'md5': '27f0c50fb4dd5f01dc9082fc67cd5700',
+        'info_dict': {
+            'id': '55249',
+            'ext': 'mp4',
+            'title': 'Le LAB - Tout Effacer (Mc Fly et Carlito)',
+            'description': 'md5:9b7fbf11023fb2250bd4b185e3de3b2a',
+            'thumbnail': 're:^https?://.*\.(?:png|jpg)$',
+            'view_count': int,
+        }
+    }]
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        webpage = self._download_webpage(url, video_id)
+
+        video_url = self._html_search_regex(
+            r'data-src-type="mp4" data-src="([^"]+)"', webpage, 'video URL')
+        title = self._html_search_regex(
+            r'<title>(.*?)(?: - Golden Moustache)?</title>', webpage, 'title')
+        thumbnail = self._og_search_thumbnail(webpage)
+        description = self._og_search_description(webpage)
+        view_count = int_or_none(self._html_search_regex(
+            r'<strong>([0-9]+)</strong>\s*VUES</span>',
+            webpage, 'view count', fatal=False))
+
+        return {
+            'id': video_id,
+            'url': video_url,
+            'ext': 'mp4',
+            'title': title,
+            'description': description,
+            'thumbnail': thumbnail,
+            'view_count': view_count,
+        }
youtube_dl/extractor/goshgay.py
@@ -1,15 +1,11 @@
 # -*- coding: utf-8 -*-
 from __future__ import unicode_literals
 
-import re
-
 from .common import InfoExtractor
 from ..utils import (
     compat_urlparse,
-    str_to_int,
     ExtractorError,
 )
-import json
 
 
 class GoshgayIE(InfoExtractor):
@@ -27,36 +23,27 @@ class GoshgayIE(InfoExtractor):
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
+        video_id = self._match_id(url)
 
         webpage = self._download_webpage(url, video_id)
-        title = self._search_regex(r'class="video-title"><h1>(.+?)<', webpage, 'title')
+        title = self._og_search_title(webpage)
+        thumbnail = self._og_search_thumbnail(webpage)
+        family_friendly = self._html_search_meta(
+            'isFamilyFriendly', webpage, default='false')
+        config_url = self._search_regex(
+            r"'config'\s*:\s*'([^']+)'", webpage, 'config URL')
 
-        player_config = self._search_regex(
-            r'(?s)jwplayer\("player"\)\.setup\(({.+?})\)', webpage, 'config settings')
-        player_vars = json.loads(player_config.replace("'", '"'))
-        width = str_to_int(player_vars.get('width'))
-        height = str_to_int(player_vars.get('height'))
-        config_uri = player_vars.get('config')
+        config = self._download_xml(
+            config_url, video_id, 'Downloading player config XML')
 
-        if config_uri is None:
-            raise ExtractorError('Missing config URI')
-        node = self._download_xml(config_uri, video_id, 'Downloading player config XML',
-                                  errnote='Unable to download XML')
-        if node is None:
+        if config is None:
             raise ExtractorError('Missing config XML')
-        if node.tag != 'config':
+        if config.tag != 'config':
             raise ExtractorError('Missing config attribute')
 
-        fns = node.findall('file')
-        imgs = node.findall('image')
-        if len(fns) != 1:
+        fns = config.findall('file')
+        if len(fns) < 1:
             raise ExtractorError('Missing media URI')
         video_url = fns[0].text
-        if len(imgs) < 1:
-            thumbnail = None
-        else:
-            thumbnail = imgs[0].text
 
         url_comp = compat_urlparse.urlparse(url)
         ref = "%s://%s%s" % (url_comp[0], url_comp[1], url_comp[2])
@@ -65,9 +52,7 @@ class GoshgayIE(InfoExtractor):
             'id': video_id,
             'url': video_url,
             'title': title,
-            'width': width,
-            'height': height,
             'thumbnail': thumbnail,
             'http_referer': ref,
-            'age_limit': 18,
+            'age_limit': 0 if family_friendly == 'true' else 18,
         }
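The hard-coded `age_limit` gives way to the schema.org `isFamilyFriendly` meta value, with `'false'` as the default when the tag is absent. The mapping in isolation, as a sketch:

```python
# Sketch of the new age-limit rule from the diff above:
def age_limit(family_friendly):
    return 0 if family_friendly == 'true' else 18

assert age_limit('true') == 0
assert age_limit('false') == 18   # also the default when the meta tag is missing
```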
youtube_dl/extractor/grooveshark.py
@@ -8,12 +8,13 @@ import re
 
 
 from .common import InfoExtractor
-from ..utils import ExtractorError, compat_urllib_request, compat_html_parser
-from ..utils import (
+from ..compat import (
+    compat_html_parser,
     compat_urllib_parse,
+    compat_urllib_request,
     compat_urlparse,
 )
+from ..utils import ExtractorError
 
 
 class GroovesharkHtmlParser(compat_html_parser.HTMLParser):
youtube_dl/extractor/heise.py
@@ -3,7 +3,7 @@ from __future__ import unicode_literals
 
 from .common import InfoExtractor
 from ..utils import (
-    get_meta_content,
+    determine_ext,
     int_or_none,
     parse_iso8601,
 )
@@ -25,11 +25,11 @@ class HeiseIE(InfoExtractor):
             'title': (
                 "Podcast: c't uplink 3.3 – Owncloud / Tastaturen / Peilsender Smartphone"
             ),
-            'format_id': 'mp4_720',
+            'format_id': 'mp4_720p',
             'timestamp': 1411812600,
             'upload_date': '20140927',
             'description': 'In uplink-Episode 3.3 geht es darum, wie man sich von Cloud-Anbietern emanzipieren kann, worauf man beim Kauf einer Tastatur achten sollte und was Smartphones über uns verraten.',
-            'thumbnail': 're:https?://.*\.jpg$',
+            'thumbnail': 're:^https?://.*\.jpe?g$',
         }
     }
 
@@ -49,11 +49,12 @@ class HeiseIE(InfoExtractor):
         info = {
             'id': video_id,
             'thumbnail': self._og_search_thumbnail(webpage),
-            'timestamp': parse_iso8601(get_meta_content('date', webpage)),
+            'timestamp': parse_iso8601(
+                self._html_search_meta('date', webpage)),
             'description': self._og_search_description(webpage),
         }
 
-        title = get_meta_content('fulltitle', webpage)
+        title = self._html_search_meta('fulltitle', webpage)
         if title:
             info['title'] = title
         else:
@@ -64,9 +65,12 @@ class HeiseIE(InfoExtractor):
             label = source_node.attrib['label']
             height = int_or_none(self._search_regex(
                 r'^(.*?_)?([0-9]+)p$', label, 'height', default=None))
+            video_url = source_node.attrib['file']
+            ext = determine_ext(video_url, '')
             formats.append({
-                'url': source_node.attrib['file'],
+                'url': video_url,
                 'format_note': label,
+                'format_id': '%s_%s' % (ext, label),
                 'height': height,
             })
         self._sort_formats(formats)
youtube_dl/extractor/imdb.py
@@ -6,7 +6,6 @@ import json
 from .common import InfoExtractor
 from ..utils import (
     compat_urlparse,
-    get_element_by_attribute,
 )
 
 
@@ -27,10 +26,11 @@ class ImdbIE(InfoExtractor):
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
+        video_id = self._match_id(url)
         webpage = self._download_webpage('http://www.imdb.com/video/imdb/vi%s' % video_id, video_id)
-        descr = get_element_by_attribute('itemprop', 'description', webpage)
+        descr = self._html_search_regex(
+            r'(?s)<span itemprop="description">(.*?)</span>',
+            webpage, 'description', fatal=False)
         available_formats = re.findall(
             r'case \'(?P<f_id>.*?)\' :$\s+url = \'(?P<path>.*?)\'', webpage,
             flags=re.MULTILINE)
@@ -73,9 +73,7 @@ class ImdbListIE(InfoExtractor):
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        list_id = mobj.group('id')
-
+        list_id = self._match_id(url)
         webpage = self._download_webpage(url, list_id)
         entries = [
             self.url_result('http://www.imdb.com' + m, 'Imdb')
youtube_dl/extractor/izlesene.py
@@ -5,11 +5,11 @@ import re
 
 from .common import InfoExtractor
 from ..utils import (
-    get_element_by_id,
-    parse_iso8601,
     determine_ext,
-    int_or_none,
     float_or_none,
+    get_element_by_id,
+    int_or_none,
+    parse_iso8601,
     str_to_int,
 )
 
@@ -30,7 +30,7 @@ class IzleseneIE(InfoExtractor):
             'description': 'md5:253753e2655dde93f59f74b572454f6d',
             'thumbnail': 're:^http://.*\.jpg',
             'uploader_id': 'pelikzzle',
-            'timestamp': 1404298698,
+            'timestamp': 1404302298,
             'upload_date': '20140702',
             'duration': 95.395,
             'age_limit': 0,
@@ -46,7 +46,7 @@ class IzleseneIE(InfoExtractor):
             'description': 'Tarkan Dortmund 2006 Konseri',
             'thumbnail': 're:^http://.*\.jpg',
             'uploader_id': 'parlayankiz',
-            'timestamp': 1163318593,
+            'timestamp': 1163322193,
             'upload_date': '20061112',
             'duration': 253.666,
             'age_limit': 0,
@@ -55,10 +55,9 @@ class IzleseneIE(InfoExtractor):
     ]
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
-        url = 'http://www.izlesene.com/video/%s' % video_id
+        video_id = self._match_id(url)
 
+        url = 'http://www.izlesene.com/video/%s' % video_id
         webpage = self._download_webpage(url, video_id)
 
         title = self._og_search_title(webpage)
youtube_dl/extractor/laola1tv.py
@@ -4,6 +4,7 @@ import random
 import re
 
 from .common import InfoExtractor
+from ..utils import ExtractorError
 
 
 class Laola1TvIE(InfoExtractor):
youtube_dl/extractor/livestream.py
@@ -18,7 +18,7 @@ from ..utils import (
 
 class LivestreamIE(InfoExtractor):
     IE_NAME = 'livestream'
-    _VALID_URL = r'http://new\.livestream\.com/.*?/(?P<event_name>.*?)(/videos/(?P<id>\d+))?/?$'
+    _VALID_URL = r'https?://new\.livestream\.com/.*?/(?P<event_name>.*?)(/videos/(?P<id>[0-9]+)(?:/player)?)?/?(?:$|[?#])'
     _TESTS = [{
         'url': 'http://new.livestream.com/CoheedandCambria/WebsterHall/videos/4719370',
         'md5': '53274c76ba7754fb0e8d072716f2292b',
@@ -37,6 +37,9 @@ class LivestreamIE(InfoExtractor):
             'title': 'TEDCity2.0 (English)',
         },
         'playlist_mincount': 4,
+    }, {
+        'url': 'https://new.livestream.com/accounts/362/events/3557232/videos/67864563/player?autoPlay=false&height=360&mute=false&width=640',
+        'only_matching': True,
     }]
 
     def _parse_smil(self, video_id, smil_url):
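The relaxed `_VALID_URL` accepts https, an optional `/player` suffix and a trailing query string, which is exactly what the new `only_matching` test exercises:

```python
import re

_VALID_URL = r'https?://new\.livestream\.com/.*?/(?P<event_name>.*?)(/videos/(?P<id>[0-9]+)(?:/player)?)?/?(?:$|[?#])'
url = ('https://new.livestream.com/accounts/362/events/3557232/videos/'
       '67864563/player?autoPlay=false&height=360&mute=false&width=640')
mobj = re.match(_VALID_URL, url)
assert mobj.group('id') == '67864563'   # the old pattern rejected this URL
```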
youtube_dl/extractor/mailru.py
@@ -16,7 +16,7 @@ class MailRuIE(InfoExtractor):
         'url': 'http://my.mail.ru/video/top#video=/mail/sonypicturesrus/75/76',
         'md5': 'dea205f03120046894db4ebb6159879a',
         'info_dict': {
-            'id': '46301138',
+            'id': '46301138_76',
             'ext': 'mp4',
             'title': 'Новый Человек-Паук. Высокое напряжение. Восстание Электро',
             'timestamp': 1393232740,
@@ -30,7 +30,7 @@ class MailRuIE(InfoExtractor):
         'url': 'http://my.mail.ru/corp/hitech/video/news_hi-tech_mail_ru/1263.html',
         'md5': '00a91a58c3402204dcced523777b475f',
         'info_dict': {
-            'id': '46843144',
+            'id': '46843144_1263',
             'ext': 'mp4',
             'title': 'Samsung Galaxy S5 Hammer Smash Fail Battery Explosion',
             'timestamp': 1397217632,
@@ -54,30 +54,33 @@ class MailRuIE(InfoExtractor):
 
         author = video_data['author']
         uploader = author['name']
-        uploader_id = author['id']
+        uploader_id = author.get('id') or author.get('email')
+        view_count = video_data.get('views_count')
 
-        movie = video_data['movie']
-        content_id = str(movie['contentId'])
-        title = movie['title']
+        meta_data = video_data['meta']
+        content_id = '%s_%s' % (
+            meta_data.get('accId', ''), meta_data['itemId'])
+        title = meta_data['title']
         if title.endswith('.mp4'):
             title = title[:-4]
-        thumbnail = movie['poster']
-        duration = movie['duration']
-        view_count = video_data['views_count']
+        thumbnail = meta_data['poster']
+        duration = meta_data['duration']
+        timestamp = meta_data['timestamp']
 
         formats = [
             {
                 'url': video['url'],
-                'format_id': video['name'],
+                'format_id': video['key'],
+                'height': int(video['key'].rstrip('p'))
             } for video in video_data['videos']
         ]
+        self._sort_formats(formats)
 
         return {
             'id': content_id,
             'title': title,
             'thumbnail': thumbnail,
-            'timestamp': video_data['timestamp'],
+            'timestamp': timestamp,
             'uploader': uploader,
             'uploader_id': uploader_id,
             'duration': duration,
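The format `key` values are quality labels like `'720p'`, so stripping the trailing `p` yields an integer height that `_sort_formats` can order on. With a hypothetical entry from `video_data['videos']`:

```python
# Hypothetical entry of the shape the extractor consumes:
video = {'url': 'http://example.com/v.mp4', 'key': '720p'}
fmt = {
    'url': video['url'],
    'format_id': video['key'],
    'height': int(video['key'].rstrip('p')),
}
assert fmt['height'] == 720
```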
youtube_dl/extractor/mtv.py
@@ -33,7 +33,7 @@ class MTVServicesInfoExtractor(InfoExtractor):
         m = re.match(r'^rtmpe?://.*?/(?P<finalid>gsp\..+?/.*)$', rtmp_video_url)
         if not m:
             return rtmp_video_url
-        base = 'http://mtvnmobile.vo.llnwd.net/kip0/_pxn=1+_pxI0=Ripod-h264+_pxL0=undefined+_pxM0=+_pxK=18639+_pxE=mp4/44620/mtvnorigin/'
+        base = 'http://viacommtvstrmfs.fplive.net/'
         return base + m.group('finalid')
 
     def _get_feed_url(self, uri):
@@ -145,7 +145,8 @@ class MTVServicesInfoExtractor(InfoExtractor):
         idoc = self._download_xml(
             feed_url + '?' + data, video_id,
             'Downloading info', transform_source=fix_xml_ampersands)
-        return [self._get_video_info(item) for item in idoc.findall('.//item')]
+        return self.playlist_result(
+            [self._get_video_info(item) for item in idoc.findall('.//item')])
 
     def _real_extract(self, url):
         title = url_basename(url)
@@ -186,7 +187,8 @@ class MTVServicesEmbeddedIE(MTVServicesInfoExtractor):
     def _get_feed_url(self, uri):
         video_id = self._id_from_uri(uri)
         site_id = uri.replace(video_id, '')
-        config_url = 'http://media.mtvnservices.com/pmt/e1/players/{0}/config.xml'.format(site_id)
+        config_url = ('http://media.mtvnservices.com/pmt/e1/players/{0}/'
+                      'context4/context5/config.xml'.format(site_id))
         config_doc = self._download_xml(config_url, video_id)
         feed_node = config_doc.find('.//feed')
         feed_url = feed_node.text.strip().split('?')[0]
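`_transform_rtmp_url` keeps the same `gsp.` capture but now maps it onto the viacommtvstrmfs HTTP base. Tracing it with a hypothetical RTMP URL:

```python
import re

# Hypothetical RTMP URL of the shape the capture group expects:
rtmp_video_url = 'rtmpe://cp12345.edgefcs.net/ondemand/gsp.comedystor/some/path/video_720.mp4'
m = re.match(r'^rtmpe?://.*?/(?P<finalid>gsp\..+?/.*)$', rtmp_video_url)
base = 'http://viacommtvstrmfs.fplive.net/'
assert base + m.group('finalid') == \
    'http://viacommtvstrmfs.fplive.net/gsp.comedystor/some/path/video_720.mp4'
```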
youtube_dl/extractor/myspass.py
@@ -13,9 +13,10 @@ class MySpassIE(InfoExtractor):
     _VALID_URL = r'http://www\.myspass\.de/.*'
     _TEST = {
         'url': 'http://www.myspass.de/myspass/shows/tvshows/absolute-mehrheit/Absolute-Mehrheit-vom-17022013-Die-Highlights-Teil-2--/11741/',
-        'file': '11741.mp4',
         'md5': '0b49f4844a068f8b33f4b7c88405862b',
         'info_dict': {
+            'id': '11741',
+            'ext': 'mp4',
             "description": "Wer kann in die Fu\u00dfstapfen von Wolfgang Kubicki treten und die Mehrheit der Zuschauer hinter sich versammeln? Wird vielleicht sogar die Absolute Mehrheit geknackt und der Jackpot von 200.000 Euro mit nach Hause genommen?",
             "title": "Absolute Mehrheit vom 17.02.2013 - Die Highlights, Teil 2",
         },
youtube_dl/extractor/myvideo.py
@@ -7,11 +7,12 @@ import re
 import json
 
 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
     compat_ord,
     compat_urllib_parse,
     compat_urllib_request,
+)
+from ..utils import (
     ExtractorError,
 )
 
youtube_dl/extractor/ndr.py
@@ -67,7 +67,7 @@ class NDRIE(InfoExtractor):
 
         thumbnail = None
 
-        video_url = re.search(r'''3: \{src:'(?P<video>.+?)\.hi\.mp4', type:"video/mp4"},''', page)
+        video_url = re.search(r'''3: \{src:'(?P<video>.+?)\.(lo|hi|hq)\.mp4', type:"video/mp4"},''', page)
         if video_url:
             thumbnails = re.findall(r'''\d+: \{src: "([^"]+)"(?: \|\| '[^']+')?, quality: '([^']+)'}''', page)
             if thumbnails:
youtube_dl/extractor/niconico.py
@@ -12,6 +12,7 @@ from ..utils import (
     unified_strdate,
     parse_duration,
     int_or_none,
+    ExtractorError,
 )
 
 
@@ -108,6 +109,9 @@ class NiconicoIE(InfoExtractor):
             flv_info_request, video_id,
             note='Downloading flv info', errnote='Unable to download flv info')
 
+        if 'deleted=' in flv_info_webpage:
+            raise ExtractorError('The video has been deleted.',
+                expected=True)
         video_real_url = compat_urlparse.parse_qs(flv_info_webpage)['url'][0]
 
         # Start extracting information
@@ -171,7 +175,8 @@ class NiconicoPlaylistIE(InfoExtractor):
         entries = [{
             '_type': 'url',
             'ie_key': NiconicoIE.ie_key(),
-            'url': 'http://www.nicovideo.jp/watch/%s' % entry['item_id'],
+            'url': ('http://www.nicovideo.jp/watch/%s' %
+                    entry['item_data']['video_id']),
         } for entry in entries]
 
         return {
youtube_dl/extractor/npo.py
@@ -7,6 +7,7 @@ from ..utils import (
     unified_strdate,
     parse_duration,
     qualities,
+    strip_jsonp,
     url_basename,
 )
 
@@ -63,7 +64,7 @@ class NPOIE(InfoExtractor):
             'http://e.omroep.nl/metadata/aflevering/%s' % video_id,
             video_id,
             # We have to remove the javascript callback
-            transform_source=lambda j: re.sub(r'parseMetadata\((.*?)\);\n//.*$', r'\1', j)
+            transform_source=strip_jsonp,
         )
         token_page = self._download_webpage(
             'http://ida.omroep.nl/npoplayer/i.js',
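`strip_jsonp` generalizes the one-off lambda: it unwraps a JSONP callback so the payload parses as plain JSON. A sketch with a hypothetical response body, assuming the utils helper of that era:

```python
import json
from youtube_dl.utils import strip_jsonp

# Hypothetical metadata response wrapped in the parseMetadata() callback:
metadata = strip_jsonp('parseMetadata({"id": "VPWON_1220719"});')
assert json.loads(metadata) == {'id': 'VPWON_1220719'}
```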
youtube_dl/extractor/played.py
@@ -6,6 +6,7 @@ import os.path
 
 from .common import InfoExtractor
 from ..utils import (
+    ExtractorError,
     compat_urllib_parse,
     compat_urllib_request,
 )
@@ -29,6 +30,12 @@ class PlayedIE(InfoExtractor):
         video_id = self._match_id(url)
 
         orig_webpage = self._download_webpage(url, video_id)
+
+        m_error = re.search(
+            r'(?s)Reason for deletion:.*?<b class="err"[^>]*>(?P<msg>[^<]+)</b>', orig_webpage)
+        if m_error:
+            raise ExtractorError(m_error.group('msg'), expected=True)
+
         fields = re.findall(
             r'type="hidden" name="([^"]+)"\s+value="([^"]+)">', orig_webpage)
         data = dict(fields)
youtube_dl/extractor/ro220.py
@@ -1,7 +1,7 @@
 from __future__ import unicode_literals
 
 from .common import InfoExtractor
-from ..utils import compat_urllib_parse_unquote
+from ..compat import compat_urllib_parse_unquote
 
 
 class Ro220IE(InfoExtractor):
youtube_dl/extractor/rtlnl.py
@@ -38,10 +38,11 @@ class RtlXlIE(InfoExtractor):
         progname = info['abstracts'][0]['name']
         subtitle = material['title'] or info['episodes'][0]['name']
 
-        videopath = material['videopath']
-        f4m_url = 'http://manifest.us.rtl.nl' + videopath
+        # Use unencrypted m3u8 streams (See https://github.com/rg3/youtube-dl/issues/4118)
+        videopath = material['videopath'].replace('.f4m', '.m3u8')
+        m3u8_url = 'http://manifest.us.rtl.nl' + videopath
 
-        formats = self._extract_f4m_formats(f4m_url, uuid)
+        formats = self._extract_m3u8_formats(m3u8_url, uuid, ext='mp4')
 
         video_urlpart = videopath.split('/flash/')[1][:-4]
         PG_URL_TEMPLATE = 'http://pg.us.rtl.nl/rtlxl/network/%s/progressive/%s.mp4'
@@ -54,9 +55,12 @@ class RtlXlIE(InfoExtractor):
             {
                 'url': PG_URL_TEMPLATE % ('a3m', video_urlpart),
                 'format_id': 'pg-hd',
+                'quality': 0,
             }
         ])
 
+        self._sort_formats(formats)
+
         return {
             'id': uuid,
             'title': '%s - %s' % (progname, subtitle),
youtube_dl/extractor/sexu.py (new file, 61 lines)
@@ -0,0 +1,61 @@
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+
+
+class SexuIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?sexu\.com/(?P<id>\d+)'
+    _TEST = {
+        'url': 'http://sexu.com/961791/',
+        'md5': 'ff615aca9691053c94f8f10d96cd7884',
+        'info_dict': {
+            'id': '961791',
+            'ext': 'mp4',
+            'title': 'md5:4d05a19a5fc049a63dbbaf05fb71d91b',
+            'description': 'md5:c5ed8625eb386855d5a7967bd7b77a54',
+            'categories': list,  # NSFW
+            'thumbnail': 're:https?://.*\.jpg$',
+            'age_limit': 18,
+        }
+    }
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        webpage = self._download_webpage(url, video_id)
+
+        quality_arr = self._search_regex(
+            r'sources:\s*\[([^\]]+)\]', webpage, 'format string')
+        formats = [{
+            'url': fmt[0].replace('\\', ''),
+            'format_id': fmt[1],
+            'height': int(fmt[1][:3]),
+        } for fmt in re.findall(r'"file":"([^"]+)","label":"([^"]+)"', quality_arr)]
+        self._sort_formats(formats)
+
+        title = self._html_search_regex(
+            r'<title>([^<]+)\s*-\s*Sexu\.Com</title>', webpage, 'title')
+
+        description = self._html_search_meta(
+            'description', webpage, 'description')
+
+        thumbnail = self._html_search_regex(
+            r'image:\s*"([^"]+)"',
+            webpage, 'thumbnail', fatal=False)
+
+        categories_str = self._html_search_meta(
+            'keywords', webpage, 'categories')
+        categories = (
+            None if categories_str is None
+            else categories_str.split(','))
+
+        return {
+            'id': video_id,
+            'title': title,
+            'description': description,
+            'thumbnail': thumbnail,
+            'categories': categories,
+            'formats': formats,
+            'age_limit': 18,
+        }
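The new extractor builds its format list from `"file"`/`"label"` pairs inside the player's `sources` array; backslash-escaped URLs are unescaped and the label's leading digits become the height. With a hypothetical excerpt:

```python
import re

# Hypothetical fragment of the sources array the extractor parses:
quality_arr = '{"file":"http:\\/\\/cdn.example.com\\/v.mp4","label":"720p"}'
pairs = re.findall(r'"file":"([^"]+)","label":"([^"]+)"', quality_arr)
assert pairs[0][0].replace('\\', '') == 'http://cdn.example.com/v.mp4'
assert int(pairs[0][1][:3]) == 720
```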
@@ -4,11 +4,12 @@ from __future__ import unicode_literals
 import re

 from .common import InfoExtractor
-from ..utils import compat_urlparse
+from ..compat import compat_urlparse
+from .spiegeltv import SpiegeltvIE


 class SpiegelIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?spiegel\.de/video/[^/]*-(?P<videoID>[0-9]+)(?:\.html)?(?:#.*)?$'
+    _VALID_URL = r'https?://(?:www\.)?spiegel\.de/video/[^/]*-(?P<id>[0-9]+)(?:-embed)?(?:\.html)?(?:#.*)?$'
     _TESTS = [{
         'url': 'http://www.spiegel.de/video/vulkan-tungurahua-in-ecuador-ist-wieder-aktiv-video-1259285.html',
         'md5': '2c2754212136f35fb4b19767d242f66e',
@@ -29,16 +30,28 @@ class SpiegelIE(InfoExtractor):
             'description': 'md5:c2322b65e58f385a820c10fa03b2d088',
             'duration': 983,
         },
+    }, {
+        'url': 'http://www.spiegel.de/video/astronaut-alexander-gerst-von-der-iss-station-beantwortet-fragen-video-1519126-embed.html',
+        'md5': 'd8eeca6bfc8f1cd6f490eb1f44695d51',
+        'info_dict': {
+            'id': '1519126',
+            'ext': 'mp4',
+            'description': 'SPIEGEL ONLINE-Nutzer durften den deutschen Astronauten Alexander Gerst über sein Leben auf der ISS-Station befragen. Hier kommen seine Antworten auf die besten sechs Fragen.',
+            'title': 'Fragen an Astronaut Alexander Gerst: "Bekommen Sie die Tageszeiten mit?"',
+        }
     }]

     def _real_extract(self, url):
-        m = re.match(self._VALID_URL, url)
-        video_id = m.group('videoID')
+        video_id = self._match_id(url)
+        webpage, handle = self._download_webpage_handle(url, video_id)

-        webpage = self._download_webpage(url, video_id)
+        # 302 to spiegel.tv, like http://www.spiegel.de/video/der-film-zum-wochenende-die-wahrheit-ueber-maenner-video-99003272.html
+        if SpiegeltvIE.suitable(handle.geturl()):
+            return self.url_result(handle.geturl(), 'Spiegeltv')

-        title = self._html_search_regex(
-            r'<div class="module-title">(.*?)</div>', webpage, 'title')
+        title = re.sub(r'\s+', ' ', self._html_search_regex(
+            r'(?s)<(?:h1|div) class="module-title"[^>]*>(.*?)</(?:h1|div)>',
+            webpage, 'title'))
         description = self._html_search_meta('description', webpage, 'description')

         base_url = self._search_regex(
@@ -79,7 +92,7 @@ class SpiegelArticleIE(InfoExtractor):
     _VALID_URL = 'https?://www\.spiegel\.de/(?!video/)[^?#]*?-(?P<id>[0-9]+)\.html'
     IE_NAME = 'Spiegel:Article'
     IE_DESC = 'Articles on spiegel.de'
-    _TEST = {
+    _TESTS = [{
         'url': 'http://www.spiegel.de/sport/sonst/badminton-wm-die-randsportart-soll-populaerer-werden-a-987092.html',
         'info_dict': {
             'id': '1516455',
@@ -87,20 +100,34 @@ class SpiegelArticleIE(InfoExtractor):
             'title': 'Faszination Badminton: Nennt es bloß nicht Federball',
             'description': 're:^Patrick Kämnitz gehört.{100,}',
         },
-    }
+    }, {
+        'url': 'http://www.spiegel.de/wissenschaft/weltall/astronaut-alexander-gerst-antwortet-spiegel-online-lesern-a-989876.html',
+        'info_dict': {
+
+        },
+        'playlist_count': 6,
+    }]

     def _real_extract(self, url):
-        m = re.match(self._VALID_URL, url)
-        video_id = m.group('id')
+        video_id = self._match_id(url)

         webpage = self._download_webpage(url, video_id)

+        # Single video on top of the page
         video_link = self._search_regex(
             r'<a href="([^"]+)" onclick="return spOpenVideo\(this,', webpage,
-            'video page URL')
-        video_url = compat_urlparse.urljoin(
-            self.http_scheme() + '//spiegel.de/', video_link)
-
-        return {
-            '_type': 'url',
-            'url': video_url,
-        }
+            'video page URL', default=None)
+        if video_link:
+            video_url = compat_urlparse.urljoin(
+                self.http_scheme() + '//spiegel.de/', video_link)
+            return self.url_result(video_url)
+
+        # Multiple embedded videos
+        embeds = re.findall(
+            r'<div class="vid_holder[0-9]+.*?</div>\s*.*?url\s*=\s*"([^"]+)"',
+            webpage)
+        entries = [
+            self.url_result(compat_urlparse.urljoin(
+                self.http_scheme() + '//spiegel.de/', embed_path))
+            for embed_path in embeds
+        ]
+        return self.playlist_result(entries)
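The Spiegel change stops fetching blindly: `_download_webpage_handle` returns the response handle alongside the page, `handle.geturl()` is the post-302 URL, and that URL is tested against `SpiegeltvIE.suitable()` before delegating. A hedged sketch of the `suitable()` check, assuming it is essentially a match against the class's `_VALID_URL` (the real method also caches the compiled pattern):

```python
import re


class SpiegeltvSuitableSketch(object):
    # Mirrors the _VALID_URL added in the spiegeltv.py hunk below.
    _VALID_URL = r'https?://(?:www\.)?spiegel\.tv/(?:#/)?filme/(?P<id>[\-a-z0-9]+)'

    @classmethod
    def suitable(cls, url):
        # True when this extractor's URL pattern matches the (redirected) URL.
        return re.match(cls._VALID_URL, url) is not None


print(SpiegeltvSuitableSketch.suitable('http://www.spiegel.tv/filme/flug-mh370/'))   # True
print(SpiegeltvSuitableSketch.suitable('http://www.spiegel.de/video/x-1259285.html'))  # False
```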
@@ -1,13 +1,13 @@
 # coding: utf-8
 from __future__ import unicode_literals

-import re
 from .common import InfoExtractor
+from ..utils import float_or_none


 class SpiegeltvIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?spiegel\.tv/filme/(?P<id>[\-a-z0-9]+)'
-    _TEST = {
+    _VALID_URL = r'https?://(?:www\.)?spiegel\.tv/(?:#/)?filme/(?P<id>[\-a-z0-9]+)'
+    _TESTS = [{
         'url': 'http://www.spiegel.tv/filme/flug-mh370/',
         'info_dict': {
             'id': 'flug-mh370',
@@ -20,12 +20,15 @@ class SpiegeltvIE(InfoExtractor):
             # rtmp download
             'skip_download': True,
         }
-    }
+    }, {
+        'url': 'http://www.spiegel.tv/#/filme/alleskino-die-wahrheit-ueber-maenner/',
+        'only_matching': True,
+    }]

     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
+        if '/#/' in url:
+            url = url.replace('/#/', '/')
+        video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
         title = self._html_search_regex(r'<h1.*?>(.*?)</h1>', webpage, 'title')

@@ -61,12 +64,8 @@ class SpiegeltvIE(InfoExtractor):
         })

         description = media_json['subtitle']
-        duration = media_json['duration_in_ms'] / 1000.
-
-        if is_wide:
-            format = '16x9'
-        else:
-            format = '4x3'
+        duration = float_or_none(media_json.get('duration_in_ms'), scale=1000)
+        format = '16x9' if is_wide else '4x3'

         url = server + 'mp4:' + uuid + '_spiegeltv_0500_' + format + '.m4v'

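The duration line now survives a missing `duration_in_ms` key: `media_json.get(...)` may return `None`, and `float_or_none` passes that through instead of raising a `TypeError` on division. A sketch of the helper, assuming it matches the `youtube_dl.utils` signature used here:

```python
def float_or_none(v, scale=1, invscale=1, default=None):
    # Assumed shape of youtube_dl.utils.float_or_none: None-safe float
    # conversion with an optional scale divisor.
    return default if v is None else float(v) * invscale / scale


print(float_or_none(983000, scale=1000))  # 983.0
print(float_or_none(None, scale=1000))    # None (the old code would raise)
```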
@@ -1,3 +1,5 @@
+from __future__ import unicode_literals
+
 import re

 from .common import InfoExtractor
@@ -9,24 +11,23 @@ from ..utils import (


 class StanfordOpenClassroomIE(InfoExtractor):
-    IE_NAME = u'stanfordoc'
-    IE_DESC = u'Stanford Open ClassRoom'
-    _VALID_URL = r'^(?:https?://)?openclassroom\.stanford\.edu(?P<path>/?|(/MainFolder/(?:HomePage|CoursePage|VideoPage)\.php([?]course=(?P<course>[^&]+)(&video=(?P<video>[^&]+))?(&.*)?)?))$'
+    IE_NAME = 'stanfordoc'
+    IE_DESC = 'Stanford Open ClassRoom'
+    _VALID_URL = r'https?://openclassroom\.stanford\.edu(?P<path>/?|(/MainFolder/(?:HomePage|CoursePage|VideoPage)\.php([?]course=(?P<course>[^&]+)(&video=(?P<video>[^&]+))?(&.*)?)?))$'
     _TEST = {
-        u'url': u'http://openclassroom.stanford.edu/MainFolder/VideoPage.php?course=PracticalUnix&video=intro-environment&speed=100',
-        u'file': u'PracticalUnix_intro-environment.mp4',
-        u'md5': u'544a9468546059d4e80d76265b0443b8',
-        u'info_dict': {
-            u"title": u"Intro Environment"
+        'url': 'http://openclassroom.stanford.edu/MainFolder/VideoPage.php?course=PracticalUnix&video=intro-environment&speed=100',
+        'md5': '544a9468546059d4e80d76265b0443b8',
+        'info_dict': {
+            'id': 'PracticalUnix_intro-environment',
+            'ext': 'mp4',
+            'title': 'Intro Environment',
         }
     }

     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
-        if mobj is None:
-            raise ExtractorError(u'Invalid URL: %s' % url)

         if mobj.group('course') and mobj.group('video'): # A specific video
             course = mobj.group('course')
             video = mobj.group('video')
             info = {
@@ -35,7 +36,6 @@ class StanfordOpenClassroomIE(InfoExtractor):
                 'upload_date': None,
             }

-            self.report_extraction(info['id'])
             baseUrl = 'http://openclassroom.stanford.edu/MainFolder/courses/' + course + '/videos/'
             xmlUrl = baseUrl + video + '.xml'
             mdoc = self._download_xml(xmlUrl, info['id'])
@@ -43,63 +43,49 @@ class StanfordOpenClassroomIE(InfoExtractor):
                 info['title'] = mdoc.findall('./title')[0].text
                 info['url'] = baseUrl + mdoc.findall('./videoFile')[0].text
             except IndexError:
-                raise ExtractorError(u'Invalid metadata XML file')
-            info['ext'] = info['url'].rpartition('.')[2]
-            return [info]
+                raise ExtractorError('Invalid metadata XML file')
+            return info
         elif mobj.group('course'): # A course page
             course = mobj.group('course')
             info = {
                 'id': course,
-                'type': 'playlist',
+                '_type': 'playlist',
                 'uploader': None,
                 'upload_date': None,
             }

-            coursepage = self._download_webpage(url, info['id'],
-                    note='Downloading course info page',
-                    errnote='Unable to download course info page')
+            coursepage = self._download_webpage(
+                url, info['id'],
+                note='Downloading course info page',
+                errnote='Unable to download course info page')

-            info['title'] = self._html_search_regex('<h1>([^<]+)</h1>', coursepage, 'title', default=info['id'])
+            info['title'] = self._html_search_regex(
+                r'<h1>([^<]+)</h1>', coursepage, 'title', default=info['id'])

-            info['description'] = self._html_search_regex('<description>([^<]+)</description>',
-                coursepage, u'description', fatal=False)
+            info['description'] = self._html_search_regex(
+                r'(?s)<description>([^<]+)</description>',
+                coursepage, 'description', fatal=False)

             links = orderedSet(re.findall('<a href="(VideoPage.php\?[^"]+)">', coursepage))
-            info['list'] = [
-                {
-                    'type': 'reference',
-                    'url': 'http://openclassroom.stanford.edu/MainFolder/' + unescapeHTML(vpage),
-                }
-                for vpage in links]
-            results = []
-            for entry in info['list']:
-                assert entry['type'] == 'reference'
-                results += self.extract(entry['url'])
-            return results
+            info['entries'] = [self.url_result(
+                'http://openclassroom.stanford.edu/MainFolder/%s' % unescapeHTML(l)
+            ) for l in links]
+            return info
         else: # Root page
             info = {
                 'id': 'Stanford OpenClassroom',
-                'type': 'playlist',
+                '_type': 'playlist',
                 'uploader': None,
                 'upload_date': None,
             }
+            info['title'] = info['id']

             rootURL = 'http://openclassroom.stanford.edu/MainFolder/HomePage.php'
             rootpage = self._download_webpage(rootURL, info['id'],
-                errnote=u'Unable to download course info page')
-
-            info['title'] = info['id']
+                errnote='Unable to download course info page')

             links = orderedSet(re.findall('<a href="(CoursePage.php\?[^"]+)">', rootpage))
-            info['list'] = [
-                {
-                    'type': 'reference',
-                    'url': 'http://openclassroom.stanford.edu/MainFolder/' + unescapeHTML(cpage),
-                }
-                for cpage in links]
-
-            results = []
-            for entry in info['list']:
-                assert entry['type'] == 'reference'
-                results += self.extract(entry['url'])
-            return results
+            info['entries'] = [self.url_result(
+                'http://openclassroom.stanford.edu/MainFolder/%s' % unescapeHTML(l)
+            ) for l in links]
+            return info
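The rewrite drops the recursive `self.extract()` calls in favor of returning a playlist info dict whose `entries` are `url_result` references that the downloader core resolves lazily. A rough sketch of the two pieces involved, assuming their conventional youtube-dl shapes (the course id below is illustrative):

```python
def url_result(url, ie=None):
    # A '_type': 'url' entry: "extract this URL later with extractor `ie`".
    return {'_type': 'url', 'url': url, 'ie_key': ie}


course_info = {
    '_type': 'playlist',
    'id': 'PracticalUnix',
    'title': 'PracticalUnix',
    'entries': [
        url_result('http://openclassroom.stanford.edu/MainFolder/VideoPage.php'
                   '?course=PracticalUnix&video=intro-environment'),
    ],
}
```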
@@ -13,7 +13,7 @@ from ..utils import (

 class StreamcloudIE(InfoExtractor):
     IE_NAME = 'streamcloud.eu'
-    _VALID_URL = r'https?://streamcloud\.eu/(?P<id>[a-zA-Z0-9_-]+)/(?P<fname>[^#?]*)\.html'
+    _VALID_URL = r'https?://streamcloud\.eu/(?P<id>[a-zA-Z0-9_-]+)(?:/(?P<fname>[^#?]*)\.html)?'

     _TEST = {
         'url': 'http://streamcloud.eu/skp9j99s4bpz/youtube-dl_test_video_____________-BaW_jenozKc.mp4.html',
@@ -27,8 +27,8 @@ class StreamcloudIE(InfoExtractor):
     }

     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
+        video_id = self._match_id(url)
+        url = 'http://streamcloud.eu/%s' % video_id

         orig_webpage = self._download_webpage(url, video_id)

@@ -80,7 +80,7 @@ class SWRMediathekIE(InfoExtractor):

         if media_type == 'Video':
             fmt.update({
-                'format_note': ['144p', '288p', '544p'][quality-1],
+                'format_note': ['144p', '288p', '544p', '720p'][quality-1],
                 'vcodec': codec,
             })
         elif media_type == 'Audio':
@@ -1,27 +1,24 @@
 # -*- coding: utf-8 -*-
-import re
+from __future__ import unicode_literals

 from .common import InfoExtractor
-from ..utils import determine_ext


 class SztvHuIE(InfoExtractor):
-    _VALID_URL = r'(?:http://)?(?:(?:www\.)?sztv\.hu|www\.tvszombathely\.hu)/(?:[^/]+)/.+-(?P<id>[0-9]+)'
+    _VALID_URL = r'http://(?:(?:www\.)?sztv\.hu|www\.tvszombathely\.hu)/(?:[^/]+)/.+-(?P<id>[0-9]+)'
     _TEST = {
-        u'url': u'http://sztv.hu/hirek/cserkeszek-nepszerusitettek-a-kornyezettudatos-eletmodot-a-savaria-teren-20130909',
-        u'file': u'20130909.mp4',
-        u'md5': u'a6df607b11fb07d0e9f2ad94613375cb',
-        u'info_dict': {
-            u"title": u"Cserkészek népszerűsítették a környezettudatos életmódot a Savaria téren",
-            u"description": u'A zöld nap játékos ismeretterjesztő programjait a Magyar Cserkész Szövetség szervezte, akik az ország nyolc városában adják át tudásukat az érdeklődőknek. A PET...',
+        'url': 'http://sztv.hu/hirek/cserkeszek-nepszerusitettek-a-kornyezettudatos-eletmodot-a-savaria-teren-20130909',
+        'md5': 'a6df607b11fb07d0e9f2ad94613375cb',
+        'info_dict': {
+            'id': '20130909',
+            'ext': 'mp4',
+            'title': 'Cserkészek népszerűsítették a környezettudatos életmódot a Savaria téren',
+            'description': 'A zöld nap játékos ismeretterjesztő programjait a Magyar Cserkész Szövetség szervezte, akik az ország nyolc városában adják át tudásukat az érdeklődőknek. A PET...',
         },
-        u'skip': u'Service temporarily disabled as of 2013-11-20'
     }

     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
+        video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
         video_file = self._search_regex(
             r'file: "...:(.*?)",', webpage, 'video file')
@@ -39,7 +36,6 @@ class SztvHuIE(InfoExtractor):
             'id': video_id,
             'url': video_url,
             'title': title,
-            'ext': determine_ext(video_url),
             'description': description,
             'thumbnail': thumbnail,
         }
@@ -50,6 +50,7 @@ class TapelyIE(InfoExtractor):
         request = compat_urllib_request.Request(playlist_url)
         request.add_header('X-Requested-With', 'XMLHttpRequest')
         request.add_header('Accept', 'application/json')
+        request.add_header('Referer', url)

         playlist = self._download_json(request, display_id)

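Tapely's JSON endpoint evidently rejects requests without a Referer, so the request now carries one alongside the existing AJAX headers. A standalone illustration with the stdlib equivalent of `compat_urllib_request` (both URLs are placeholders, not the real endpoint):

```python
try:
    from urllib.request import Request  # Python 3
except ImportError:
    from urllib2 import Request  # Python 2

req = Request('https://tape.ly/some-playlist.json')  # hypothetical endpoint
req.add_header('X-Requested-With', 'XMLHttpRequest')
req.add_header('Accept', 'application/json')
req.add_header('Referer', 'https://tape.ly/some-playlist')  # the new header
print(req.header_items())
```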
@@ -38,6 +38,7 @@ class TEDIE(SubtitlesInfoExtractor):
                 'actively fooling us.'),
             'uploader': 'Dan Dennett',
             'width': 854,
+            'duration': 1308,
         }
     }, {
         'url': 'http://www.ted.com/watch/ted-institute/ted-bcg/vishal-sikka-the-beauty-and-power-of-algorithms',
@@ -57,6 +58,7 @@ class TEDIE(SubtitlesInfoExtractor):
             'title': 'Be passionate. Be courageous. Be your best.',
             'uploader': 'Gabby Giffords and Mark Kelly',
             'description': 'md5:5174aed4d0f16021b704120360f72b92',
+            'duration': 1128,
         },
     }, {
         'url': 'http://www.ted.com/playlists/who_are_the_hackers',
@@ -178,6 +180,7 @@ class TEDIE(SubtitlesInfoExtractor):
             'description': self._og_search_description(webpage),
             'subtitles': video_subtitles,
             'formats': formats,
+            'duration': talk_info.get('duration'),
         }

     def _get_available_subtitles(self, video_id, talk_info):
youtube_dl/extractor/telebruxelles.py (new file, 60 lines)
@@ -0,0 +1,60 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+
+
+class TeleBruxellesIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?telebruxelles\.be/(news|sport|dernier-jt)/?(?P<id>[^/#?]+)'
+    _TESTS = [{
+        'url': 'http://www.telebruxelles.be/news/auditions-devant-parlement-francken-galant-tres-attendus/',
+        'md5': '59439e568c9ee42fb77588b2096b214f',
+        'info_dict': {
+            'id': '11942',
+            'display_id': 'auditions-devant-parlement-francken-galant-tres-attendus',
+            'ext': 'flv',
+            'title': 'Parlement : Francken et Galant répondent aux interpellations de l’opposition',
+            'description': 're:Les auditions des ministres se poursuivent*'
+        },
+        'params': {
+            'skip_download': 'requires rtmpdump'
+        },
+    }, {
+        'url': 'http://www.telebruxelles.be/sport/basket-brussels-bat-mons-80-74/',
+        'md5': '181d3fbdcf20b909309e5aef5c6c6047',
+        'info_dict': {
+            'id': '10091',
+            'display_id': 'basket-brussels-bat-mons-80-74',
+            'ext': 'flv',
+            'title': 'Basket : le Brussels bat Mons 80-74',
+            'description': 're:^Ils l\u2019on fait ! En basket, le B*',
+        },
+        'params': {
+            'skip_download': 'requires rtmpdump'
+        },
+    }]
+
+    def _real_extract(self, url):
+        display_id = self._match_id(url)
+        webpage = self._download_webpage(url, display_id)
+
+        article_id = self._html_search_regex(
+            r"<article id=\"post-(\d+)\"", webpage, 'article ID')
+        title = self._html_search_regex(
+            r'<h1 class=\"entry-title\">(.*?)</h1>', webpage, 'title')
+        description = self._og_search_description(webpage)
+
+        rtmp_url = self._html_search_regex(
+            r"file: \"(rtmp://\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}:\d{1,5}/vod/mp4:\" \+ \"\w+\" \+ \".mp4)\"",
+            webpage, 'RTMP url')
+        rtmp_url = rtmp_url.replace("\" + \"", "")
+
+        return {
+            'id': article_id,
+            'display_id': display_id,
+            'title': title,
+            'description': description,
+            'url': rtmp_url,
+            'ext': 'flv',
+            'rtmp_live': True # if rtmpdump is not called with "--live" argument, the download is blocked and can be completed
+        }
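The stream URL is embedded in the page as a JavaScript string concatenation, so the extractor captures the whole expression and then strips the `" + "` glue. Illustrated on a made-up address:

```python
# Shape of the page's player config (address invented for illustration):
#   file: "rtmp://1.2.3.4:1935/vod/mp4:" + "abc123" + ".mp4"
captured = 'rtmp://1.2.3.4:1935/vod/mp4:" + "abc123" + ".mp4'
rtmp_url = captured.replace('" + "', '')
print(rtmp_url)  # rtmp://1.2.3.4:1935/vod/mp4:abc123.mp4
```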
@@ -29,7 +29,7 @@ class TruTubeIE(InfoExtractor):

         # filehd is always 404
         video_url = xpath_text(config, './file', 'video URL', fatal=True)
-        title = xpath_text(config, './title', 'title')
+        title = xpath_text(config, './title', 'title').strip()
         thumbnail = xpath_text(config, './image', ' thumbnail')

         return {
@@ -4,9 +4,9 @@ from __future__ import unicode_literals
 import re

 from .common import InfoExtractor
+from ..compat import compat_str
 from ..utils import (
     ExtractorError,
-    compat_str,
     parse_iso8601,
     qualities,
 )
@@ -176,8 +176,7 @@ class TVPlayIE(InfoExtractor):
     ]

     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
+        video_id = self._match_id(url)

         video = self._download_json(
             'http://playapi.mtgx.tv/v1/videos/%s' % video_id, video_id, 'Downloading video JSON')
@@ -208,6 +207,10 @@ class TVPlayIE(InfoExtractor):
                 'app': m.group('app'),
                 'play_path': m.group('playpath'),
             })
+        elif video_url.endswith('.f4m'):
+            formats.extend(self._extract_f4m_formats(
+                video_url + '?hdcore=3.5.0&plugin=aasp-3.5.0.151.81', video_id))
+            continue
         else:
             fmt.update({
                 'url': video_url,
@@ -5,7 +5,6 @@ import re
 from .common import InfoExtractor
 from ..utils import (
     compat_urlparse,
-    get_meta_content,
 )


@@ -79,7 +78,7 @@ class UstreamChannelIE(InfoExtractor):
         m = re.match(self._VALID_URL, url)
         display_id = m.group('slug')
         webpage = self._download_webpage(url, display_id)
-        channel_id = get_meta_content('ustream:channel_id', webpage)
+        channel_id = self._html_search_meta('ustream:channel_id', webpage)

         BASE = 'http://www.ustream.tv'
         next_url = '/ajax/socialstream/videos/%s/1.json' % channel_id
@@ -121,4 +121,7 @@ class VH1IE(MTVIE):
         idoc = self._download_xml(
             doc_url, video_id,
             'Downloading info', transform_source=fix_xml_ampersands)
-        return [self._get_video_info(item) for item in idoc.findall('.//item')]
+        return self.playlist_result(
+            [self._get_video_info(item) for item in idoc.findall('.//item')],
+            playlist_id=video_id,
+        )
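Returning a bare list of info dicts gives way here to `playlist_result`, which wraps the entries in the standard playlist envelope. A sketch assuming the helper's usual shape in `extractor/common.py`:

```python
def playlist_result(entries, playlist_id=None, playlist_title=None):
    # Assumed shape of InfoExtractor.playlist_result.
    result = {'_type': 'playlist', 'entries': entries}
    if playlist_id:
        result['id'] = playlist_id
    if playlist_title:
        result['title'] = playlist_title
    return result


print(playlist_result([{'id': 'a'}, {'id': 'b'}], playlist_id='1519126'))
```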
youtube_dl/extractor/vice.py (new file, 38 lines)
@@ -0,0 +1,38 @@
+from __future__ import unicode_literals
+import re
+
+from .common import InfoExtractor
+from .ooyala import OoyalaIE
+from ..utils import ExtractorError
+
+
+class ViceIE(InfoExtractor):
+    _VALID_URL = r'http://www\.vice\.com/.*?/(?P<name>.+)'
+
+    _TEST = {
+        'url': 'http://www.vice.com/Fringes/cowboy-capitalists-part-1',
+        'info_dict': {
+            'id': '43cW1mYzpia9IlestBjVpd23Yu3afAfp',
+            'ext': 'mp4',
+            'title': 'VICE_COWBOYCAPITALISTS_PART01_v1_VICE_WM_1080p.mov',
+        },
+        'params': {
+            # Requires ffmpeg (m3u8 manifest)
+            'skip_download': True,
+        },
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        name = mobj.group('name')
+        webpage = self._download_webpage(url, name)
+        try:
+            embed_code = self._search_regex(
+                r'embedCode=([^&\'"]+)', webpage,
+                'ooyala embed code')
+            ooyala_url = OoyalaIE._url_for_embed_code(embed_code)
+            print(ooyala_url)
+        except ExtractorError:
+            raise ExtractorError('The page doesn\'t contain a video', expected=True)
+        return self.url_result(ooyala_url, ie='Ooyala')
@@ -7,11 +7,13 @@ import itertools

 from .common import InfoExtractor
 from .subtitles import SubtitlesInfoExtractor
-from ..utils import (
+from ..compat import (
     compat_HTTPError,
     compat_urllib_parse,
     compat_urllib_request,
     compat_urlparse,
+)
+from ..utils import (
     ExtractorError,
     InAdvancePagedList,
     int_or_none,
@@ -37,7 +37,7 @@ class WimpIE(InfoExtractor):
         video_id = mobj.group(1)
         webpage = self._download_webpage(url, video_id)
         video_url = self._search_regex(
-            r"'file'\s*:\s*'([^']+)'", webpage, 'video URL')
+            r"[\"']file[\"']\s*[:,]\s*[\"'](.+?)[\"']", webpage, 'video URL')
         if YoutubeIE.suitable(video_url):
             self.to_screen('Found YouTube video')
             return {
@@ -27,15 +27,15 @@ class WrzutaIE(InfoExtractor):
             'description': 'md5:7fb5ef3c21c5893375fda51d9b15d9cd',
         },
     }, {
-        'url': 'http://w729.wrzuta.pl/audio/9oXJqdcndqv/david_guetta_amp_showtek_ft._vassy_-_bad',
-        'md5': '1e546a18e1c22ac6e9adce17b8961ff5',
+        'url': 'http://jolka85.wrzuta.pl/audio/063jOPX5ue2/liber_natalia_szroeder_-_teraz_ty',
+        'md5': 'bc78077859bea7bcfe4295d7d7fc9025',
         'info_dict': {
-            'id': '9oXJqdcndqv',
+            'id': '063jOPX5ue2',
             'ext': 'ogg',
-            'title': 'David Guetta & Showtek ft. Vassy - Bad',
-            'duration': 270,
-            'uploader_id': 'w729',
-            'description': 'md5:4628f01c666bbaaecefa83476cfa794a',
+            'title': 'Liber & Natalia Szroeder - Teraz Ty',
+            'duration': 203,
+            'uploader_id': 'jolka85',
+            'description': 'md5:2d2b6340f9188c8c4cd891580e481096',
         },
     }]

@@ -49,16 +49,17 @@ class WrzutaIE(InfoExtractor):

         quality = qualities(['SD', 'MQ', 'HQ', 'HD'])

-        audio_table = {'flv': 'mp3', 'webm': 'ogg'}
+        audio_table = {'flv': 'mp3', 'webm': 'ogg', '???': 'mp3'}

         embedpage = self._download_json('http://www.wrzuta.pl/npp/embed/%s/%s' % (uploader, video_id), video_id)

         formats = []
         for media in embedpage['url']:
+            fmt = media['type'].split('@')[0]
             if typ == 'audio':
-                ext = audio_table[media['type'].split('@')[0]]
+                ext = audio_table.get(fmt, fmt)
             else:
-                ext = media['type'].split('@')[0]
+                ext = fmt

             formats.append({
                 'format_id': '%s_%s' % (ext, media['quality'].lower()),
@@ -9,40 +9,30 @@ from ..utils import (


 class YouJizzIE(InfoExtractor):
-    _VALID_URL = r'^https?://(?:\w+\.)?youjizz\.com/videos/(?P<videoid>[^.]+)\.html$'
+    _VALID_URL = r'https?://(?:\w+\.)?youjizz\.com/videos/[^/#?]+-(?P<id>[0-9]+)\.html(?:$|[?#])'
     _TEST = {
         'url': 'http://www.youjizz.com/videos/zeichentrick-1-2189178.html',
-        'file': '2189178.flv',
         'md5': '07e15fa469ba384c7693fd246905547c',
         'info_dict': {
+            'id': '2189178',
+            'ext': 'flv',
             "title": "Zeichentrick 1",
             "age_limit": 18,
         }
     }

     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-
-        video_id = mobj.group('videoid')
-
-        # Get webpage content
+        video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)

         age_limit = self._rta_search(webpage)
+        video_title = self._html_search_regex(
+            r'<title>\s*(.*)\s*</title>', webpage, 'title')

-        # Get the video title
-        video_title = self._html_search_regex(r'<title>(?P<title>.*)</title>',
-                                              webpage, 'title').strip()
-
-        # Get the embed page
-        result = re.search(r'https?://www.youjizz.com/videos/embed/(?P<videoid>[0-9]+)', webpage)
-        if result is None:
-            raise ExtractorError('ERROR: unable to extract embed page')
-
-        embed_page_url = result.group(0).strip()
-        video_id = result.group('videoid')
-
-        webpage = self._download_webpage(embed_page_url, video_id)
+        embed_page_url = self._search_regex(
+            r'(https?://www.youjizz.com/videos/embed/[0-9]+)',
+            webpage, 'embed page')
+        webpage = self._download_webpage(
+            embed_page_url, video_id, note='downloading embed page')

         # Get the video URL
         m_playlist = re.search(r'so.addVariable\("playlist", ?"(?P<playlist>.+?)"\);', webpage)
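The extractor now resolves the embed page in two explicit steps: pull the embed URL out of the watch page, then download that page and mine it for the playlist variable. The first step in isolation, on a toy snippet:

```python
import re

html = '<iframe src="http://www.youjizz.com/videos/embed/2189178"></iframe>'  # toy snippet
embed_page_url = re.search(
    r'(https?://www.youjizz.com/videos/embed/[0-9]+)', html).group(1)
print(embed_page_url)  # http://www.youjizz.com/videos/embed/2189178
```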
@@ -307,6 +307,11 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
         '171': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'abr': 128, 'preference': -50},
         '172': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'abr': 256, 'preference': -50},

+        # Dash webm audio with opus inside
+        '249': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 50, 'preference': -50},
+        '250': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 70, 'preference': -50},
+        '251': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 160, 'preference': -50},
+
         # RTMP (unnamed)
         '_rtmp': {'protocol': 'rtmp'},
     }
@@ -401,6 +406,19 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
                 'format': '141',
             },
         },
+        # Controversy video
+        {
+            'url': 'https://www.youtube.com/watch?v=T4XJQO3qol8',
+            'info_dict': {
+                'id': 'T4XJQO3qol8',
+                'ext': 'mp4',
+                'upload_date': '20100909',
+                'uploader': 'The Amazing Atheist',
+                'uploader_id': 'TheAmazingAtheist',
+                'title': 'Burning Everyone\'s Koran',
+                'description': 'SUBSCRIBE: http://www.youtube.com/saturninefilms\n\nEven Obama has taken a stand against freedom on this issue: http://www.huffingtonpost.com/2010/09/09/obama-gma-interview-quran_n_710282.html',
+            }
+        }
     ]

     def __init__(self, *args, **kwargs):
@@ -510,7 +528,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):

     def _parse_sig_js(self, jscode):
         funcname = self._search_regex(
-            r'signature=([$a-zA-Z]+)', jscode,
+            r'\.sig\|\|([a-zA-Z0-9]+)\(', jscode,
             'Initial JS player signature function name')

         jsi = JSInterpreter(jscode)
@@ -661,7 +679,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
         video_id = self.extract_id(url)

         # Get video webpage
-        url = proto + '://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1' % video_id
+        url = proto + '://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1&bpctr=9999999999' % video_id
         pref_cookies = [
             c for c in self._downloader.cookiejar
             if c.domain == '.youtube.com' and c.name == 'PREF']
@@ -684,7 +702,6 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
         # Get video info
         self.report_video_info_webpage_download(video_id)
         if re.search(r'player-age-gate-content">', video_webpage) is not None:
-            self.report_age_confirmation()
             age_gate = True
             # We simulate the access to the video from www.youtube.com/v/{video_id}
             # this can be viewed without login into Youtube
@@ -692,12 +709,13 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
                 'video_id': video_id,
                 'eurl': 'https://youtube.googleapis.com/v/' + video_id,
                 'sts': self._search_regex(
-                    r'"sts"\s*:\s*(\d+)', video_webpage, 'sts'),
+                    r'"sts"\s*:\s*(\d+)', video_webpage, 'sts', default=''),
             })
             video_info_url = proto + '://www.youtube.com/get_video_info?' + data
-            video_info_webpage = self._download_webpage(video_info_url, video_id,
-                note=False,
-                errnote='unable to download video info webpage')
+            video_info_webpage = self._download_webpage(
+                video_info_url, video_id,
+                note='Refetching age-gated info webpage',
+                errnote='unable to download video info webpage')
             video_info = compat_parse_qs(video_info_webpage)
         else:
             age_gate = False
@@ -991,7 +1009,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
                     existing_format.update(f)

             except (ExtractorError, KeyError) as e:
-                self.report_warning('Skipping DASH manifest: %s' % e, video_id)
+                self.report_warning('Skipping DASH manifest: %r' % e, video_id)

         self._sort_formats(formats)

@@ -1043,6 +1061,7 @@ class YoutubePlaylistIE(YoutubeBaseInfoExtractor):
         'url': 'https://www.youtube.com/playlist?list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re',
         'info_dict': {
             'title': 'ytdl test PL',
+            'id': 'PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re',
         },
         'playlist_count': 3,
     }, {
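The signature-function regex tracks a player change: the function is no longer assigned via `signature=...` but referenced as a `.sig||fn(` fallback in the player JS. On an invented fragment shaped like the new player code:

```python
import re

jscode = 'var b=a.sig||Ix(a.s);c.set("signature",b);'  # made-up fragment
funcname = re.search(r'\.sig\|\|([a-zA-Z0-9]+)\(', jscode).group(1)
print(funcname)  # Ix
```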
@@ -61,7 +61,7 @@ class JSInterpreter(object):
             pass

         m = re.match(
-            r'^(?P<var>[a-zA-Z0-9_]+)\.(?P<member>[^(]+)(?:\(+(?P<args>[^()]*)\))?$',
+            r'^(?P<var>[$a-zA-Z0-9_]+)\.(?P<member>[^(]+)(?:\(+(?P<args>[^()]*)\))?$',
             expr)
         if m:
             variable = m.group('var')
@@ -5,9 +5,12 @@ import optparse
 import shlex
 import sys

-from .utils import (
+from .compat import (
     compat_expanduser,
     compat_getenv,
+    compat_kwargs,
+)
+from .utils import (
     get_term_width,
     write_string,
 )
@@ -110,7 +113,7 @@ def parseOpts(overrideArguments=None):
         'conflict_handler': 'resolve',
     }

-    parser = optparse.OptionParser(**kw)
+    parser = optparse.OptionParser(**compat_kwargs(kw))

     general = optparse.OptionGroup(parser, 'General Options')
     general.add_option(
@@ -259,7 +262,16 @@ def parseOpts(overrideArguments=None):
     video_format.add_option(
         '-f', '--format',
         action='store', dest='format', metavar='FORMAT', default=None,
-        help='video format code, specify the order of preference using slashes: -f 22/17/18 . -f mp4 , -f m4a and -f flv are also supported. You can also use the special names "best", "bestvideo", "bestaudio", "worst", "worstvideo" and "worstaudio". By default, youtube-dl will pick the best quality. Use commas to download multiple audio formats, such as -f 136/137/mp4/bestvideo,140/m4a/bestaudio')
+        help='video format code, specify the order of preference using'
+            ' slashes: -f 22/17/18 . -f mp4 , -f m4a and -f flv are also'
+            ' supported. You can also use the special names "best",'
+            ' "bestvideo", "bestaudio", "worst", "worstvideo" and'
+            ' "worstaudio". By default, youtube-dl will pick the best quality.'
+            ' Use commas to download multiple audio formats, such as'
+            ' -f 136/137/mp4/bestvideo,140/m4a/bestaudio.'
+            ' You can merge the video and audio of two formats into a single'
+            ' file using -f <video-format>+<audio-format> (requires ffmpeg or'
+            ' avconv), for example -f bestvideo+bestaudio.')
     video_format.add_option(
         '--all-formats',
         action='store_const', dest='format', const='all',
@@ -479,10 +491,12 @@ def parseOpts(overrideArguments=None):
              '%(format_id)s for the unique id of the format (like Youtube\'s itags: "137"), '
              '%(upload_date)s for the upload date (YYYYMMDD), '
              '%(extractor)s for the provider (youtube, metacafe, etc), '
-             '%(id)s for the video id, %(playlist)s for the playlist the video is in, '
-             '%(playlist_index)s for the position in the playlist and %% for a literal percent. '
+             '%(id)s for the video id, '
+             '%(playlist_title)s, %(playlist_id)s, or %(playlist)s (=title if present, ID otherwise) for the playlist the video is in, '
+             '%(playlist_index)s for the position in the playlist. '
              '%(height)s and %(width)s for the width and height of the video format. '
              '%(resolution)s for a textual description of the resolution of the video format. '
+             '%% for a literal percent. '
              'Use - to output to stdout. Can also be used to download to a different directory, '
              'for example with -o \'/my/downloads/%(uploader)s/%(title)s-%(id)s.%(ext)s\' .'))
     filesystem.add_option(
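`optparse.OptionParser(**kw)` breaks on Python 2 once `unicode_literals` makes the `kw` keys unicode, since `**`-expansion there demands `str` keys; `compat_kwargs` papers over that. A hedged sketch of what such a shim has to do (the real helper lives in `youtube_dl/compat.py` and may differ in detail):

```python
import sys


def compat_kwargs_sketch(kwargs):
    # Assumed behaviour: on Python 2, re-encode unicode keyword names to
    # byte strings so **-expansion accepts them; a no-op on Python 3.
    if sys.version_info[0] >= 3:
        return kwargs
    return dict((key.encode('utf-8'), value) for key, value in kwargs.items())
```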
@@ -6,10 +6,11 @@ import os
 import subprocess

 from .common import PostProcessor
+from ..compat import (
+    compat_urlretrieve,
+)
 from ..utils import (
     check_executable,
-    compat_urlretrieve,
     encodeFilename,
     PostProcessingError,
     prepend_extension,
@@ -3,10 +3,8 @@ from __future__ import unicode_literals
 import subprocess

 from .common import PostProcessor
-from ..utils import (
-    shlex_quote,
-    PostProcessingError,
-)
+from ..compat import shlex_quote
+from ..utils import PostProcessingError


 class ExecAfterDownloadPP(PostProcessor):
@@ -1,5 +1,4 @@
 import os
-import re
 import subprocess
 import sys
 import time
@@ -7,10 +6,13 @@ import time

 from .common import AudioConversionError, PostProcessor

-from ..utils import (
+from ..compat import (
     compat_subprocess_get_DEVNULL,
+)
+from ..utils import (
     encodeArgument,
     encodeFilename,
+    get_exe_version,
     is_outdated_version,
     PostProcessingError,
     prepend_extension,
@@ -19,23 +21,6 @@ from ..utils import (
 )


-def get_version(executable):
-    """ Returns the version of the specified executable,
-    or False if the executable is not present """
-    try:
-        out, err = subprocess.Popen(
-            [executable, '-version'],
-            stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()
-    except OSError:
-        return False
-    firstline = out.partition(b'\n')[0].decode('ascii', 'ignore')
-    m = re.search(r'version\s+([0-9._-a-zA-Z]+)', firstline)
-    if not m:
-        return u'present'
-    else:
-        return m.group(1)
-
-
 class FFmpegPostProcessorError(PostProcessingError):
     pass

@@ -61,7 +46,7 @@ class FFmpegPostProcessor(PostProcessor):
     @staticmethod
     def get_versions():
         programs = ['avprobe', 'avconv', 'ffmpeg', 'ffprobe']
-        return dict((program, get_version(program)) for program in programs)
+        return dict((p, get_exe_version(p, args=['-version'])) for p in programs)

     @property
     def _executable(self):
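The module-local `get_version()` removed above reappears generalized as `utils.get_exe_version`, with the probe arguments now a parameter. A hedged re-creation of the same version-sniffing logic (names here are illustrative, not the exact `utils` signature):

```python
import re
import subprocess


def get_exe_version_sketch(exe, args=('-version',)):
    # Run `exe -version` and pull a version token off the first output
    # line; False when the executable is missing, 'present' when it runs
    # but prints no recognizable version (mirrors the deleted helper).
    try:
        out = subprocess.Popen(
            [exe] + list(args),
            stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0]
    except OSError:
        return False
    firstline = out.partition(b'\n')[0].decode('ascii', 'ignore')
    m = re.search(r'version\s+([-0-9._a-zA-Z]+)', firstline)
    return m.group(1) if m else 'present'


print(get_exe_version_sketch('ffmpeg'))  # e.g. '2.4.2', or False if absent
```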
@@ -3,10 +3,12 @@ import subprocess
 import sys

 from .common import PostProcessor
+from ..compat import (
+    subprocess_check_output
+)
 from ..utils import (
     check_executable,
     hyphenate_date,
-    subprocess_check_output
 )


@ -62,15 +62,17 @@ class _ScopeDict(dict):
|
|||||||
|
|
||||||
|
|
||||||
class _AVMClass(object):
|
class _AVMClass(object):
|
||||||
def __init__(self, name_idx, name):
|
def __init__(self, name_idx, name, static_properties=None):
|
||||||
self.name_idx = name_idx
|
self.name_idx = name_idx
|
||||||
self.name = name
|
self.name = name
|
||||||
self.method_names = {}
|
self.method_names = {}
|
||||||
self.method_idxs = {}
|
self.method_idxs = {}
|
||||||
self.methods = {}
|
self.methods = {}
|
||||||
self.method_pyfunctions = {}
|
self.method_pyfunctions = {}
|
||||||
|
self.static_properties = static_properties if static_properties else {}
|
||||||
|
|
||||||
self.variables = _ScopeDict(self)
|
self.variables = _ScopeDict(self)
|
||||||
|
self.constants = {}
|
||||||
|
|
||||||
def make_object(self):
|
def make_object(self):
|
||||||
return _AVMClass_Object(self)
|
return _AVMClass_Object(self)
|
||||||
@ -148,8 +150,38 @@ def _read_byte(reader):
|
|||||||
return res
|
return res
|
||||||
|
|
||||||
|
|
||||||
|
StringClass = _AVMClass('(no name idx)', 'String')
|
||||||
|
ByteArrayClass = _AVMClass('(no name idx)', 'ByteArray')
|
||||||
|
TimerClass = _AVMClass('(no name idx)', 'Timer')
|
||||||
|
TimerEventClass = _AVMClass('(no name idx)', 'TimerEvent', {'TIMER': 'timer'})
|
||||||
|
_builtin_classes = {
|
||||||
|
StringClass.name: StringClass,
|
||||||
|
ByteArrayClass.name: ByteArrayClass,
|
||||||
|
TimerClass.name: TimerClass,
|
||||||
|
TimerEventClass.name: TimerEventClass,
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
class _Undefined(object):
|
||||||
|
def __bool__(self):
|
||||||
|
return False
|
||||||
|
__nonzero__ = __bool__
|
||||||
|
|
||||||
|
def __hash__(self):
|
||||||
|
return 0
|
||||||
|
|
||||||
|
def __str__(self):
|
||||||
|
return 'undefined'
|
||||||
|
__repr__ = __str__
|
||||||
|
|
||||||
|
undefined = _Undefined()
|
||||||
|
|
||||||
|
|
||||||
class SWFInterpreter(object):
|
class SWFInterpreter(object):
|
||||||
def __init__(self, file_contents):
|
def __init__(self, file_contents):
|
||||||
|
self._patched_functions = {
|
||||||
|
(TimerClass, 'addEventListener'): lambda params: undefined,
|
||||||
|
}
|
||||||
code_tag = next(tag
|
code_tag = next(tag
|
||||||
for tag_code, tag in _extract_tags(file_contents)
|
for tag_code, tag in _extract_tags(file_contents)
|
||||||
if tag_code == 82)
|
if tag_code == 82)
|
||||||
@ -170,11 +202,13 @@ class SWFInterpreter(object):
|
|||||||
|
|
||||||
# Constant pool
|
# Constant pool
|
||||||
int_count = u30()
|
int_count = u30()
|
||||||
|
self.constant_ints = [0]
|
||||||
for _c in range(1, int_count):
|
for _c in range(1, int_count):
|
||||||
s32()
|
self.constant_ints.append(s32())
|
||||||
|
self.constant_uints = [0]
|
||||||
uint_count = u30()
|
uint_count = u30()
|
||||||
for _c in range(1, uint_count):
|
for _c in range(1, uint_count):
|
||||||
u32()
|
self.constant_uints.append(u32())
|
||||||
double_count = u30()
|
double_count = u30()
|
||||||
read_bytes(max(0, (double_count - 1)) * 8)
|
read_bytes(max(0, (double_count - 1)) * 8)
|
||||||
string_count = u30()
|
string_count = u30()
|
||||||
@ -212,6 +246,10 @@ class SWFInterpreter(object):
|
|||||||
u30() # namespace_idx
|
u30() # namespace_idx
|
||||||
name_idx = u30()
|
name_idx = u30()
|
||||||
self.multinames.append(self.constant_strings[name_idx])
|
self.multinames.append(self.constant_strings[name_idx])
|
||||||
|
elif kind == 0x09:
|
||||||
|
name_idx = u30()
|
||||||
|
u30()
|
||||||
|
self.multinames.append(self.constant_strings[name_idx])
|
||||||
else:
|
else:
|
||||||
self.multinames.append(_Multiname(kind))
|
self.multinames.append(_Multiname(kind))
|
||||||
for _c2 in range(MULTINAME_SIZES[kind]):
|
for _c2 in range(MULTINAME_SIZES[kind]):
|
||||||
@ -258,13 +296,28 @@ class SWFInterpreter(object):
|
|||||||
kind = kind_full & 0x0f
|
kind = kind_full & 0x0f
|
||||||
attrs = kind_full >> 4
|
attrs = kind_full >> 4
|
||||||
methods = {}
|
methods = {}
|
||||||
if kind in [0x00, 0x06]: # Slot or Const
|
constants = None
|
||||||
|
if kind == 0x00: # Slot
|
||||||
u30() # Slot id
|
u30() # Slot id
|
||||||
u30() # type_name_idx
|
u30() # type_name_idx
|
||||||
vindex = u30()
|
vindex = u30()
|
||||||
if vindex != 0:
|
if vindex != 0:
|
||||||
read_byte() # vkind
|
read_byte() # vkind
|
||||||
elif kind in [0x01, 0x02, 0x03]: # Method / Getter / Setter
|
elif kind == 0x06: # Const
|
||||||
|
u30() # Slot id
|
||||||
|
u30() # type_name_idx
|
||||||
|
vindex = u30()
|
||||||
|
vkind = 'any'
|
||||||
|
if vindex != 0:
|
||||||
|
vkind = read_byte()
|
||||||
|
if vkind == 0x03: # Constant_Int
|
||||||
|
value = self.constant_ints[vindex]
|
||||||
|
elif vkind == 0x04: # Constant_UInt
|
||||||
|
value = self.constant_uints[vindex]
|
||||||
|
else:
|
||||||
|
return {}, None # Ignore silently for now
|
||||||
|
constants = {self.multinames[trait_name_idx]: value}
|
||||||
|
elif kind in (0x01, 0x02, 0x03): # Method / Getter / Setter
|
||||||
u30() # disp_id
|
u30() # disp_id
|
||||||
method_idx = u30()
|
method_idx = u30()
|
||||||
methods[self.multinames[trait_name_idx]] = method_idx
|
methods[self.multinames[trait_name_idx]] = method_idx
|
||||||
@ -283,7 +336,7 @@ class SWFInterpreter(object):
|
|||||||
for _c3 in range(metadata_count):
|
for _c3 in range(metadata_count):
|
||||||
u30() # metadata index
|
u30() # metadata index
|
||||||
|
|
||||||
return methods
|
return methods, constants
|
||||||
|
|
||||||
# Classes
|
# Classes
|
||||||
class_count = u30()
|
class_count = u30()
|
||||||
@@ -305,18 +358,22 @@ class SWFInterpreter(object):
             u30()  # iinit
             trait_count = u30()
             for _c2 in range(trait_count):
-                trait_methods = parse_traits_info()
+                trait_methods, trait_constants = parse_traits_info()
                 avm_class.register_methods(trait_methods)
+                if trait_constants:
+                    avm_class.constants.update(trait_constants)

         assert len(classes) == class_count
         self._classes_by_name = dict((c.name, c) for c in classes)

         for avm_class in classes:
-            u30()  # cinit
+            avm_class.cinit_idx = u30()
             trait_count = u30()
             for _c2 in range(trait_count):
-                trait_methods = parse_traits_info()
+                trait_methods, trait_constants = parse_traits_info()
                 avm_class.register_methods(trait_methods)
+                if trait_constants:
+                    avm_class.constants.update(trait_constants)

         # Scripts
         script_count = u30()
@@ -329,6 +386,7 @@ class SWFInterpreter(object):
         # Method bodies
         method_body_count = u30()
         Method = collections.namedtuple('Method', ['code', 'local_count'])
+        self._all_methods = []
         for _c in range(method_body_count):
             method_idx = u30()
             u30()  # max_stack
@@ -337,9 +395,10 @@ class SWFInterpreter(object):
             u30()  # max_scope_depth
             code_length = u30()
             code = read_bytes(code_length)
+            m = Method(code, local_count)
+            self._all_methods.append(m)
             for avm_class in classes:
                 if method_idx in avm_class.method_idxs:
-                    m = Method(code, local_count)
                     avm_class.methods[avm_class.method_idxs[method_idx]] = m
             exception_count = u30()
             for _c2 in range(exception_count):
@@ -354,13 +413,27 @@ class SWFInterpreter(object):

         assert p + code_reader.tell() == len(code_tag)

-    def extract_class(self, class_name):
+    def patch_function(self, avm_class, func_name, f):
+        self._patched_functions[(avm_class, func_name)] = f
+
+    def extract_class(self, class_name, call_cinit=True):
         try:
-            return self._classes_by_name[class_name]
+            res = self._classes_by_name[class_name]
         except KeyError:
             raise ExtractorError('Class %r not found' % class_name)

+        if call_cinit and hasattr(res, 'cinit_idx'):
+            res.register_methods({'$cinit': res.cinit_idx})
+            res.methods['$cinit'] = self._all_methods[res.cinit_idx]
+            cinit = self.extract_function(res, '$cinit')
+            cinit([])
+
+        return res
+
     def extract_function(self, avm_class, func_name):
+        p = self._patched_functions.get((avm_class, func_name))
+        if p:
+            return p
         if func_name in avm_class.method_pyfunctions:
             return avm_class.method_pyfunctions[func_name]
         if func_name in self._classes_by_name:
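With these changes extract_class runs the class's static initializer ($cinit) by default, so static constants are populated before any method is pulled out. A hypothetical usage sketch (class and method names made up for illustration):

```python
swfi = SWFInterpreter(swf_contents)

# $cinit is looked up in self._all_methods and executed here, filling
# avm_class.constants before any method is extracted:
player = swfi.extract_class('com.example.Player')
decrypt = swfi.extract_function(player, 'decryptSignature')
print(decrypt(['swf_signature_123']))  # extracted functions take an arg list

# patch_function() short-circuits extract_function for methods the
# interpreter should not execute (e.g. environment-dependent ones):
swfi.patch_function(player, 'now', lambda args: 0)
```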
@@ -379,10 +452,15 @@ class SWFInterpreter(object):
             registers = [avm_class.variables] + list(args) + [None] * m.local_count
             stack = []
             scopes = collections.deque([
-                self._classes_by_name, avm_class.variables])
+                self._classes_by_name, avm_class.constants, avm_class.variables])
             while True:
                 opcode = _read_byte(coder)
-                if opcode == 17:  # iftrue
+                if opcode == 9:  # label
+                    pass  # Spec says: "Do nothing."
+                elif opcode == 16:  # jump
+                    offset = s24()
+                    coder.seek(coder.tell() + offset)
+                elif opcode == 17:  # iftrue
                     offset = s24()
                     value = stack.pop()
                     if value:
@@ -392,9 +470,40 @@ class SWFInterpreter(object):
                     value = stack.pop()
                     if not value:
                         coder.seek(coder.tell() + offset)
+                elif opcode == 19:  # ifeq
+                    offset = s24()
+                    value2 = stack.pop()
+                    value1 = stack.pop()
+                    if value2 == value1:
+                        coder.seek(coder.tell() + offset)
+                elif opcode == 20:  # ifne
+                    offset = s24()
+                    value2 = stack.pop()
+                    value1 = stack.pop()
+                    if value2 != value1:
+                        coder.seek(coder.tell() + offset)
+                elif opcode == 21:  # iflt
+                    offset = s24()
+                    value2 = stack.pop()
+                    value1 = stack.pop()
+                    if value1 < value2:
+                        coder.seek(coder.tell() + offset)
+                elif opcode == 32:  # pushnull
+                    stack.append(None)
+                elif opcode == 33:  # pushundefined
+                    stack.append(undefined)
                 elif opcode == 36:  # pushbyte
                     v = _read_byte(coder)
                     stack.append(v)
+                elif opcode == 37:  # pushshort
+                    v = u30()
+                    stack.append(v)
+                elif opcode == 38:  # pushtrue
+                    stack.append(True)
+                elif opcode == 39:  # pushfalse
+                    stack.append(False)
+                elif opcode == 40:  # pushnan
+                    stack.append(float('NaN'))
                 elif opcode == 42:  # dup
                     value = stack[-1]
                     stack.append(value)
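All the branch opcodes above (jump, iftrue, iffalse, ifeq, ifne, iflt) read their target through s24(): a signed, little-endian 24-bit offset relative to the position just after the operand. A self-contained sketch of such a reader (names are illustrative):

```python
import io
import struct

def read_s24(coder):
    # Sign-extend the 3 read bytes to 4 and decode as a little-endian int32.
    bs = coder.read(3)
    assert len(bs) == 3
    pad = b'\xff' if ord(bs[2:3]) >= 0x80 else b'\x00'
    return struct.unpack('<i', bs + pad)[0]

assert read_s24(io.BytesIO(b'\x10\x00\x00')) == 16
assert read_s24(io.BytesIO(b'\xfe\xff\xff')) == -2  # backward jump
```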
@@ -419,11 +528,31 @@ class SWFInterpreter(object):
                         [stack.pop() for _ in range(arg_count)]))
                     obj = stack.pop()

-                    if isinstance(obj, _AVMClass_Object):
+                    if obj == StringClass:
+                        if mname == 'String':
+                            assert len(args) == 1
+                            assert isinstance(args[0], (
+                                int, compat_str, _Undefined))
+                            if args[0] == undefined:
+                                res = 'undefined'
+                            else:
+                                res = compat_str(args[0])
+                            stack.append(res)
+                            continue
+                        else:
+                            raise NotImplementedError(
+                                'Function String.%s is not yet implemented'
+                                % mname)
+                    elif isinstance(obj, _AVMClass_Object):
                         func = self.extract_function(obj.avm_class, mname)
                         res = func(args)
                         stack.append(res)
                         continue
+                    elif isinstance(obj, _AVMClass):
+                        func = self.extract_function(obj, mname)
+                        res = func(args)
+                        stack.append(res)
+                        continue
                     elif isinstance(obj, _ScopeDict):
                         if mname in obj.avm_class.method_names:
                             func = self.extract_function(obj.avm_class, mname)
@@ -442,6 +571,13 @@ class SWFInterpreter(object):
                             res = obj.split(args[0])
                             stack.append(res)
                             continue
+                        elif mname == 'charCodeAt':
+                            assert len(args) <= 1
+                            idx = 0 if len(args) == 0 else args[0]
+                            assert isinstance(idx, int)
+                            res = ord(obj[idx])
+                            stack.append(res)
+                            continue
                     elif isinstance(obj, list):
                         if mname == 'slice':
                             assert len(args) == 1
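The two string branches above follow ActionScript semantics rather than Python's: String(undefined) yields the literal text 'undefined', and charCodeAt() defaults its index to 0. Restated in plain Python purely for illustration:

```python
UNDEFINED = object()  # stand-in for the interpreter's `undefined` singleton

def as3_string(value):
    # String(undefined) is the text 'undefined', not an error
    return 'undefined' if value is UNDEFINED else str(value)

def as3_char_code_at(s, idx=0):
    # charCodeAt() without an argument means index 0
    return ord(s[idx])

assert as3_string(UNDEFINED) == 'undefined'
assert as3_char_code_at('AVM') == 65  # code of 'A'
```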
@@ -458,9 +594,18 @@ class SWFInterpreter(object):
                     raise NotImplementedError(
                         'Unsupported property %r on %r'
                         % (mname, obj))
+                elif opcode == 71:  # returnvoid
+                    res = undefined
+                    return res
                 elif opcode == 72:  # returnvalue
                     res = stack.pop()
                     return res
+                elif opcode == 73:  # constructsuper
+                    # Not yet implemented, just hope it works without it
+                    arg_count = u30()
+                    args = list(reversed(
+                        [stack.pop() for _ in range(arg_count)]))
+                    obj = stack.pop()
                 elif opcode == 74:  # constructproperty
                     index = u30()
                     arg_count = u30()
@@ -481,6 +626,17 @@ class SWFInterpreter(object):
                     args = list(reversed(
                         [stack.pop() for _ in range(arg_count)]))
                     obj = stack.pop()
+                    if isinstance(obj, _AVMClass_Object):
+                        func = self.extract_function(obj.avm_class, mname)
+                        res = func(args)
+                        assert res is undefined
+                        continue
+                    if isinstance(obj, _ScopeDict):
+                        assert mname in obj.avm_class.method_names
+                        func = self.extract_function(obj.avm_class, mname)
+                        res = func(args)
+                        assert res is undefined
+                        continue
                     if mname == 'reverse':
                         assert isinstance(obj, list)
                         obj.reverse()
@@ -504,7 +660,10 @@ class SWFInterpreter(object):
                             break
                     else:
                         res = scopes[0]
-                    stack.append(res[mname])
+                    if mname not in res and mname in _builtin_classes:
+                        stack.append(_builtin_classes[mname])
+                    else:
+                        stack.append(res[mname])
                 elif opcode == 94:  # findproperty
                     index = u30()
                     mname = self.multinames[index]
@@ -524,9 +683,15 @@ class SWFInterpreter(object):
                             break
                     else:
                         scope = avm_class.variables
-                    # I cannot find where static variables are initialized
-                    # so let's just return None
-                    res = scope.get(mname)
+                    if mname in scope:
+                        res = scope[mname]
+                    elif mname in _builtin_classes:
+                        res = _builtin_classes[mname]
+                    else:
+                        # Assume unitialized
+                        # TODO warn here
+                        res = undefined
                     stack.append(res)
                 elif opcode == 97:  # setproperty
                     index = u30()
@@ -548,22 +713,57 @@ class SWFInterpreter(object):
                     pname = self.multinames[index]
                     if pname == 'length':
                         obj = stack.pop()
-                        assert isinstance(obj, list)
+                        assert isinstance(obj, (compat_str, list))
                         stack.append(len(obj))
+                    elif isinstance(pname, compat_str):  # Member access
+                        obj = stack.pop()
+                        if isinstance(obj, _AVMClass):
+                            res = obj.static_properties[pname]
+                            stack.append(res)
+                            continue
+
+                        assert isinstance(obj, (dict, _ScopeDict)),\
+                            'Accessing member %r on %r' % (pname, obj)
+                        res = obj.get(pname, undefined)
+                        stack.append(res)
                     else:  # Assume attribute access
                         idx = stack.pop()
                         assert isinstance(idx, int)
                         obj = stack.pop()
                         assert isinstance(obj, list)
                         stack.append(obj[idx])
+                elif opcode == 104:  # initproperty
+                    index = u30()
+                    value = stack.pop()
+                    idx = self.multinames[index]
+                    if isinstance(idx, _Multiname):
+                        idx = stack.pop()
+                    obj = stack.pop()
+                    obj[idx] = value
                 elif opcode == 115:  # convert_i
                     value = stack.pop()
                     intvalue = int(value)
                     stack.append(intvalue)
                 elif opcode == 128:  # coerce
                     u30()
+                elif opcode == 130:  # coerce_a
+                    value = stack.pop()
+                    # um, yes, it's any value
+                    stack.append(value)
                 elif opcode == 133:  # coerce_s
                     assert isinstance(stack[-1], (type(None), compat_str))
+                elif opcode == 147:  # decrement
+                    value = stack.pop()
+                    assert isinstance(value, int)
+                    stack.append(value - 1)
+                elif opcode == 149:  # typeof
+                    value = stack.pop()
+                    return {
+                        _Undefined: 'undefined',
+                        compat_str: 'String',
+                        int: 'Number',
+                        float: 'Number',
+                    }[type(value)]
                 elif opcode == 160:  # add
                     value2 = stack.pop()
                     value1 = stack.pop()
@@ -574,16 +774,37 @@ class SWFInterpreter(object):
                     value1 = stack.pop()
                     res = value1 - value2
                     stack.append(res)
+                elif opcode == 162:  # multiply
+                    value2 = stack.pop()
+                    value1 = stack.pop()
+                    res = value1 * value2
+                    stack.append(res)
                 elif opcode == 164:  # modulo
                     value2 = stack.pop()
                     value1 = stack.pop()
                     res = value1 % value2
                     stack.append(res)
+                elif opcode == 168:  # bitand
+                    value2 = stack.pop()
+                    value1 = stack.pop()
+                    assert isinstance(value1, int)
+                    assert isinstance(value2, int)
+                    res = value1 & value2
+                    stack.append(res)
+                elif opcode == 171:  # equals
+                    value2 = stack.pop()
+                    value1 = stack.pop()
+                    result = value1 == value2
+                    stack.append(result)
                 elif opcode == 175:  # greaterequals
                     value2 = stack.pop()
                     value1 = stack.pop()
                     result = value1 >= value2
                     stack.append(result)
+                elif opcode == 192:  # increment_i
+                    value = stack.pop()
+                    assert isinstance(value, int)
+                    stack.append(value + 1)
                 elif opcode == 208:  # getlocal_0
                     stack.append(registers[0])
                 elif opcode == 209:  # getlocal_1
youtube_dl/utils.py
@@ -1,6 +1,8 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-

+from __future__ import unicode_literals
+
 import calendar
 import codecs
 import contextlib
@@ -8,7 +10,6 @@ import ctypes
 import datetime
 import email.utils
 import errno
-import getpass
 import gzip
 import itertools
 import io
@@ -29,254 +30,18 @@ import traceback
 import xml.etree.ElementTree
 import zlib

-try:
-    import urllib.request as compat_urllib_request
-except ImportError:  # Python 2
-    import urllib2 as compat_urllib_request
-
-try:
-    import urllib.error as compat_urllib_error
-except ImportError:  # Python 2
-    import urllib2 as compat_urllib_error
-
-try:
-    import urllib.parse as compat_urllib_parse
-except ImportError:  # Python 2
-    import urllib as compat_urllib_parse
-
-try:
-    from urllib.parse import urlparse as compat_urllib_parse_urlparse
-except ImportError:  # Python 2
-    from urlparse import urlparse as compat_urllib_parse_urlparse
-
-try:
-    import urllib.parse as compat_urlparse
-except ImportError:  # Python 2
-    import urlparse as compat_urlparse
-
-try:
-    import http.cookiejar as compat_cookiejar
-except ImportError:  # Python 2
-    import cookielib as compat_cookiejar
-
-try:
-    import html.entities as compat_html_entities
-except ImportError:  # Python 2
-    import htmlentitydefs as compat_html_entities
-
-try:
-    import html.parser as compat_html_parser
-except ImportError:  # Python 2
-    import HTMLParser as compat_html_parser
-
-try:
-    import http.client as compat_http_client
-except ImportError:  # Python 2
-    import httplib as compat_http_client
-
-try:
-    from urllib.error import HTTPError as compat_HTTPError
-except ImportError:  # Python 2
-    from urllib2 import HTTPError as compat_HTTPError
-
-try:
-    from urllib.request import urlretrieve as compat_urlretrieve
-except ImportError:  # Python 2
-    from urllib import urlretrieve as compat_urlretrieve
-
-
-try:
-    from subprocess import DEVNULL
-    compat_subprocess_get_DEVNULL = lambda: DEVNULL
-except ImportError:
-    compat_subprocess_get_DEVNULL = lambda: open(os.path.devnull, 'w')
-
-try:
-    from urllib.parse import unquote as compat_urllib_parse_unquote
-except ImportError:
-    def compat_urllib_parse_unquote(string, encoding='utf-8', errors='replace'):
-        if string == '':
-            return string
-        res = string.split('%')
-        if len(res) == 1:
-            return string
-        if encoding is None:
-            encoding = 'utf-8'
-        if errors is None:
-            errors = 'replace'
-        # pct_sequence: contiguous sequence of percent-encoded bytes, decoded
-        pct_sequence = b''
-        string = res[0]
-        for item in res[1:]:
-            try:
-                if not item:
-                    raise ValueError
-                pct_sequence += item[:2].decode('hex')
-                rest = item[2:]
-                if not rest:
-                    # This segment was just a single percent-encoded character.
-                    # May be part of a sequence of code units, so delay decoding.
-                    # (Stored in pct_sequence).
-                    continue
-            except ValueError:
-                rest = '%' + item
-            # Encountered non-percent-encoded characters. Flush the current
-            # pct_sequence.
-            string += pct_sequence.decode(encoding, errors) + rest
-            pct_sequence = b''
-        if pct_sequence:
-            # Flush the final pct_sequence
-            string += pct_sequence.decode(encoding, errors)
-        return string
-
-
-try:
-    from urllib.parse import parse_qs as compat_parse_qs
-except ImportError:  # Python 2
-    # HACK: The following is the correct parse_qs implementation from cpython 3's stdlib.
-    # Python 2's version is apparently totally broken
-
-    def _parse_qsl(qs, keep_blank_values=False, strict_parsing=False,
-                   encoding='utf-8', errors='replace'):
-        qs, _coerce_result = qs, unicode
-        pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
-        r = []
-        for name_value in pairs:
-            if not name_value and not strict_parsing:
-                continue
-            nv = name_value.split('=', 1)
-            if len(nv) != 2:
-                if strict_parsing:
-                    raise ValueError("bad query field: %r" % (name_value,))
-                # Handle case of a control-name with no equal sign
-                if keep_blank_values:
-                    nv.append('')
-                else:
-                    continue
-            if len(nv[1]) or keep_blank_values:
-                name = nv[0].replace('+', ' ')
-                name = compat_urllib_parse_unquote(
-                    name, encoding=encoding, errors=errors)
-                name = _coerce_result(name)
-                value = nv[1].replace('+', ' ')
-                value = compat_urllib_parse_unquote(
-                    value, encoding=encoding, errors=errors)
-                value = _coerce_result(value)
-                r.append((name, value))
-        return r
-
-    def compat_parse_qs(qs, keep_blank_values=False, strict_parsing=False,
-                        encoding='utf-8', errors='replace'):
-        parsed_result = {}
-        pairs = _parse_qsl(qs, keep_blank_values, strict_parsing,
-                           encoding=encoding, errors=errors)
-        for name, value in pairs:
-            if name in parsed_result:
-                parsed_result[name].append(value)
-            else:
-                parsed_result[name] = [value]
-        return parsed_result
-
-try:
-    compat_str = unicode  # Python 2
-except NameError:
-    compat_str = str
-
-try:
-    compat_chr = unichr  # Python 2
-except NameError:
-    compat_chr = chr
-
-try:
-    from xml.etree.ElementTree import ParseError as compat_xml_parse_error
-except ImportError:  # Python 2.6
-    from xml.parsers.expat import ExpatError as compat_xml_parse_error
-
-try:
-    from shlex import quote as shlex_quote
-except ImportError:  # Python < 3.3
-    def shlex_quote(s):
-        return "'" + s.replace("'", "'\"'\"'") + "'"
-
-
-def compat_ord(c):
-    if type(c) is int: return c
-    else: return ord(c)
-
-
-if sys.version_info >= (3, 0):
-    compat_getenv = os.getenv
-    compat_expanduser = os.path.expanduser
-else:
-    # Environment variables should be decoded with filesystem encoding.
-    # Otherwise it will fail if any non-ASCII characters present (see #3854 #3217 #2918)
-
-    def compat_getenv(key, default=None):
-        env = os.getenv(key, default)
-        if env:
-            env = env.decode(get_filesystem_encoding())
-        return env
-
-    # HACK: The default implementations of os.path.expanduser from cpython do not decode
-    # environment variables with filesystem encoding. We will work around this by
-    # providing adjusted implementations.
-    # The following are os.path.expanduser implementations from cpython 2.7.8 stdlib
-    # for different platforms with correct environment variables decoding.
-
-    if os.name == 'posix':
-        def compat_expanduser(path):
-            """Expand ~ and ~user constructions.  If user or $HOME is unknown,
-            do nothing."""
-            if not path.startswith('~'):
-                return path
-            i = path.find('/', 1)
-            if i < 0:
-                i = len(path)
-            if i == 1:
-                if 'HOME' not in os.environ:
-                    import pwd
-                    userhome = pwd.getpwuid(os.getuid()).pw_dir
-                else:
-                    userhome = compat_getenv('HOME')
-            else:
-                import pwd
-                try:
-                    pwent = pwd.getpwnam(path[1:i])
-                except KeyError:
-                    return path
-                userhome = pwent.pw_dir
-            userhome = userhome.rstrip('/')
-            return (userhome + path[i:]) or '/'
-    elif os.name == 'nt' or os.name == 'ce':
-        def compat_expanduser(path):
-            """Expand ~ and ~user constructs.
-
-            If user or $HOME is unknown, do nothing."""
-            if path[:1] != '~':
-                return path
-            i, n = 1, len(path)
-            while i < n and path[i] not in '/\\':
-                i = i + 1
-
-            if 'HOME' in os.environ:
-                userhome = compat_getenv('HOME')
-            elif 'USERPROFILE' in os.environ:
-                userhome = compat_getenv('USERPROFILE')
-            elif not 'HOMEPATH' in os.environ:
-                return path
-            else:
-                try:
-                    drive = compat_getenv('HOMEDRIVE')
-                except KeyError:
-                    drive = ''
-                userhome = os.path.join(drive, compat_getenv('HOMEPATH'))
-
-            if i != 1:  # ~user
-                userhome = os.path.join(os.path.dirname(userhome), path[1:i])
-
-            return userhome + path[i:]
-    else:
-        compat_expanduser = os.path.expanduser
+from .compat import (
+    compat_chr,
+    compat_getenv,
+    compat_html_entities,
+    compat_parse_qs,
+    compat_str,
+    compat_urllib_error,
+    compat_urllib_parse,
+    compat_urllib_parse_urlparse,
+    compat_urllib_request,
+    compat_urlparse,
)
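The net effect of this hunk: the Python 2/3 shims move into a dedicated youtube_dl/compat.py module and utils.py merely re-imports what it needs. Callers import from the new module directly, e.g. (assuming the package is importable as youtube_dl):

```python
from youtube_dl.compat import compat_str, compat_urlparse

print(compat_str)  # <type 'unicode'> on Python 2, <class 'str'> on Python 3
print(compat_urlparse.urlparse('https://example.com/watch?v=abc').query)
# -> 'v=abc'
```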
 # This is not clearly defined otherwise
@@ -298,28 +63,33 @@ def preferredencoding():
     """
     try:
         pref = locale.getpreferredencoding()
-        u'TEST'.encode(pref)
+        'TEST'.encode(pref)
     except:
         pref = 'UTF-8'

     return pref

-if sys.version_info < (3,0):
-    def compat_print(s):
-        print(s.encode(preferredencoding(), 'xmlcharrefreplace'))
-else:
-    def compat_print(s):
-        assert type(s) == type(u'')
-        print(s)
-

 def write_json_file(obj, fn):
-    """ Encode obj as JSON and write it to fn, atomically """
+    """ Encode obj as JSON and write it to fn, atomically if possible """
+
+    fn = encodeFilename(fn)
+    if sys.version_info < (3, 0) and sys.platform != 'win32':
+        encoding = get_filesystem_encoding()
+        # os.path.basename returns a bytes object, but NamedTemporaryFile
+        # will fail if the filename contains non ascii characters unless we
+        # use a unicode object
+        path_basename = lambda f: os.path.basename(fn).decode(encoding)
+        # the same for os.path.dirname
+        path_dirname = lambda f: os.path.dirname(fn).decode(encoding)
+    else:
+        path_basename = os.path.basename
+        path_dirname = os.path.dirname
+
     args = {
         'suffix': '.tmp',
-        'prefix': os.path.basename(fn) + '.',
-        'dir': os.path.dirname(fn),
+        'prefix': path_basename(fn) + '.',
+        'dir': path_dirname(fn),
         'delete': False,
     }

@@ -338,6 +108,13 @@ def write_json_file(obj, fn):
     try:
         with tf:
             json.dump(obj, tf)
+        if sys.platform == 'win32':
+            # Need to remove existing file on Windows, else os.rename raises
+            # WindowsError or FileExistsError.
+            try:
+                os.unlink(fn)
+            except OSError:
+                pass
         os.rename(tf.name, fn)
     except:
         try:
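Usage sketch for the writer above: the JSON first goes to a NamedTemporaryFile in the destination directory and is then renamed over the target, so readers never observe a half-written file. On Windows the destination is unlinked first because os.rename() refuses to replace an existing file there.

```python
from youtube_dl.utils import write_json_file

# Written as e.g. 'info.json.XXXXXX.tmp' first, then moved into place.
write_json_file({'id': 'abc', 'title': 'Test video'}, 'info.json')
```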
@@ -394,127 +171,32 @@ def xpath_text(node, xpath, name=None, fatal=False):
     return n.text


-compat_html_parser.locatestarttagend = re.compile(r"""<[a-zA-Z][-.a-zA-Z0-9:_]*(?:\s+(?:(?<=['"\s])[^\s/>][^\s/=>]*(?:\s*=+\s*(?:'[^']*'|"[^"]*"|(?!['"])[^>\s]*))?\s*)*)?\s*""", re.VERBOSE)  # backport bugfix
-class BaseHTMLParser(compat_html_parser.HTMLParser):
-    def __init(self):
-        compat_html_parser.HTMLParser.__init__(self)
-        self.html = None
-
-    def loads(self, html):
-        self.html = html
-        self.feed(html)
-        self.close()
-
-class AttrParser(BaseHTMLParser):
-    """Modified HTMLParser that isolates a tag with the specified attribute"""
-    def __init__(self, attribute, value):
-        self.attribute = attribute
-        self.value = value
-        self.result = None
-        self.started = False
-        self.depth = {}
-        self.watch_startpos = False
-        self.error_count = 0
-        BaseHTMLParser.__init__(self)
-
-    def error(self, message):
-        if self.error_count > 10 or self.started:
-            raise compat_html_parser.HTMLParseError(message, self.getpos())
-        self.rawdata = '\n'.join(self.html.split('\n')[self.getpos()[0]:])  # skip one line
-        self.error_count += 1
-        self.goahead(1)
-
-    def handle_starttag(self, tag, attrs):
-        attrs = dict(attrs)
-        if self.started:
-            self.find_startpos(None)
-        if self.attribute in attrs and attrs[self.attribute] == self.value:
-            self.result = [tag]
-            self.started = True
-            self.watch_startpos = True
-        if self.started:
-            if not tag in self.depth: self.depth[tag] = 0
-            self.depth[tag] += 1
-
-    def handle_endtag(self, tag):
-        if self.started:
-            if tag in self.depth: self.depth[tag] -= 1
-            if self.depth[self.result[0]] == 0:
-                self.started = False
-                self.result.append(self.getpos())
-
-    def find_startpos(self, x):
-        """Needed to put the start position of the result (self.result[1])
-        after the opening tag with the requested id"""
-        if self.watch_startpos:
-            self.watch_startpos = False
-            self.result.append(self.getpos())
-    handle_entityref = handle_charref = handle_data = handle_comment = \
-        handle_decl = handle_pi = unknown_decl = find_startpos
-
-    def get_result(self):
-        if self.result is None:
-            return None
-        if len(self.result) != 3:
-            return None
-        lines = self.html.split('\n')
-        lines = lines[self.result[1][0]-1:self.result[2][0]]
-        lines[0] = lines[0][self.result[1][1]:]
-        if len(lines) == 1:
-            lines[-1] = lines[-1][:self.result[2][1]-self.result[1][1]]
-        lines[-1] = lines[-1][:self.result[2][1]]
-        return '\n'.join(lines).strip()
-
-# Hack for https://github.com/rg3/youtube-dl/issues/662
-if sys.version_info < (2, 7, 3):
-    AttrParser.parse_endtag = (lambda self, i:
-        i + len("</scr'+'ipt>")
-        if self.rawdata[i:].startswith("</scr'+'ipt>")
-        else compat_html_parser.HTMLParser.parse_endtag(self, i))
-
 def get_element_by_id(id, html):
     """Return the content of the tag with the specified ID in the passed HTML document"""
     return get_element_by_attribute("id", id, html)


 def get_element_by_attribute(attribute, value, html):
     """Return the content of the tag with the specified attribute in the passed HTML document"""
-    parser = AttrParser(attribute, value)
-    try:
-        parser.loads(html)
-    except compat_html_parser.HTMLParseError:
-        pass
-    return parser.get_result()
-
-
-class MetaParser(BaseHTMLParser):
-    """
-    Modified HTMLParser that isolates a meta tag with the specified name
-    attribute.
-    """
-    def __init__(self, name):
-        BaseHTMLParser.__init__(self)
-        self.name = name
-        self.content = None
-        self.result = None
-
-    def handle_starttag(self, tag, attrs):
-        if tag != 'meta':
-            return
-        attrs = dict(attrs)
-        if attrs.get('name') == self.name:
-            self.result = attrs.get('content')
-
-    def get_result(self):
-        return self.result
-
-
-def get_meta_content(name, html):
-    """
-    Return the content attribute from the meta tag with the given name attribute.
-    """
-    parser = MetaParser(name)
-    try:
-        parser.loads(html)
-    except compat_html_parser.HTMLParseError:
-        pass
-    return parser.get_result()
+    m = re.search(r'''(?xs)
+        <([a-zA-Z0-9:._-]+)
+         (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]+|="[^"]+"|='[^']+'))*?
+         \s+%s=['"]?%s['"]?
+         (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]+|="[^"]+"|='[^']+'))*?
+         \s*>
+         (?P<content>.*?)
+        </\1>
+    ''' % (re.escape(attribute), re.escape(value)), html)
+
+    if not m:
+        return None
+    res = m.group('content')
+
+    if res.startswith('"') or res.startswith("'"):
+        res = res[1:-1]
+
+    return unescapeHTML(res)
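The HTMLParser subclasses are gone; get_element_by_attribute is now a single verbose regex with a backreference on the tag name, so it only finds content enclosed by a properly matching closing tag. For example:

```python
from youtube_dl.utils import get_element_by_id

html = '<div id="player"><span>Duration: 3:10 &amp; counting</span></div>'
print(get_element_by_id('player', html))
# -> '<span>Duration: 3:10 & counting</span>' (entities are unescaped)
```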
 def clean_html(html):
@@ -541,7 +223,7 @@ def sanitize_open(filename, open_mode):
     It returns the tuple (stream, definitive_file_name).
     """
     try:
-        if filename == u'-':
+        if filename == '-':
             if sys.platform == 'win32':
                 import msvcrt
                 msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
@@ -554,7 +236,7 @@ def sanitize_open(filename, open_mode):

         # In case of error, try to remove win32 forbidden chars
         alt_filename = os.path.join(
-            re.sub(u'[/<>:"\\|\\\\?\\*]', u'#', path_part)
+            re.sub('[/<>:"\\|\\\\?\\*]', '#', path_part)
             for path_part in os.path.split(filename)
         )
         if alt_filename == filename:
@@ -593,7 +275,7 @@ def sanitize_filename(s, restricted=False, is_id=False):
             return '_'
         return char

-    result = u''.join(map(replace_insane, s))
+    result = ''.join(map(replace_insane, s))
     if not is_id:
         while '__' in result:
             result = result.replace('__', '_')
@@ -623,15 +305,15 @@ def _htmlentity_transform(entity):
     mobj = re.match(r'#(x?[0-9]+)', entity)
     if mobj is not None:
         numstr = mobj.group(1)
-        if numstr.startswith(u'x'):
+        if numstr.startswith('x'):
             base = 16
-            numstr = u'0%s' % numstr
+            numstr = '0%s' % numstr
         else:
             base = 10
         return compat_chr(int(numstr, base))

     # Unknown entity in name, return its literal representation
-    return (u'&%s;' % entity)
+    return ('&%s;' % entity)


 def unescapeHTML(s):
@@ -655,7 +337,7 @@ def encodeFilename(s, for_subprocess=False):
         return s

     if sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5:
-        # Pass u'' directly to use Unicode APIs on Windows 2000 and up
+        # Pass '' directly to use Unicode APIs on Windows 2000 and up
         # (Detecting Windows NT 4 is tricky because 'major >= 4' would
         # match Windows 9x series as well. Besides, NT 4 is obsolete.)
         if not for_subprocess:
@@ -738,6 +420,7 @@ def make_HTTPS_handler(opts_no_check_certificate, **kwargs):
             pass  # Python < 3.4
     return compat_urllib_request.HTTPSHandler(context=context, **kwargs)


 class ExtractorError(Exception):
     """Error during info extraction."""
     def __init__(self, msg, tb=None, expected=False, cause=None, video_id=None):
@@ -750,9 +433,15 @@ class ExtractorError(Exception):
         if video_id is not None:
             msg = video_id + ': ' + msg
         if cause:
-            msg += u' (caused by %r)' % cause
+            msg += ' (caused by %r)' % cause
         if not expected:
-            msg = msg + u'; please report this issue on https://yt-dl.org/bug . Be sure to call youtube-dl with the --verbose flag and include its complete output. Make sure you are using the latest version; type youtube-dl -U to update.'
+            if ytdl_is_updateable():
+                update_cmd = 'type youtube-dl -U to update'
+            else:
+                update_cmd = 'see https://yt-dl.org/update on how to update'
+            msg += '; please report this issue on https://yt-dl.org/bug .'
+            msg += ' Make sure you are using the latest version; %s.' % update_cmd
+            msg += ' Be sure to call youtube-dl with the --verbose flag and include its complete output.'
         super(ExtractorError, self).__init__(msg)

         self.traceback = tb
@@ -763,7 +452,7 @@ class ExtractorError(Exception):
     def format_traceback(self):
         if self.traceback is None:
             return None
-        return u''.join(traceback.format_tb(self.traceback))


 class RegexNotFoundError(ExtractorError):
@@ -991,17 +680,17 @@ def unified_strdate(date_str):
             upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d')
     return upload_date

-def determine_ext(url, default_ext=u'unknown_video'):
+def determine_ext(url, default_ext='unknown_video'):
     if url is None:
         return default_ext
-    guess = url.partition(u'?')[0].rpartition(u'.')[2]
+    guess = url.partition('?')[0].rpartition('.')[2]
     if re.match(r'^[A-Za-z0-9]+$', guess):
         return guess
     else:
         return default_ext

 def subtitles_filename(filename, sub_lang, sub_format):
-    return filename.rsplit('.', 1)[0] + u'.' + sub_lang + u'.' + sub_format
+    return filename.rsplit('.', 1)[0] + '.' + sub_lang + '.' + sub_format

 def date_from_str(date_str):
     """
@@ -1181,10 +870,7 @@ def bytes_to_intlist(bs):
 def intlist_to_bytes(xs):
     if not xs:
         return b''
-    if isinstance(chr(0), bytes):  # Python 2
-        return ''.join([chr(x) for x in xs])
-    else:
-        return bytes(xs)
+    return struct_pack('%dB' % len(xs), *xs)


 # Cross-platform file locking
@@ -1296,7 +982,7 @@ def shell_quote(args):
             # We may get a filename encoded with 'encodeFilename'
             a = a.decode(encoding)
         quoted_args.append(pipes.quote(a))
-    return u' '.join(quoted_args)
+    return ' '.join(quoted_args)


 def takewhile_inclusive(pred, seq):
@@ -1312,31 +998,31 @@ def smuggle_url(url, data):
     """ Pass additional data in a URL for internal use. """

     sdata = compat_urllib_parse.urlencode(
-        {u'__youtubedl_smuggle': json.dumps(data)})
-    return url + u'#' + sdata
+        {'__youtubedl_smuggle': json.dumps(data)})
+    return url + '#' + sdata


 def unsmuggle_url(smug_url, default=None):
     if not '#__youtubedl_smuggle' in smug_url:
         return smug_url, default
-    url, _, sdata = smug_url.rpartition(u'#')
-    jsond = compat_parse_qs(sdata)[u'__youtubedl_smuggle'][0]
+    url, _, sdata = smug_url.rpartition('#')
+    jsond = compat_parse_qs(sdata)['__youtubedl_smuggle'][0]
     data = json.loads(jsond)
     return url, data


 def format_bytes(bytes):
     if bytes is None:
-        return u'N/A'
+        return 'N/A'
     if type(bytes) is str:
         bytes = float(bytes)
     if bytes == 0.0:
         exponent = 0
     else:
         exponent = int(math.log(bytes, 1024.0))
-    suffix = [u'B', u'KiB', u'MiB', u'GiB', u'TiB', u'PiB', u'EiB', u'ZiB', u'YiB'][exponent]
+    suffix = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB'][exponent]
     converted = float(bytes) / float(1024 ** exponent)
-    return u'%.2f%s' % (converted, suffix)
+    return '%.2f%s' % (converted, suffix)


 def get_term_width():
@@ -1359,8 +1045,8 @@ def month_by_name(name):
     """ Return the number of a month by (locale-independently) English name """

     ENGLISH_NAMES = [
-        u'January', u'February', u'March', u'April', u'May', u'June',
-        u'July', u'August', u'September', u'October', u'November', u'December']
+        'January', 'February', 'March', 'April', 'May', 'June',
+        'July', 'August', 'September', 'October', 'November', 'December']
     try:
         return ENGLISH_NAMES.index(name) + 1
     except ValueError:
@@ -1371,7 +1057,7 @@ def fix_xml_ampersands(xml_str):
     """Replace all the '&' by '&amp;' in XML"""
     return re.sub(
         r'&(?!amp;|lt;|gt;|apos;|quot;|#x[0-9a-fA-F]{,4};|#[0-9]{,4};)',
-        u'&amp;',
+        '&amp;',
         xml_str)


@@ -1404,7 +1090,7 @@ def remove_end(s, end):

 def url_basename(url):
     path = compat_urlparse.urlparse(url).path
-    return path.strip(u'/').split(u'/')[-1]
+    return path.strip('/').split('/')[-1]


 class HEADRequest(compat_urllib_request.Request):
@@ -1429,7 +1115,7 @@ def str_to_int(int_str):
     """ A more relaxed version of int_or_none """
     if int_str is None:
         return None
-    int_str = re.sub(r'[,\.\+]', u'', int_str)
+    int_str = re.sub(r'[,\.\+]', '', int_str)
     return int(int_str)


@@ -1444,7 +1130,12 @@ def parse_duration(s):
     s = s.strip()

     m = re.match(
-        r'(?i)(?:(?:(?P<hours>[0-9]+)\s*(?:[:h]|hours?)\s*)?(?P<mins>[0-9]+)\s*(?:[:m]|mins?|minutes?)\s*)?(?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?\s*(?:s|secs?|seconds?)?$', s)
+        r'''(?ix)T?
+        (?:
+            (?:(?P<hours>[0-9]+)\s*(?:[:h]|hours?)\s*)?
+            (?P<mins>[0-9]+)\s*(?:[:m]|mins?|minutes?)\s*
+        )?
+        (?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?\s*(?:s|secs?|seconds?)?$''', s)
     if not m:
         return None
     res = int(m.group('secs'))
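The duration pattern is now verbose and, thanks to the leading T?, also tolerates ISO-8601-flavoured values. Some inputs the rewritten regex handles:

```python
from youtube_dl.utils import parse_duration

assert parse_duration('9:05') == 545          # 9 min 5 s
assert parse_duration('1h 30m 15s') == 5415
assert parse_duration('T7M6.5S') == 426.5     # leading 'T' now tolerated
assert parse_duration('bogus') is None
```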
@@ -1459,7 +1150,7 @@ def parse_duration(s):

 def prepend_extension(filename, ext):
     name, real_ext = os.path.splitext(filename)
-    return u'{0}.{1}{2}'.format(name, ext, real_ext)
+    return '{0}.{1}{2}'.format(name, ext, real_ext)


 def check_executable(exe, args=[]):
@@ -1472,6 +1163,25 @@ def check_executable(exe, args=[]):
     return exe


+def get_exe_version(exe, args=['--version'],
+                    version_re=r'version\s+([0-9._-a-zA-Z]+)',
+                    unrecognized='present'):
+    """ Returns the version of the specified executable,
+    or False if the executable is not present """
+    try:
+        out, err = subprocess.Popen(
+            [exe] + args,
+            stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()
+    except OSError:
+        return False
+    firstline = out.partition(b'\n')[0].decode('ascii', 'ignore')
+    m = re.search(version_re, firstline)
+    if m:
+        return m.group(1)
+    else:
+        return unrecognized
+
+
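The new helper probes an external binary and scrapes a version out of the first output line: False means the executable is missing, while 'present' (or whatever placeholder is passed as unrecognized) means it ran but the banner was not understood. For instance:

```python
from youtube_dl.utils import get_exe_version

# "ffmpeg version 2.4.3 ..." -> '2.4.3' with the default version_re
print(get_exe_version('ffmpeg'))
print(get_exe_version('no-such-binary'))  # -> False (the OSError is swallowed)
```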
 class PagedList(object):
     def __len__(self):
         # This is only useful for tests
@@ -1562,7 +1272,7 @@ def escape_rfc3986(s):
     """Escape non-ASCII characters as suggested by RFC 3986"""
     if sys.version_info < (3, 0) and isinstance(s, unicode):
         s = s.encode('utf-8')
-    return compat_urllib_parse.quote(s, "%/;:@&=+$,!~*'()?#[]")
+    return compat_urllib_parse.quote(s, b"%/;:@&=+$,!~*'()?#[]")


 def escape_url(url):
@@ -1576,7 +1286,7 @@ def escape_url(url):
     ).geturl()

 try:
-    struct.pack(u'!I', 0)
+    struct.pack('!I', 0)
 except TypeError:
     # In Python 2.6 (and some 2.7 versions), struct requires a bytes argument
     def struct_pack(spec, *args):
@@ -1597,7 +1307,7 @@ def read_batch_urls(batch_fd):
     def fixup(url):
         if not isinstance(url, compat_str):
             url = url.decode('utf-8', 'replace')
-        BOM_UTF8 = u'\xef\xbb\xbf'
+        BOM_UTF8 = '\xef\xbb\xbf'
         if url.startswith(BOM_UTF8):
             url = url[len(BOM_UTF8):]
         url = url.strip()
@@ -1636,15 +1346,6 @@ def parse_xml(s):
     return tree


-if sys.version_info < (3, 0) and sys.platform == 'win32':
-    def compat_getpass(prompt, *args, **kwargs):
-        if isinstance(prompt, compat_str):
-            prompt = prompt.encode(preferredencoding())
-        return getpass.getpass(prompt, *args, **kwargs)
-else:
-    compat_getpass = getpass.getpass
-
-
 US_RATINGS = {
     'G': 0,
     'PG': 10,
@@ -1662,7 +1363,8 @@ def parse_age_limit(s):


 def strip_jsonp(code):
-    return re.sub(r'(?s)^[a-zA-Z0-9_]+\s*\(\s*(.*)\);?\s*?\s*$', r'\1', code)
+    return re.sub(
+        r'(?s)^[a-zA-Z0-9_]+\s*\(\s*(.*)\);?\s*?(?://[^\n]*)*$', r'\1', code)
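strip_jsonp's pattern gains (?://[^\n]*)*, so a trailing line comment after the closing parenthesis no longer breaks the unwrapping:

```python
from youtube_dl.utils import strip_jsonp

print(strip_jsonp('cb({"status": "ok"}); // served by edge-42'))
# -> '{"status": "ok"}'
```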
 def js_to_json(code):
@@ -1702,18 +1404,6 @@ def qualities(quality_ids):

 DEFAULT_OUTTMPL = '%(title)s-%(id)s.%(ext)s'

-try:
-    subprocess_check_output = subprocess.check_output
-except AttributeError:
-    def subprocess_check_output(*args, **kwargs):
-        assert 'input' not in kwargs
-        p = subprocess.Popen(*args, stdout=subprocess.PIPE, **kwargs)
-        output, _ = p.communicate()
-        ret = p.poll()
-        if ret:
-            raise subprocess.CalledProcessError(ret, p.args, output=output)
-        return output
-

 def limit_length(s, length):
     """ Add ellipses to overly long strings """
@@ -1736,3 +1426,10 @@ def is_outdated_version(version, limit, assume_new=True):
         return version_tuple(version) < version_tuple(limit)
     except ValueError:
         return not assume_new
+
+
+def ytdl_is_updateable():
+    """ Returns if youtube-dl can be updated with -U """
+    from zipimport import zipimporter
+
+    return isinstance(globals().get('__loader__'), zipimporter) or hasattr(sys, 'frozen')
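This helper is what the reworked ExtractorError message above keys on: -U can only update the zipped or frozen distributions, so pip and checkout installs get pointed at https://yt-dl.org/update instead. A quick illustration:

```python
from youtube_dl.utils import ytdl_is_updateable

if ytdl_is_updateable():
    print('type youtube-dl -U to update')
else:
    print('see https://yt-dl.org/update on how to update')
```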
youtube_dl/version.py
@@ -1,2 +1,2 @@

-__version__ = '2014.11.02.1'
+__version__ = '2014.11.23'