Compare commits


93 Commits

Author SHA1 Message Date
bf951c5e29 release 2014.11.13.2 2014-11-13 16:12:54 +01:00
af63fed7d8 [generic] Add support for livestream embeds (Fixes #4185) 2014-11-13 16:12:51 +01:00
68d1d41c03 Credit @yaccz for freevideo (#4131) 2014-11-13 15:59:48 +01:00
3deed1e91a [freevideo] Simplify and raise error for foreigners (Fixes #4131) 2014-11-13 15:59:22 +01:00
11b28e93d3 Merge remote-tracking branch 'yaccz/add-extractor/freevideo' 2014-11-13 15:53:16 +01:00
c3d582985f release 2014.11.13.1 2014-11-13 15:42:48 +01:00
4c0924bb24 [utils] Fix intlist_to_bytes in Python 2 (#4181) 2014-11-13 15:28:42 +01:00
3fa5bb3802 [sexu] Modernize (#4171) 2014-11-13 15:20:49 +01:00
c47ec62b83 Merge remote-tracking branch 'peugeot/sexu' 2014-11-13 15:18:38 +01:00
e4bdb37ec6 [spiegel] Add support for embeds 2014-11-13 15:02:31 +01:00
3e6e4999ca [test/helper] Improve output 2014-11-13 14:55:45 +01:00
0e15e725a0 [spiegel] Modernize 2014-11-13 14:45:17 +01:00
437f68d868 Update sexu.py 2014-11-13 14:02:53 +01:00
d91d124081 fix python 2 test 2014-11-13 13:57:10 +01:00
2d42905b68 release 2014.11.13 2014-11-13 09:57:58 +01:00
cbe71cb41d Merge pull request #4178 from awojnowski/master
Fix YouTube Signature Extraction
2014-11-13 08:24:29 +01:00
894dd8682e Fix YouTube signature extraction. 2014-11-13 00:33:27 -06:00
9e05d039e0 [dailymotion] Fix extraction of vevo videos (fixes #4168) 2014-11-12 23:32:27 +01:00
bbd5f2de5e [sexu] initial support 2014-11-12 20:41:13 +01:00
73689dafbf [tvplay] Fix f4m URL extraction (Closes #4119)
Add the query parameters needed by the AkamaiHD F4M player.
Also, modernize a bit.
2014-11-12 19:26:00 +02:00
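A sketch of the idea behind the tvplay fix: append the query string the AkamaiHD F4M player expects before requesting the manifest. The parameter name and value below (`hdcore`) are illustrative assumptions, not taken from the commit itself:

from youtube_dl.compat import compat_urllib_parse

def akamaihd_f4m_url(manifest_url):
    # AkamaiHD HDS endpoints refuse manifest requests that lack the
    # player's query parameters; 'hdcore' here is an example only.
    query = compat_urllib_parse.urlencode({'hdcore': '3.1.0'})
    sep = '&' if '?' in manifest_url else '?'
    return manifest_url + sep + query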
4b50ba0989 Credit @xantares for goldenmoustache (#4128) 2014-11-12 15:53:00 +01:00
5ccaddf5b1 [goldenmoustache] Simplify (#4128) 2014-11-12 15:36:59 +01:00
0b201a3134 Merge remote-tracking branch 'xantares/goldenmoustache' 2014-11-12 15:34:31 +01:00
ffe38646ca [funnyordie] Remove test md5sum (Fixes #4113) 2014-11-12 15:33:15 +01:00
b703ab4d7f Merge remote-tracking branch 'michael-k/links' 2014-11-12 15:31:54 +01:00
c6afed48ff [YoutubeDL] guard against strange sys.stdouts 2014-11-12 15:30:26 +01:00
732c848c14 [abc] Update test case
Old video has expired.
2014-11-12 15:26:29 +01:00
9d2a4dae90 [allocine] Update test 2014-11-12 15:26:09 +01:00
7009a9047a [byutv] Update test 2014-11-12 15:24:37 +01:00
498942f187 [test_youtube_signature] Fix import
Broken in commit 8c25f81bee
2014-11-12 15:23:55 +01:00
28465df1ff [youjizz] Modernize (#4131) 2014-11-12 15:19:23 +01:00
ef89dba58f [myspass] Modernize test case 2014-11-12 15:01:52 +01:00
13ba3a6461 [bandcamp:album] Fix test case 2014-11-12 15:00:54 +01:00
8f6ec4bbe6 release 2014.11.12.1 2014-11-12 11:44:26 +01:00
c295490830 [YoutubeDL] Fix bug in the detection of formats that don't contain video (fixes #4150)
If the requested format was not available, we called the '.get' method on None.
2014-11-12 09:42:35 +01:00
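The crash came from a select_format-style lookup returning None for an unavailable format, after which the code called `.get` on that None. A simplified, self-contained sketch of the guard (the helper name is hypothetical):

def pick_video_format(requested_id, formats):
    # select_format-style lookup: yields None when the requested
    # format does not exist in the list.
    selected = next(
        (f for f in formats if f.get('format_id') == requested_id), None)
    if selected is None:
        return None  # guard: never call .get() on None (the #4150 bug)
    return None if selected.get('vcodec') == 'none' else selected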
eb4cb42a02 [ted] Extract duration (closes #4155) 2014-11-12 09:30:57 +01:00
7a8cbc72b2 release 2014.11.12 2014-11-12 08:46:34 +01:00
2774852c2f Fix MTV/GameTrailers "Bad Request" error
Bugfix for #4123 & #4153
2014-11-12 01:10:08 +01:00
bbcc21efd1 [wrzuta] Fallback to mp3 on unknown media type (#4156) 2014-11-11 16:47:54 +02:00
60526d6bcb [wrzuta] Fix audio extension lookup (Closes #4156)
Also, replace deleted test case
2014-11-11 16:23:06 +02:00
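Both wrzuta commits concern turning the page's declared media type into a file extension, with mp3 assumed when the type is unknown. A rough sketch of the fallback (the mapping is illustrative, not copied from the extractor):

def wrzuta_audio_ext(media_type):
    # Recognised media types map to their extensions; anything else
    # is assumed to be mp3 audio (the fallback added for #4156).
    known = {'audio': 'mp3', 'video': 'mp4'}
    return known.get(media_type, 'mp3')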
1d4df56d09 release 2014.11.09 2014-11-09 22:32:41 +01:00
a1cf99d03a [YoutubeDL] Add playlist_id and playlist_title fields (Fixes #4139) 2014-11-09 22:32:35 +01:00
3c6af203cc [streamcloud] Match URLs without fname (Closes #4144)
Also, modernize a bit.
2014-11-09 22:00:51 +02:00
1a92e086a7 [tapely] Add Referer header (Closes #4138) 2014-11-09 15:01:12 +02:00
519c73f267 Merge pull request #4136 from andikmu/master
fix swrmediathek for new formats.
2014-11-09 12:17:18 +01:00
a6dae6c09c [ndr] Improve video url regex (fixes #4140) 2014-11-09 11:15:50 +01:00
f866e474f3 [YoutubeDL] Don't download formats for merging if the first doesn't contain the video (#4132) 2014-11-09 10:59:56 +01:00
8bb9b97c97 Merge remote-tracking branch 'origin/master' 2014-11-09 08:30:12 +01:00
d6fdc38682 fix swrmediathek for new formats. 2014-11-08 15:56:35 +01:00
c2b61af548 [options] Document the syntax for merging formats (closes #3940, closes #4132) 2014-11-08 15:09:04 +01:00
2fdbf27ad8 [niconico:playlist] Use the same video url the webpage uses (closes #4133) 2014-11-08 14:53:23 +01:00
3898c8a7b2 [FreeVideo] Add new extractor 2014-11-08 00:13:28 +01:00
29ed169cd6 [wrzuta] Add mp3 as a possible format (Closes #4126) 2014-11-07 22:53:54 +02:00
b868c972d1 Add support for goldenmoustache.com 2014-11-07 17:44:06 +00:00
9908e03528 Merge pull request #4076 from ghedo/direct_type
[generic] indicate when a direct video has been extracted
2014-11-06 22:23:14 +01:00
1fe8fb8c20 [vice] Re-add extractor (fixes #4120)
The generic extraction no longer works.
2014-11-06 21:44:07 +01:00
5d63b0aa93 [goshgay] Fix title extraction and modernize
Also remove width and height, as they do not describe the actual video.
2014-11-06 01:19:20 +02:00
4164f0117e [utils] Remove unused import 2014-11-05 23:56:54 +01:00
37aab27808 [brightcove] Extract m3u8 formats (#3541) 2014-11-06 00:14:33 +02:00
6110bbbfdd [niconico] Catch deleted videos (closes #4064) 2014-11-05 19:52:34 +01:00
cde9b380e6 Merge pull request #4110 from nemunaire/channel9-fix
[channel9] Fix extraction
2014-11-05 19:03:24 +01:00
dab647a7b6 [cinemassacre] Keep both extraction approaches and make more robust (Closes #4109) 2014-11-05 21:32:46 +07:00
a316a83d2b [channel9] Fix extraction 2014-11-05 11:23:11 +01:00
81b22aee8b [izlesene] Update test cases and modernize
The timestamp fluctuates with DST.
2014-11-05 01:00:33 +02:00
a80c96eab0 release 2014.11.04 2014-11-04 23:42:09 +01:00
20436c30c9 [youtube] Clarify output 2014-11-04 23:35:34 +01:00
3828505646 [utils] Use a regexp instead of HTMLParser for get_element_by_attribute 2014-11-04 23:33:43 +01:00
11fba1751d [imdb] Simplify 2014-11-04 23:26:23 +01:00
12ea2f30cf [utils] Remove unused get_meta_content function 2014-11-04 23:20:39 +01:00
9c3e870393 [gamespot] Remove unused import 2014-11-04 23:17:43 +01:00
44789f2457 [ustream] Use modern helper function instead of old HTML parser 2014-11-04 23:15:16 +01:00
711ede6e1b [heise] Fix description, thumbnail and format ID 2014-11-04 23:14:16 +01:00
a32f253112 [gamespot] Modernize 2014-11-04 23:04:12 +01:00
94bd361318 [youtube] Skip sts if missing (Fixes #4095, fixes #4103) 2014-11-04 22:45:43 +01:00
acd40f64ed [cnn] Modernize test definitions 2014-11-04 22:25:15 +01:00
766306450d [played] Capture and output error message 2014-11-04 17:34:53 +07:00
e7642ab572 [wimp] Fix video URL regex 2014-11-04 17:13:17 +07:00
bdf9701729 [generic/brightcove] Add a new test case for kijk.nl (#3541) 2014-11-03 23:13:46 +02:00
b5af6fcdad [brightcove] Make _VALID_URL less greedy and check for empty URLs (#3541) 2014-11-03 23:12:24 +02:00
278143df5b [test_compat] Ignore unicode_literals 2014-11-03 19:12:06 +01:00
fdca55fe34 [trutube] Strip title 2014-11-03 20:14:18 +07:00
4f195f55f0 Do not override stdlib html parser 'locatestarttagend' regex (fixes #4081)
'<a href="foo" ><img src="bar" / ></a>' wouldn't be parsed right (the problem is '/ >', '/>' worked fine).
We need to change it in python 2.6 (for example the description of youtube videos wouldn't be extracted).
2014-11-02 19:31:06 +01:00
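The failure mode can be reproduced by feeding the quoted markup to the parser; a correct parser reports both tags with their attributes. A minimal sketch:

from youtube_dl.compat import compat_html_parser

class TagDumper(compat_html_parser.HTMLParser):
    def handle_starttag(self, tag, attrs):
        print('%s %r' % (tag, attrs))

parser = TagDumper()
# A correct parser reports: a [('href', 'foo')] and img [('src', 'bar')];
# with the faulty override, the '/ >' variant lost its attributes.
parser.feed('<a href="foo" ><img src="bar" / ></a>')
parser.close()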
ac35c26686 [tests] Don't auto init YoutubeDL
It would print the debug headers for each test.
Also, nose uses a StringIO object for stdout, which in Python 2.x doesn't have the 'encoding' attribute.
2014-11-02 17:53:12 +01:00
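Usage sketch of the new flag: constructing a YoutubeDL with `auto_init=False` skips the debug header and the default extractor registration, which tests then perform explicitly (cf. the test/helper and test_download diffs below):

from youtube_dl import YoutubeDL

ydl = YoutubeDL({'skip_download': True}, auto_init=False)
# Nothing was printed and no extractors were registered; opt back in:
ydl.add_default_info_extractors()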
982a58d049 [README] Replace links to kernel.org with links to git-scm.com
Unlike kernel.org, the documentation at git-scm.com is up to date and
the rest of the git documentation is easily accessible to any git
newbie.
2014-11-02 16:07:40 +01:00
42f7d2f588 [test_download] Fix import 2014-11-02 11:46:12 +01:00
39f0a2a6b7 [test_swfinterp] Correct compilation on modern mxmlc versions 2014-11-02 11:41:33 +01:00
ecc0c5ee01 [utils] Modernize 2014-11-02 11:37:49 +01:00
451948b28c [compat] Modernize 2014-11-02 11:36:29 +01:00
baa708036c [compat] Fix imports 2014-11-02 11:26:40 +01:00
8c25f81bee [util] Move compatibility functions out of util
utils is large enough without these compatibility functions.

Everything that is present in newer versions of Python (i.e. with dev Python it's just an import) goes into compat.py .
Everything else (i.e. youtube-dl-specific helpers) goes into utils.py .
2014-11-02 11:23:42 +01:00
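In practice the split means imports now pick the module by the nature of the helper, as the diffs below show throughout:

from youtube_dl.compat import (
    compat_str,             # unicode on Python 2, str on Python 3
    compat_urllib_request,  # urllib2 on Python 2, urllib.request on Python 3
)
from youtube_dl.utils import (
    ExtractorError,         # youtube-dl-specific helper: stays in utils
)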
4c83c96795 [YoutubeDL] Include rtmpdump in exe versions -v output 2014-11-02 10:55:36 +01:00
9580711841 [ffmpeg] Move version detection to utils 2014-11-02 10:50:30 +01:00
ccdd0ffb80 [generic] indicate when a direct video has been extracted
Fixes #4052.
2014-11-01 15:34:00 +01:00
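The generic extractor marks such results with a `direct` flag in the returned info dict (see the generic.py hunk further down). A sketch of the resulting shape, with placeholder arguments:

def direct_link_result(video_id, url, title):
    # Shape of the info dict for a directly linked media file;
    # 'direct': True is the flag this commit adds.
    return {
        'id': video_id,
        'title': title,
        'direct': True,
        'formats': [{'url': url}],
    }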
71 changed files with 1037 additions and 706 deletions

View File

@ -79,4 +79,6 @@ Dennis Scheiba
Damon Timm
winwon
Xavier Beynon
Gabriel Schubiner
xantares
Jan Matějka

View File

@ -131,17 +131,19 @@ which means you can modify it, redistribute it or use it however you like.
%(upload_date)s for the upload date
(YYYYMMDD), %(extractor)s for the provider
(youtube, metacafe, etc), %(id)s for the
video id, %(playlist)s for the playlist the
video id, %(playlist_title)s,
%(playlist_id)s, or %(playlist)s (=title if
present, ID otherwise) for the playlist the
video is in, %(playlist_index)s for the
position in the playlist and %% for a
literal percent. %(height)s and %(width)s
for the width and height of the video
format. %(resolution)s for a textual
position in the playlist. %(height)s and
%(width)s for the width and height of the
video format. %(resolution)s for a textual
description of the resolution of the video
format. Use - to output to stdout. Can also
be used to download to a different
directory, for example with -o '/my/downloa
ds/%(uploader)s/%(title)s-%(id)s.%(ext)s' .
format. %% for a literal percent. Use - to
output to stdout. Can also be used to
download to a different directory, for
example with -o '/my/downloads/%(uploader)s
/%(title)s-%(id)s.%(ext)s' .
--autonumber-size NUMBER Specifies the number of digits in
%(autonumber)s when it is present in output
filename template or --auto-number option
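With the new `playlist_title` and `playlist_id` fields, the template can, for example, group downloads by playlist (the URL is a placeholder):

$ youtube-dl -o '%(playlist_title)s/%(playlist_index)s - %(title)s.%(ext)s' <playlist URL>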
@ -239,8 +241,13 @@ which means you can modify it, redistribute it or use it however you like.
"worst", "worstvideo" and "worstaudio". By
default, youtube-dl will pick the best
quality. Use commas to download multiple
audio formats, such as -f
136/137/mp4/bestvideo,140/m4a/bestaudio
audio formats, such as -f
136/137/mp4/bestvideo,140/m4a/bestaudio.
You can merge the video and audio of two
formats into a single file using -f <video-
format>+<audio-format> (requires ffmpeg or
avconv), for example -f
bestvideo+bestaudio.
--all-formats download all available video formats
--prefer-free-formats prefer free video formats unless a specific
one is requested
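The newly documented merge syntax in practice (requires ffmpeg or avconv; the URL is a placeholder):

$ youtube-dl -f bestvideo+bestaudio <video URL>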
@ -500,7 +507,7 @@ If you want to add support for a new site, you can follow this quick list (assum
6. Run `python test/test_download.py TestDownload.test_YourExtractor`. This *should fail* at first, but you can continually re-run it until you're done. If you decide to add more than one test, then rename ``_TEST`` to ``_TESTS`` and make it into a list of dictionaries. The tests will then be named `TestDownload.test_YourExtractor`, `TestDownload.test_YourExtractor_1`, `TestDownload.test_YourExtractor_2`, etc.
7. Have a look at [`youtube_dl/extractor/common.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py) for possible helper methods and a [detailed description of what your extractor should return](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py#L38). Add tests and code for as many as you want.
8. If you can, check the code with [pyflakes](https://pypi.python.org/pypi/pyflakes) (a good idea) and [pep8](https://pypi.python.org/pypi/pep8) (optional, ignore E501).
9. When the tests pass, [add](https://www.kernel.org/pub/software/scm/git/docs/git-add.html) the new files and [commit](https://www.kernel.org/pub/software/scm/git/docs/git-commit.html) them and [push](https://www.kernel.org/pub/software/scm/git/docs/git-push.html) the result, like this:
9. When the tests pass, [add](http://git-scm.com/docs/git-add) the new files and [commit](http://git-scm.com/docs/git-commit) them and [push](http://git-scm.com/docs/git-push) the result, like this:
$ git add youtube_dl/extractor/__init__.py
$ git add youtube_dl/extractor/yourextractor.py

View File

@ -57,7 +57,7 @@ class FakeYDL(YoutubeDL):
# Different instances of the downloader can't share the same dictionary
# some test set the "sublang" parameter, which would break the md5 checks.
params = get_params(override=override)
super(FakeYDL, self).__init__(params)
super(FakeYDL, self).__init__(params, auto_init=False)
self.result = []
def to_screen(self, s, skip_eol=None):
@ -145,7 +145,8 @@ def expect_info_dict(self, expected_dict, got_dict):
info_dict_str = ''.join(
' %s: %s,\n' % (_repr(k), _repr(v))
for k, v in test_info_dict.items())
write_string('\n"info_dict": {\n' + info_dict_str + '}\n', out=sys.stderr)
write_string(
'\n\'info_dict\': {\n' + info_dict_str + '}\n', out=sys.stderr)
self.assertFalse(
missing_keys,
'Missing keys in test definition: %s' % (

test/test_compat.py (new file, 44 lines)
View File

@ -0,0 +1,44 @@
#!/usr/bin/env python
# coding: utf-8
from __future__ import unicode_literals
# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from youtube_dl.utils import get_filesystem_encoding
from youtube_dl.compat import (
compat_getenv,
compat_expanduser,
)
class TestCompat(unittest.TestCase):
def test_compat_getenv(self):
test_str = 'тест'
os.environ['YOUTUBE-DL-TEST'] = (
test_str if sys.version_info >= (3, 0)
else test_str.encode(get_filesystem_encoding()))
self.assertEqual(compat_getenv('YOUTUBE-DL-TEST'), test_str)
def test_compat_expanduser(self):
test_str = 'C:\Documents and Settings\тест\Application Data'
os.environ['HOME'] = (
test_str if sys.version_info >= (3, 0)
else test_str.encode(get_filesystem_encoding()))
self.assertEqual(compat_expanduser('~'), test_str)
def test_all_present(self):
import youtube_dl.compat
all_names = youtube_dl.compat.__all__
present_names = set(filter(
lambda c: '_' in c and not c.startswith('_'),
dir(youtube_dl.compat))) - set(['unicode_literals'])
self.assertEqual(all_names, sorted(present_names))
if __name__ == '__main__':
unittest.main()

View File

@ -23,10 +23,12 @@ import json
import socket
import youtube_dl.YoutubeDL
from youtube_dl.utils import (
from youtube_dl.compat import (
compat_http_client,
compat_urllib_error,
compat_HTTPError,
)
from youtube_dl.utils import (
DownloadError,
ExtractorError,
format_bytes,
@ -94,7 +96,7 @@ def generator(test_case):
params.setdefault('extract_flat', True)
params.setdefault('skip_download', True)
ydl = YoutubeDL(params)
ydl = YoutubeDL(params, auto_init=False)
ydl.add_default_info_extractors()
finished_hook_called = set()
def _hook(status):

View File

@ -37,7 +37,9 @@ def _make_testfunc(testfile):
or os.path.getmtime(swf_file) < os.path.getmtime(as_file)):
# Recompile
try:
subprocess.check_call(['mxmlc', '-output', swf_file, as_file])
subprocess.check_call([
'mxmlc', '-output', swf_file,
'-static-link-runtime-shared-libraries', as_file])
except OSError as ose:
if ose.errno == errno.ENOENT:
print('mxmlc not found! Skipping test.')

View File

@ -16,11 +16,11 @@ import json
import xml.etree.ElementTree
from youtube_dl.utils import (
clean_html,
DateRange,
encodeFilename,
find_xpath_attr,
fix_xml_ampersands,
get_meta_content,
orderedSet,
OnDemandPagedList,
InAdvancePagedList,
@ -46,8 +46,7 @@ from youtube_dl.utils import (
escape_url,
js_to_json,
get_filesystem_encoding,
compat_getenv,
compat_expanduser,
intlist_to_bytes,
)
@ -157,17 +156,6 @@ class TestUtil(unittest.TestCase):
self.assertEqual(find_xpath_attr(doc, './/node', 'x', 'a'), doc[1])
self.assertEqual(find_xpath_attr(doc, './/node', 'y', 'c'), doc[2])
def test_meta_parser(self):
testhtml = '''
<head>
<meta name="description" content="foo &amp; bar">
<meta content='Plato' name='author'/>
</head>
'''
get_meta = lambda name: get_meta_content(name, testhtml)
self.assertEqual(get_meta('description'), 'foo & bar')
self.assertEqual(get_meta('author'), 'Plato')
def test_xpath_with_ns(self):
testxml = '''<root xmlns:media="http://example.com/">
<media:song>
@ -359,17 +347,14 @@ class TestUtil(unittest.TestCase):
on = js_to_json('{"abc": true}')
self.assertEqual(json.loads(on), {'abc': True})
def test_compat_getenv(self):
test_str = 'тест'
os.environ['YOUTUBE-DL-TEST'] = (test_str if sys.version_info >= (3, 0)
else test_str.encode(get_filesystem_encoding()))
self.assertEqual(compat_getenv('YOUTUBE-DL-TEST'), test_str)
def test_clean_html(self):
self.assertEqual(clean_html('a:\nb'), 'a: b')
self.assertEqual(clean_html('a:\n "b"'), 'a: "b"')
def test_compat_expanduser(self):
test_str = 'C:\Documents and Settings\тест\Application Data'
os.environ['HOME'] = (test_str if sys.version_info >= (3, 0)
else test_str.encode(get_filesystem_encoding()))
self.assertEqual(compat_expanduser('~'), test_str)
def test_intlist_to_bytes(self):
self.assertEqual(
intlist_to_bytes([0, 1, 127, 128, 255]),
b'\x00\x01\x7f\x80\xff')
if __name__ == '__main__':
unittest.main()

View File

@ -14,7 +14,7 @@ import re
import string
from youtube_dl.extractor import YoutubeIE
from youtube_dl.utils import compat_str, compat_urlretrieve
from youtube_dl.compat import compat_str, compat_urlretrieve
_TESTS = [
(

View File

@ -22,13 +22,15 @@ import traceback
if os.name == 'nt':
import ctypes
from .utils import (
from .compat import (
compat_cookiejar,
compat_expanduser,
compat_http_client,
compat_str,
compat_urllib_error,
compat_urllib_request,
)
from .utils import (
escape_url,
ContentTooShortError,
date_from_str,
@ -62,6 +64,7 @@ from .utils import (
from .cache import Cache
from .extractor import get_info_extractor, gen_extractors
from .downloader import get_suitable_downloader
from .downloader.rtmp import rtmpdump_version
from .postprocessor import FFmpegMergerPP, FFmpegPostProcessor
from .version import __version__
@ -655,6 +658,8 @@ class YoutubeDL(object):
extra = {
'n_entries': n_entries,
'playlist': playlist,
'playlist_id': ie_result.get('id'),
'playlist_title': ie_result.get('title'),
'playlist_index': i + playliststart,
'extractor': ie_result['extractor'],
'webpage_url': ie_result['webpage_url'],
@ -833,6 +838,13 @@ class YoutubeDL(object):
formats_info = (self.select_format(format_1, formats),
self.select_format(format_2, formats))
if all(formats_info):
# The first format must contain the video and the
# second the audio
if formats_info[0].get('vcodec') == 'none':
self.report_error('The first format must '
'contain the video, try using '
'"-f %s+%s"' % (format_2, format_1))
return
selected_format = {
'requested_formats': formats_info,
'format': rf,
@ -1294,11 +1306,13 @@ class YoutubeDL(object):
self.report_warning(
'Your Python is broken! Update to a newer and supported version')
stdout_encoding = getattr(
sys.stdout, 'encoding', 'missing (%s)' % type(sys.stdout).__name__)
encoding_str = (
'[debug] Encodings: locale %s, fs %s, out %s, pref %s\n' % (
locale.getpreferredencoding(),
sys.getfilesystemencoding(),
sys.stdout.encoding,
stdout_encoding,
self.get_encoding()))
write_string(encoding_str, encoding=None)
@ -1321,6 +1335,7 @@ class YoutubeDL(object):
platform.python_version(), platform_name()))
exe_versions = FFmpegPostProcessor.get_versions()
exe_versions['rtmpdump'] = rtmpdump_version()
exe_str = ', '.join(
'%s %s' % (exe, v)
for exe, v in sorted(exe_versions.items())

View File

@ -13,10 +13,12 @@ import sys
from .options import (
parseOpts,
)
from .utils import (
from .compat import (
compat_expanduser,
compat_getpass,
compat_print,
)
from .utils import (
DateRange,
DEFAULT_OUTTMPL,
decodeOption,

View File

@ -8,10 +8,8 @@ import re
import shutil
import traceback
from .utils import (
compat_expanduser,
write_json_file,
)
from .compat import compat_expanduser
from .utils import write_json_file
class Cache(object):

youtube_dl/compat.py (new file, 317 lines)
View File

@ -0,0 +1,317 @@
from __future__ import unicode_literals
import getpass
import os
import subprocess
import sys
try:
import urllib.request as compat_urllib_request
except ImportError: # Python 2
import urllib2 as compat_urllib_request
try:
import urllib.error as compat_urllib_error
except ImportError: # Python 2
import urllib2 as compat_urllib_error
try:
import urllib.parse as compat_urllib_parse
except ImportError: # Python 2
import urllib as compat_urllib_parse
try:
from urllib.parse import urlparse as compat_urllib_parse_urlparse
except ImportError: # Python 2
from urlparse import urlparse as compat_urllib_parse_urlparse
try:
import urllib.parse as compat_urlparse
except ImportError: # Python 2
import urlparse as compat_urlparse
try:
import http.cookiejar as compat_cookiejar
except ImportError: # Python 2
import cookielib as compat_cookiejar
try:
import html.entities as compat_html_entities
except ImportError: # Python 2
import htmlentitydefs as compat_html_entities
try:
import html.parser as compat_html_parser
except ImportError: # Python 2
import HTMLParser as compat_html_parser
try:
import http.client as compat_http_client
except ImportError: # Python 2
import httplib as compat_http_client
try:
from urllib.error import HTTPError as compat_HTTPError
except ImportError: # Python 2
from urllib2 import HTTPError as compat_HTTPError
try:
from urllib.request import urlretrieve as compat_urlretrieve
except ImportError: # Python 2
from urllib import urlretrieve as compat_urlretrieve
try:
from subprocess import DEVNULL
compat_subprocess_get_DEVNULL = lambda: DEVNULL
except ImportError:
compat_subprocess_get_DEVNULL = lambda: open(os.path.devnull, 'w')
try:
from urllib.parse import unquote as compat_urllib_parse_unquote
except ImportError:
def compat_urllib_parse_unquote(string, encoding='utf-8', errors='replace'):
if string == '':
return string
res = string.split('%')
if len(res) == 1:
return string
if encoding is None:
encoding = 'utf-8'
if errors is None:
errors = 'replace'
# pct_sequence: contiguous sequence of percent-encoded bytes, decoded
pct_sequence = b''
string = res[0]
for item in res[1:]:
try:
if not item:
raise ValueError
pct_sequence += item[:2].decode('hex')
rest = item[2:]
if not rest:
# This segment was just a single percent-encoded character.
# May be part of a sequence of code units, so delay decoding.
# (Stored in pct_sequence).
continue
except ValueError:
rest = '%' + item
# Encountered non-percent-encoded characters. Flush the current
# pct_sequence.
string += pct_sequence.decode(encoding, errors) + rest
pct_sequence = b''
if pct_sequence:
# Flush the final pct_sequence
string += pct_sequence.decode(encoding, errors)
return string
try:
from urllib.parse import parse_qs as compat_parse_qs
except ImportError: # Python 2
# HACK: The following is the correct parse_qs implementation from cpython 3's stdlib.
# Python 2's version is apparently totally broken
def _parse_qsl(qs, keep_blank_values=False, strict_parsing=False,
encoding='utf-8', errors='replace'):
qs, _coerce_result = qs, unicode
pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
r = []
for name_value in pairs:
if not name_value and not strict_parsing:
continue
nv = name_value.split('=', 1)
if len(nv) != 2:
if strict_parsing:
raise ValueError("bad query field: %r" % (name_value,))
# Handle case of a control-name with no equal sign
if keep_blank_values:
nv.append('')
else:
continue
if len(nv[1]) or keep_blank_values:
name = nv[0].replace('+', ' ')
name = compat_urllib_parse_unquote(
name, encoding=encoding, errors=errors)
name = _coerce_result(name)
value = nv[1].replace('+', ' ')
value = compat_urllib_parse_unquote(
value, encoding=encoding, errors=errors)
value = _coerce_result(value)
r.append((name, value))
return r
def compat_parse_qs(qs, keep_blank_values=False, strict_parsing=False,
encoding='utf-8', errors='replace'):
parsed_result = {}
pairs = _parse_qsl(qs, keep_blank_values, strict_parsing,
encoding=encoding, errors=errors)
for name, value in pairs:
if name in parsed_result:
parsed_result[name].append(value)
else:
parsed_result[name] = [value]
return parsed_result
try:
compat_str = unicode # Python 2
except NameError:
compat_str = str
try:
compat_chr = unichr # Python 2
except NameError:
compat_chr = chr
try:
from xml.etree.ElementTree import ParseError as compat_xml_parse_error
except ImportError: # Python 2.6
from xml.parsers.expat import ExpatError as compat_xml_parse_error
try:
from shlex import quote as shlex_quote
except ImportError: # Python < 3.3
def shlex_quote(s):
return "'" + s.replace("'", "'\"'\"'") + "'"
def compat_ord(c):
if type(c) is int: return c
else: return ord(c)
if sys.version_info >= (3, 0):
compat_getenv = os.getenv
compat_expanduser = os.path.expanduser
else:
# Environment variables should be decoded with filesystem encoding.
# Otherwise it will fail if any non-ASCII characters present (see #3854 #3217 #2918)
def compat_getenv(key, default=None):
from .utils import get_filesystem_encoding
env = os.getenv(key, default)
if env:
env = env.decode(get_filesystem_encoding())
return env
# HACK: The default implementations of os.path.expanduser from cpython do not decode
# environment variables with filesystem encoding. We will work around this by
# providing adjusted implementations.
# The following are os.path.expanduser implementations from cpython 2.7.8 stdlib
# for different platforms with correct environment variables decoding.
if os.name == 'posix':
def compat_expanduser(path):
"""Expand ~ and ~user constructions. If user or $HOME is unknown,
do nothing."""
if not path.startswith('~'):
return path
i = path.find('/', 1)
if i < 0:
i = len(path)
if i == 1:
if 'HOME' not in os.environ:
import pwd
userhome = pwd.getpwuid(os.getuid()).pw_dir
else:
userhome = compat_getenv('HOME')
else:
import pwd
try:
pwent = pwd.getpwnam(path[1:i])
except KeyError:
return path
userhome = pwent.pw_dir
userhome = userhome.rstrip('/')
return (userhome + path[i:]) or '/'
elif os.name == 'nt' or os.name == 'ce':
def compat_expanduser(path):
"""Expand ~ and ~user constructs.
If user or $HOME is unknown, do nothing."""
if path[:1] != '~':
return path
i, n = 1, len(path)
while i < n and path[i] not in '/\\':
i = i + 1
if 'HOME' in os.environ:
userhome = compat_getenv('HOME')
elif 'USERPROFILE' in os.environ:
userhome = compat_getenv('USERPROFILE')
elif not 'HOMEPATH' in os.environ:
return path
else:
try:
drive = compat_getenv('HOMEDRIVE')
except KeyError:
drive = ''
userhome = os.path.join(drive, compat_getenv('HOMEPATH'))
if i != 1: #~user
userhome = os.path.join(os.path.dirname(userhome), path[1:i])
return userhome + path[i:]
else:
compat_expanduser = os.path.expanduser
if sys.version_info < (3, 0):
def compat_print(s):
from .utils import preferredencoding
print(s.encode(preferredencoding(), 'xmlcharrefreplace'))
else:
def compat_print(s):
assert type(s) == type(u'')
print(s)
try:
subprocess_check_output = subprocess.check_output
except AttributeError:
def subprocess_check_output(*args, **kwargs):
assert 'input' not in kwargs
p = subprocess.Popen(*args, stdout=subprocess.PIPE, **kwargs)
output, _ = p.communicate()
ret = p.poll()
if ret:
raise subprocess.CalledProcessError(ret, p.args, output=output)
return output
if sys.version_info < (3, 0) and sys.platform == 'win32':
def compat_getpass(prompt, *args, **kwargs):
if isinstance(prompt, compat_str):
from .utils import preferredencoding
prompt = prompt.encode(preferredencoding())
return getpass.getpass(prompt, *args, **kwargs)
else:
compat_getpass = getpass.getpass
__all__ = [
'compat_HTTPError',
'compat_chr',
'compat_cookiejar',
'compat_expanduser',
'compat_getenv',
'compat_getpass',
'compat_html_entities',
'compat_html_parser',
'compat_http_client',
'compat_ord',
'compat_parse_qs',
'compat_print',
'compat_str',
'compat_subprocess_get_DEVNULL',
'compat_urllib_error',
'compat_urllib_parse',
'compat_urllib_parse_unquote',
'compat_urllib_parse_urlparse',
'compat_urllib_request',
'compat_urlparse',
'compat_urlretrieve',
'compat_xml_parse_error',
'shlex_quote',
'subprocess_check_output',
]

View File

@ -12,9 +12,15 @@ from ..utils import (
compat_str,
encodeFilename,
format_bytes,
get_exe_version,
)
def rtmpdump_version():
return get_exe_version(
'rtmpdump', ['--help'], r'(?i)RTMPDump\s*v?([0-9a-zA-Z._-]+)')
class RtmpFD(FileDownloader):
def real_download(self, filename, info_dict):
def run_rtmpdump(args):

View File

@ -127,6 +127,7 @@ from .francetv import (
)
from .freesound import FreesoundIE
from .freespeech import FreespeechIE
from .freevideo import FreeVideoIE
from .funnyordie import FunnyOrDieIE
from .gamekings import GamekingsIE
from .gameone import (
@ -141,6 +142,7 @@ from .generic import GenericIE
from .glide import GlideIE
from .globo import GloboIE
from .godtube import GodTubeIE
from .goldenmoustache import GoldenMoustacheIE
from .golem import GolemIE
from .googleplus import GooglePlusIE
from .googlesearch import GoogleSearchIE
@ -323,6 +325,7 @@ from .sbs import SBSIE
from .scivee import SciVeeIE
from .screencast import ScreencastIE
from .servingsys import ServingSysIE
from .sexu import SexuIE
from .sexykarma import SexyKarmaIE
from .shared import SharedIE
from .sharesix import ShareSixIE
@ -421,6 +424,7 @@ from .vesti import VestiIE
from .vevo import VevoIE
from .vgtv import VGTVIE
from .vh1 import VH1IE
from .vice import ViceIE
from .viddler import ViddlerIE
from .videobam import VideoBamIE
from .videodetective import VideoDetectiveIE

View File

@ -11,13 +11,13 @@ class ABCIE(InfoExtractor):
_VALID_URL = r'http://www\.abc\.net\.au/news/[^/]+/[^/]+/(?P<id>\d+)'
_TEST = {
'url': 'http://www.abc.net.au/news/2014-07-25/bringing-asylum-seekers-to-australia-would-give/5624716',
'md5': 'dad6f8ad011a70d9ddf887ce6d5d0742',
'url': 'http://www.abc.net.au/news/2014-11-05/australia-to-staff-ebola-treatment-centre-in-sierra-leone/5868334',
'md5': 'cb3dd03b18455a661071ee1e28344d9f',
'info_dict': {
'id': '5624716',
'id': '5868334',
'ext': 'mp4',
'title': 'Bringing asylum seekers to Australia would give them right to asylum claims: professor',
'description': 'md5:ba36fa5e27e5c9251fd929d339aea4af',
'title': 'Australia to help staff Ebola treatment centre in Sierra Leone',
'description': 'md5:809ad29c67a05f54eb41f2a105693a67',
},
}

View File

@ -3,12 +3,13 @@ from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
from ..compat import (
compat_HTTPError,
compat_str,
compat_urllib_parse,
compat_urllib_parse_urlparse,
)
from ..utils import (
ExtractorError,
)

View File

@ -22,7 +22,7 @@ class AllocineIE(InfoExtractor):
'id': '19546517',
'ext': 'mp4',
'title': 'Astérix - Le Domaine des Dieux Teaser VF',
'description': 'md5:4a754271d9c6f16c72629a8a993ee884',
'description': 'md5:abcd09ce503c6560512c14ebfdb720d2',
'thumbnail': 're:http://.*\.jpg',
},
}, {

View File

@ -110,20 +110,25 @@ class BandcampAlbumIE(InfoExtractor):
'url': 'http://blazo.bandcamp.com/album/jazz-format-mixtape-vol-1',
'playlist': [
{
'file': '1353101989.mp3',
'md5': '39bc1eded3476e927c724321ddf116cf',
'info_dict': {
'id': '1353101989',
'ext': 'mp3',
'title': 'Intro',
}
},
{
'file': '38097443.mp3',
'md5': '1a2c32e2691474643e912cc6cd4bffaa',
'info_dict': {
'id': '38097443',
'ext': 'mp3',
'title': 'Kero One - Keep It Alive (Blazo remix)',
}
},
],
'info_dict': {
'title': 'Jazz Format Mixtape vol.1',
},
'params': {
'playlistend': 2
},

View File

@ -14,6 +14,7 @@ from ..utils import (
compat_str,
compat_urllib_request,
compat_parse_qs,
compat_urllib_parse_urlparse,
determine_ext,
ExtractorError,
@ -23,7 +24,7 @@ from ..utils import (
class BrightcoveIE(InfoExtractor):
_VALID_URL = r'https?://.*brightcove\.com/(services|viewer).*\?(?P<query>.*)'
_VALID_URL = r'https?://.*brightcove\.com/(services|viewer).*?\?(?P<query>.*)'
_FEDERATED_URL_TEMPLATE = 'http://c.brightcove.com/services/viewer/htmlFederated?%s'
_TESTS = [
@ -260,11 +261,19 @@ class BrightcoveIE(InfoExtractor):
formats = []
for rend in renditions:
url = rend['defaultURL']
if not url:
continue
if rend['remote']:
# This type of renditions are served through akamaihd.net,
# but they don't use f4m manifests
url = url.replace('control/', '') + '?&v=3.3.0&fp=13&r=FEEFJ&g=RTSJIMBMPFPB'
ext = 'flv'
url_comp = compat_urllib_parse_urlparse(url)
if url_comp.path.endswith('.m3u8'):
formats.extend(
self._extract_m3u8_formats(url, info['id'], 'mp4'))
continue
elif 'akamaihd.net' in url_comp.netloc:
# This type of renditions are served through
# akamaihd.net, but they don't use f4m manifests
url = url.replace('control/', '') + '?&v=3.3.0&fp=13&r=FEEFJ&g=RTSJIMBMPFPB'
ext = 'flv'
else:
ext = determine_ext(url)
size = rend.get('size')

View File

@ -10,12 +10,12 @@ from ..utils import ExtractorError
class BYUtvIE(InfoExtractor):
_VALID_URL = r'^https?://(?:www\.)?byutv.org/watch/[0-9a-f-]+/(?P<video_id>[^/?#]+)'
_TEST = {
'url': 'http://www.byutv.org/watch/44e80f7b-e3ba-43ba-8c51-b1fd96c94a79/granite-flats-talking',
'url': 'http://www.byutv.org/watch/6587b9a3-89d2-42a6-a7f7-fd2f81840a7d/studio-c-season-5-episode-5',
'info_dict': {
'id': 'granite-flats-talking',
'id': 'studio-c-season-5-episode-5',
'ext': 'mp4',
'description': 'md5:4e9a7ce60f209a33eca0ac65b4918e1c',
'title': 'Talking',
'description': 'md5:5438d33774b6bdc662f9485a340401cc',
'title': 'Season 5 Episode 5',
'thumbnail': 're:^https?://.*promo.*'
},
'params': {

View File

@ -27,7 +27,7 @@ class Channel9IE(InfoExtractor):
'title': 'Developer Kick-Off Session: Stuff We Love',
'description': 'md5:c08d72240b7c87fcecafe2692f80e35f',
'duration': 4576,
'thumbnail': 'http://media.ch9.ms/ch9/9d51/03902f2d-fc97-4d3c-b195-0bfe15a19d51/KOS002_220.jpg',
'thumbnail': 'http://video.ch9.ms/ch9/9d51/03902f2d-fc97-4d3c-b195-0bfe15a19d51/KOS002_220.jpg',
'session_code': 'KOS002',
'session_day': 'Day 1',
'session_room': 'Arena 1A',
@ -43,7 +43,7 @@ class Channel9IE(InfoExtractor):
'title': 'Self-service BI with Power BI - nuclear testing',
'description': 'md5:d1e6ecaafa7fb52a2cacdf9599829f5b',
'duration': 1540,
'thumbnail': 'http://media.ch9.ms/ch9/87e1/0300391f-a455-4c72-bec3-4422f19287e1/selfservicenuk_512.jpg',
'thumbnail': 'http://video.ch9.ms/ch9/87e1/0300391f-a455-4c72-bec3-4422f19287e1/selfservicenuk_512.jpg',
'authors': [ 'Mike Wilmot' ],
},
}
@ -94,7 +94,7 @@ class Channel9IE(InfoExtractor):
def _extract_title(self, html):
title = self._html_search_meta('title', html, 'title')
if title is None:
title = self._og_search_title(html)
TITLE_SUFFIX = ' (Channel 9)'
if title is not None and title.endswith(TITLE_SUFFIX):
@ -115,7 +115,7 @@ class Channel9IE(InfoExtractor):
return self._html_search_meta('description', html, 'description')
def _extract_duration(self, html):
m = re.search(r'data-video_duration="(?P<hours>\d{2}):(?P<minutes>\d{2}):(?P<seconds>\d{2})"', html)
m = re.search(r'"length": *"(?P<hours>\d{2}):(?P<minutes>\d{2}):(?P<seconds>\d{2})"', html)
return ((int(m.group('hours')) * 60 * 60) + (int(m.group('minutes')) * 60) + int(m.group('seconds'))) if m else None
def _extract_slides(self, html):
@ -167,7 +167,7 @@ class Channel9IE(InfoExtractor):
return re.findall(r'<a href="/Events/Speakers/[^"]+">([^<]+)</a>', html)
def _extract_content(self, html, content_path):
# Look for downloadable content
formats = self._formats_from_html(html)
slides = self._extract_slides(html)
zip_ = self._extract_zip(html)
@ -258,16 +258,17 @@ class Channel9IE(InfoExtractor):
webpage = self._download_webpage(url, content_path, 'Downloading web page')
page_type_m = re.search(r'<meta name="Search.PageType" content="(?P<pagetype>[^"]+)"/>', webpage)
if page_type_m is None:
raise ExtractorError('Search.PageType not found, don\'t know how to process this page', expected=True)
page_type_m = re.search(r'<meta name="WT.entryid" content="(?P<pagetype>[^:]+)[^"]+"/>', webpage)
if page_type_m is not None:
page_type = page_type_m.group('pagetype')
if page_type == 'Entry': # Any 'item'-like page, may contain downloadable content
return self._extract_entry_item(webpage, content_path)
elif page_type == 'Session': # Event session page, may contain downloadable content
return self._extract_session(webpage, content_path)
elif page_type == 'Event':
return self._extract_list(content_path)
else:
raise ExtractorError('Unexpected WT.entryid %s' % page_type, expected=True)
page_type = page_type_m.group('pagetype')
if page_type == 'List': # List page, may contain list of 'item'-like objects
else: # Assuming list
return self._extract_list(content_path)
elif page_type == 'Entry.Item': # Any 'item'-like page, may contain downloadable content
return self._extract_entry_item(webpage, content_path)
elif page_type == 'Session': # Event session page, may contain downloadable content
return self._extract_session(webpage, content_path)
else:
raise ExtractorError('Unexpected Search.PageType %s' % page_type, expected=True)

View File

@ -42,11 +42,12 @@ class CinemassacreIE(InfoExtractor):
webpage = self._download_webpage(url, display_id)
video_date = mobj.group('date_Y') + mobj.group('date_m') + mobj.group('date_d')
mobj = re.search(r'src="(?P<embed_url>http://player\.screenwavemedia\.com/play/[a-zA-Z]+\.php\?[^"]*\bid=(?:Cinemassacre-)?(?P<video_id>.+?))"', webpage)
mobj = re.search(r'src="(?P<embed_url>http://player\.screenwavemedia\.com/play/[a-zA-Z]+\.php\?[^"]*\bid=(?P<full_video_id>(?:Cinemassacre-)?(?P<video_id>.+?)))"', webpage)
if not mobj:
raise ExtractorError('Can\'t extract embed url and video id')
playerdata_url = mobj.group('embed_url')
video_id = mobj.group('video_id')
full_video_id = mobj.group('full_video_id')
video_title = self._html_search_regex(
r'<title>(?P<title>.+?)\|', webpage, 'title')
@ -60,37 +61,52 @@ class CinemassacreIE(InfoExtractor):
vidurl = self._search_regex(
r'\'vidurl\'\s*:\s*"([^\']+)"', playerdata, 'vidurl').replace('\\/', '/')
videolist_url = self._search_regex(
r"file\s*:\s*'(http.+?/jwplayer\.smil)'", playerdata, 'jwplayer.smil')
videolist = self._download_xml(videolist_url, video_id, 'Downloading videolist XML')
videolist_url = None
formats = []
baseurl = vidurl[:vidurl.rfind('/')+1]
for video in videolist.findall('.//video'):
src = video.get('src')
if not src:
continue
file_ = src.partition(':')[-1]
width = int_or_none(video.get('width'))
height = int_or_none(video.get('height'))
bitrate = int_or_none(video.get('system-bitrate'))
format = {
'url': baseurl + file_,
'format_id': src.rpartition('.')[0].rpartition('_')[-1],
}
if width or height:
format.update({
'tbr': bitrate // 1000 if bitrate else None,
'width': width,
'height': height,
})
else:
format.update({
'abr': bitrate // 1000 if bitrate else None,
'vcodec': 'none',
})
formats.append(format)
self._sort_formats(formats)
mobj = re.search(r"'videoserver'\s*:\s*'(?P<videoserver>[^']+)'", playerdata)
if mobj:
videoserver = mobj.group('videoserver')
mobj = re.search(r'\'vidid\'\s*:\s*"(?P<vidid>[^\']+)"', playerdata)
vidid = mobj.group('vidid') if mobj else full_video_id
videolist_url = 'http://%s/vod/smil:%s.smil/jwplayer.smil' % (videoserver, vidid)
else:
mobj = re.search(r"file\s*:\s*'(?P<smil>http.+?/jwplayer\.smil)'", playerdata)
if mobj:
videolist_url = mobj.group('smil')
if videolist_url:
videolist = self._download_xml(videolist_url, video_id, 'Downloading videolist XML')
formats = []
baseurl = vidurl[:vidurl.rfind('/')+1]
for video in videolist.findall('.//video'):
src = video.get('src')
if not src:
continue
file_ = src.partition(':')[-1]
width = int_or_none(video.get('width'))
height = int_or_none(video.get('height'))
bitrate = int_or_none(video.get('system-bitrate'))
format = {
'url': baseurl + file_,
'format_id': src.rpartition('.')[0].rpartition('_')[-1],
}
if width or height:
format.update({
'tbr': bitrate // 1000 if bitrate else None,
'width': width,
'height': height,
})
else:
format.update({
'abr': bitrate // 1000 if bitrate else None,
'vcodec': 'none',
})
formats.append(format)
self._sort_formats(formats)
else:
formats = [{
'url': vidurl,
}]
return {
'id': video_id,

View File

@ -4,14 +4,16 @@ from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
from ..compat import (
compat_parse_qs,
compat_urllib_parse,
remove_end,
HEADRequest,
compat_HTTPError,
)
from ..utils import (
ExtractorError,
HEADRequest,
remove_end,
)
class CloudyIE(InfoExtractor):

View File

@ -16,9 +16,10 @@ class CNNIE(InfoExtractor):
_TESTS = [{
'url': 'http://edition.cnn.com/video/?/video/sports/2013/06/09/nadal-1-on-1.cnn',
'file': 'sports_2013_06_09_nadal-1-on-1.cnn.mp4',
'md5': '3e6121ea48df7e2259fe73a0628605c4',
'info_dict': {
'id': 'sports_2013_06_09_nadal-1-on-1.cnn',
'ext': 'mp4',
'title': 'Nadal wins 8th French Open title',
'description': 'World Sport\'s Amanda Davies chats with 2013 French Open champion Rafael Nadal.',
'duration': 135,
@ -27,9 +28,10 @@ class CNNIE(InfoExtractor):
},
{
"url": "http://edition.cnn.com/video/?/video/us/2013/08/21/sot-student-gives-epic-speech.georgia-institute-of-technology&utm_source=feedburner&utm_medium=feed&utm_campaign=Feed%3A+rss%2Fcnn_topstories+%28RSS%3A+Top+Stories%29",
"file": "us_2013_08_21_sot-student-gives-epic-speech.georgia-institute-of-technology.mp4",
"md5": "b5cc60c60a3477d185af8f19a2a26f4e",
"info_dict": {
'id': 'us/2013/08/21/sot-student-gives-epic-speech.georgia-institute-of-technology',
'ext': 'mp4',
"title": "Student's epic speech stuns new freshmen",
"description": "A Georgia Tech student welcomes the incoming freshmen with an epic speech backed by music from \"2001: A Space Odyssey.\"",
"upload_date": "20130821",

View File

@ -12,13 +12,14 @@ import sys
import time
import xml.etree.ElementTree
from ..utils import (
from ..compat import (
compat_http_client,
compat_urllib_error,
compat_urllib_parse_urlparse,
compat_urlparse,
compat_str,
)
from ..utils import (
clean_html,
compiled_regex_type,
ExtractorError,
@ -403,7 +404,7 @@ class InfoExtractor(object):
video_info['title'] = playlist_title
return video_info
def _search_regex(self, pattern, string, name, default=_NO_DEFAULT, fatal=True, flags=0):
def _search_regex(self, pattern, string, name, default=_NO_DEFAULT, fatal=True, flags=0, group=None):
"""
Perform a regex search on the given string, using a single or a list of
patterns returning the first matching group.
@ -424,8 +425,11 @@ class InfoExtractor(object):
_name = name
if mobj:
# return the first matching group
return next(g for g in mobj.groups() if g is not None)
if group is None:
# return the first matching group
return next(g for g in mobj.groups() if g is not None)
else:
return mobj.group(group)
elif default is not _NO_DEFAULT:
return default
elif fatal:
@ -435,11 +439,11 @@ class InfoExtractor(object):
'please report this issue on http://yt-dl.org/bug' % _name)
return None
def _html_search_regex(self, pattern, string, name, default=_NO_DEFAULT, fatal=True, flags=0):
def _html_search_regex(self, pattern, string, name, default=_NO_DEFAULT, fatal=True, flags=0, group=None):
"""
Like _search_regex, but strips HTML tags and unescapes entities.
"""
res = self._search_regex(pattern, string, name, default, fatal, flags)
res = self._search_regex(pattern, string, name, default, fatal, flags, group)
if res:
return clean_html(res).strip()
else:
@ -533,9 +537,9 @@ class InfoExtractor(object):
display_name = name
return self._html_search_regex(
r'''(?ix)<meta
(?=[^>]+(?:itemprop|name|property)=["\']?%s["\']?)
[^>]+content=["\']([^"\']+)["\']''' % re.escape(name),
html, display_name, fatal=fatal, **kwargs)
(?=[^>]+(?:itemprop|name|property)=(["\']?)%s\1)
[^>]+content=(["\'])(?P<content>.*?)\1''' % re.escape(name),
html, display_name, fatal=fatal, group='content', **kwargs)
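# Usage sketch for the new 'group' argument (hypothetical call): name the
# capture group to return, instead of taking the first non-None group:
#   self._search_regex(
#       r'content=(["\'])(?P<content>.*?)\1', html, 'content', group='content')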
def _dc_search_uploader(self, html):
return self._html_search_meta('dc.creator', html, 'uploader')

View File

@ -17,7 +17,6 @@ from ..utils import (
bytes_to_intlist,
intlist_to_bytes,
unified_strdate,
clean_html,
urlencode_postdata,
)
from ..aes import (

View File

@ -94,7 +94,7 @@ class DailymotionIE(DailymotionBaseInfoExtractor, SubtitlesInfoExtractor):
# It may just embed a vevo video:
m_vevo = re.search(
r'<link rel="video_src" href="[^"]*?vevo.com[^"]*?videoId=(?P<id>[\w]*)',
r'<link rel="video_src" href="[^"]*?vevo.com[^"]*?video=(?P<id>[\w]*)',
webpage)
if m_vevo is not None:
vevo_id = m_vevo.group('id')

View File

@ -5,7 +5,8 @@ import os.path
import re
from .common import InfoExtractor
from ..utils import compat_urllib_parse_unquote, url_basename
from ..compat import compat_urllib_parse_unquote
from ..utils import url_basename
class DropboxIE(InfoExtractor):

View File

@ -5,12 +5,14 @@ import re
import socket
from .common import InfoExtractor
from ..utils import (
from ..compat import (
compat_http_client,
compat_str,
compat_urllib_error,
compat_urllib_parse,
compat_urllib_request,
)
from ..utils import (
urlencode_postdata,
ExtractorError,
limit_length,

View File

@ -0,0 +1,38 @@
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import ExtractorError
class FreeVideoIE(InfoExtractor):
_VALID_URL = r'^http://www.freevideo.cz/vase-videa/(?P<id>[^.]+)\.html(?:$|[?#])'
_TEST = {
'url': 'http://www.freevideo.cz/vase-videa/vysukany-zadecek-22033.html',
'info_dict': {
'id': 'vysukany-zadecek-22033',
'ext': 'mp4',
"title": "vysukany-zadecek-22033",
"age_limit": 18,
},
'skip': 'Blocked outside .cz',
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage, handle = self._download_webpage_handle(url, video_id)
if '//www.czechav.com/' in handle.geturl():
raise ExtractorError(
'Access to freevideo is blocked from your location',
expected=True)
video_url = self._search_regex(
r'\s+url: "(http://[a-z0-9-]+.cdn.freevideo.cz/stream/.*?/video.mp4)"',
webpage, 'video URL')
return {
'id': video_id,
'url': video_url,
'title': video_id,
'age_limit': 18,
}

View File

@ -21,7 +21,6 @@ class FunnyOrDieIE(InfoExtractor):
},
}, {
'url': 'http://www.funnyordie.com/embed/e402820827',
'md5': '29f4c5e5a61ca39dfd7e8348a75d0aad',
'info_dict': {
'id': 'e402820827',
'ext': 'mp4',

View File

@ -8,12 +8,11 @@ from ..utils import (
compat_urllib_parse,
compat_urlparse,
unescapeHTML,
get_meta_content,
)
class GameSpotIE(InfoExtractor):
_VALID_URL = r'(?:http://)?(?:www\.)?gamespot\.com/.*-(?P<page_id>\d+)/?'
_VALID_URL = r'(?:http://)?(?:www\.)?gamespot\.com/.*-(?P<id>\d+)/?'
_TEST = {
'url': 'http://www.gamespot.com/videos/arma-3-community-guide-sitrep-i/2300-6410818/',
'md5': 'b2a30deaa8654fcccd43713a6b6a4825',
@ -26,10 +25,10 @@ class GameSpotIE(InfoExtractor):
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
page_id = mobj.group('page_id')
page_id = self._match_id(url)
webpage = self._download_webpage(url, page_id)
data_video_json = self._search_regex(r'data-video=["\'](.*?)["\']', webpage, 'data video')
data_video_json = self._search_regex(
r'data-video=["\'](.*?)["\']', webpage, 'data video')
data_video = json.loads(unescapeHTML(data_video_json))
# Transform the manifest url to a link to the mp4 files
@ -41,7 +40,8 @@ class GameSpotIE(InfoExtractor):
http_path = f4m_path[1:].split('/', 1)[1]
http_template = re.sub(QUALITIES_RE, r'%s', http_path)
http_template = http_template.replace('.csmil/manifest.f4m', '')
http_template = compat_urlparse.urljoin('http://video.gamespotcdn.com/', http_template)
http_template = compat_urlparse.urljoin(
'http://video.gamespotcdn.com/', http_template)
formats = []
for q in qualities:
formats.append({
@ -52,8 +52,9 @@ class GameSpotIE(InfoExtractor):
return {
'id': data_video['guid'],
'display_id': page_id,
'title': compat_urllib_parse.unquote(data_video['title']),
'formats': formats,
'description': get_meta_content('description', webpage),
'description': self._html_search_meta('description', webpage),
'thumbnail': self._og_search_thumbnail(webpage),
}

View File

@ -7,11 +7,12 @@ import re
from .common import InfoExtractor
from .youtube import YoutubeIE
from ..utils import (
from ..compat import (
compat_urllib_parse,
compat_urlparse,
compat_xml_parse_error,
)
from ..utils import (
determine_ext,
ExtractorError,
float_or_none,
@ -99,6 +100,22 @@ class GenericIE(InfoExtractor):
'uploader': 'Championat',
},
},
{
# https://github.com/rg3/youtube-dl/issues/3541
'add_ie': ['Brightcove'],
'url': 'http://www.kijk.nl/sbs6/leermijvrouwenkennen/videos/jqMiXKAYan2S/aflevering-1',
'info_dict': {
'id': '3866516442001',
'ext': 'mp4',
'title': 'Leer mij vrouwen kennen: Aflevering 1',
'description': 'Leer mij vrouwen kennen: Aflevering 1',
'uploader': 'SBS Broadcasting',
},
'skip': 'Restricted to Netherlands',
'params': {
'skip_download': True, # m3u8 download
},
},
# Direct link to a video
{
'url': 'http://media.w3.org/2010/05/sintel/trailer.mp4',
@ -417,7 +434,17 @@ class GenericIE(InfoExtractor):
'title': 'Chet Chat 171 - Oct 29, 2014',
'upload_date': '20141029',
}
}
},
# Livestream embed
{
'url': 'http://www.esa.int/Our_Activities/Space_Science/Rosetta/Philae_comet_touch-down_webcast',
'info_dict': {
'id': '67864563',
'ext': 'flv',
'upload_date': '20141112',
'title': 'Rosetta #CometLanding webcast HL 10',
}
},
]
def report_following_redirect(self, new_url):
@ -559,6 +586,7 @@ class GenericIE(InfoExtractor):
return {
'id': video_id,
'title': os.path.splitext(url_basename(url))[0],
'direct': True,
'formats': [{
'format_id': m.group('format_id'),
'url': url,
@ -898,6 +926,12 @@ class GenericIE(InfoExtractor):
if mobj is not None:
return self.url_result(self._proto_relative_url(mobj.group('url'), scheme='http:'), 'CondeNast')
mobj = re.search(
r'<iframe[^>]+src="(?P<url>https?://new\.livestream\.com/[^"]+/player[^"]+)"',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'Livestream')
def check_video(vurl):
vpath = compat_urlparse.urlparse(vurl).path
vext = determine_ext(vpath)

View File

@ -5,13 +5,15 @@ import random
import math
from .common import InfoExtractor
from ..utils import (
ExtractorError,
float_or_none,
from ..compat import (
compat_str,
compat_chr,
compat_ord,
)
from ..utils import (
ExtractorError,
float_or_none,
)
class GloboIE(InfoExtractor):

View File

@ -0,0 +1,48 @@
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
parse_duration,
int_or_none,
)
class GoldenMoustacheIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?goldenmoustache\.com/(?P<display_id>[\w-]+)-(?P<id>\d+)'
_TEST = {
'url': 'http://www.goldenmoustache.com/suricate-le-poker-3700/',
'md5': '0f904432fa07da5054d6c8beb5efb51a',
'info_dict': {
'id': '3700',
'ext': 'mp4',
'title': 'Suricate - Le Poker',
'description': 'md5:3d1f242f44f8c8cb0a106f1fd08e5dc9',
'thumbnail': 're:^https?://.*\.jpg$',
'view_count': int,
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
video_url = self._html_search_regex(
r'data-src-type="mp4" data-src="([^"]+)"', webpage, 'video URL')
title = self._html_search_regex(
r'<title>(.*?) - Golden Moustache</title>', webpage, 'title')
thumbnail = self._og_search_thumbnail(webpage)
description = self._og_search_description(webpage)
view_count = int_or_none(self._html_search_regex(
r'<strong>([0-9]+)</strong>\s*VUES</span>',
webpage, 'view count', fatal=False))
return {
'id': video_id,
'url': video_url,
'ext': 'mp4',
'title': title,
'description': description,
'thumbnail': thumbnail,
'view_count': view_count,
}

View File

@ -1,15 +1,11 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
compat_urlparse,
str_to_int,
ExtractorError,
)
import json
class GoshgayIE(InfoExtractor):
@ -27,36 +23,27 @@ class GoshgayIE(InfoExtractor):
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
title = self._search_regex(r'class="video-title"><h1>(.+?)<', webpage, 'title')
title = self._og_search_title(webpage)
thumbnail = self._og_search_thumbnail(webpage)
family_friendly = self._html_search_meta(
'isFamilyFriendly', webpage, default='false')
config_url = self._search_regex(
r"'config'\s*:\s*'([^']+)'", webpage, 'config URL')
player_config = self._search_regex(
r'(?s)jwplayer\("player"\)\.setup\(({.+?})\)', webpage, 'config settings')
player_vars = json.loads(player_config.replace("'", '"'))
width = str_to_int(player_vars.get('width'))
height = str_to_int(player_vars.get('height'))
config_uri = player_vars.get('config')
config = self._download_xml(
config_url, video_id, 'Downloading player config XML')
if config_uri is None:
raise ExtractorError('Missing config URI')
node = self._download_xml(config_uri, video_id, 'Downloading player config XML',
errnote='Unable to download XML')
if node is None:
if config is None:
raise ExtractorError('Missing config XML')
if node.tag != 'config':
if config.tag != 'config':
raise ExtractorError('Missing config attribute')
fns = node.findall('file')
imgs = node.findall('image')
if len(fns) != 1:
fns = config.findall('file')
if len(fns) < 1:
raise ExtractorError('Missing media URI')
video_url = fns[0].text
if len(imgs) < 1:
thumbnail = None
else:
thumbnail = imgs[0].text
url_comp = compat_urlparse.urlparse(url)
ref = "%s://%s%s" % (url_comp[0], url_comp[1], url_comp[2])
@ -65,9 +52,7 @@ class GoshgayIE(InfoExtractor):
'id': video_id,
'url': video_url,
'title': title,
'width': width,
'height': height,
'thumbnail': thumbnail,
'http_referer': ref,
'age_limit': 18,
'age_limit': 0 if family_friendly == 'true' else 18,
}

View File

@ -8,12 +8,13 @@ import re
from .common import InfoExtractor
from ..utils import ExtractorError, compat_urllib_request, compat_html_parser
from ..utils import (
from ..compat import (
compat_html_parser,
compat_urllib_parse,
compat_urllib_request,
compat_urlparse,
)
from ..utils import ExtractorError
class GroovesharkHtmlParser(compat_html_parser.HTMLParser):

View File

@ -3,7 +3,7 @@ from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
get_meta_content,
determine_ext,
int_or_none,
parse_iso8601,
)
@ -25,11 +25,11 @@ class HeiseIE(InfoExtractor):
'title': (
"Podcast: c't uplink 3.3 Owncloud / Tastaturen / Peilsender Smartphone"
),
'format_id': 'mp4_720',
'format_id': 'mp4_720p',
'timestamp': 1411812600,
'upload_date': '20140927',
'description': 'In uplink-Episode 3.3 geht es darum, wie man sich von Cloud-Anbietern emanzipieren kann, worauf man beim Kauf einer Tastatur achten sollte und was Smartphones über uns verraten.',
'thumbnail': 're:https?://.*\.jpg$',
'thumbnail': 're:^https?://.*\.jpe?g$',
}
}
@ -49,11 +49,12 @@ class HeiseIE(InfoExtractor):
info = {
'id': video_id,
'thumbnail': self._og_search_thumbnail(webpage),
'timestamp': parse_iso8601(get_meta_content('date', webpage)),
'timestamp': parse_iso8601(
self._html_search_meta('date', webpage)),
'description': self._og_search_description(webpage),
}
title = get_meta_content('fulltitle', webpage)
title = self._html_search_meta('fulltitle', webpage)
if title:
info['title'] = title
else:
@ -64,9 +65,12 @@ class HeiseIE(InfoExtractor):
label = source_node.attrib['label']
height = int_or_none(self._search_regex(
r'^(.*?_)?([0-9]+)p$', label, 'height', default=None))
video_url = source_node.attrib['file']
ext = determine_ext(video_url, '')
formats.append({
'url': source_node.attrib['file'],
'url': video_url,
'format_note': label,
'format_id': '%s_%s' % (ext, label),
'height': height,
})
self._sort_formats(formats)

View File

@ -6,7 +6,6 @@ import json
from .common import InfoExtractor
from ..utils import (
compat_urlparse,
get_element_by_attribute,
)
@ -27,10 +26,11 @@ class ImdbIE(InfoExtractor):
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
video_id = self._match_id(url)
webpage = self._download_webpage('http://www.imdb.com/video/imdb/vi%s' % video_id, video_id)
descr = get_element_by_attribute('itemprop', 'description', webpage)
descr = self._html_search_regex(
r'(?s)<span itemprop="description">(.*?)</span>',
webpage, 'description', fatal=False)
available_formats = re.findall(
r'case \'(?P<f_id>.*?)\' :$\s+url = \'(?P<path>.*?)\'', webpage,
flags=re.MULTILINE)
@ -73,9 +73,7 @@ class ImdbListIE(InfoExtractor):
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
list_id = mobj.group('id')
list_id = self._match_id(url)
webpage = self._download_webpage(url, list_id)
entries = [
self.url_result('http://www.imdb.com' + m, 'Imdb')

View File

@ -5,11 +5,11 @@ import re
from .common import InfoExtractor
from ..utils import (
get_element_by_id,
parse_iso8601,
determine_ext,
int_or_none,
float_or_none,
get_element_by_id,
int_or_none,
parse_iso8601,
str_to_int,
)
@@ -30,7 +30,7 @@ class IzleseneIE(InfoExtractor):
'description': 'md5:253753e2655dde93f59f74b572454f6d',
'thumbnail': 're:^http://.*\.jpg',
'uploader_id': 'pelikzzle',
'timestamp': 1404298698,
'timestamp': 1404302298,
'upload_date': '20140702',
'duration': 95.395,
'age_limit': 0,
@@ -46,7 +46,7 @@ class IzleseneIE(InfoExtractor):
'description': 'Tarkan Dortmund 2006 Konseri',
'thumbnail': 're:^http://.*\.jpg',
'uploader_id': 'parlayankiz',
'timestamp': 1163318593,
'timestamp': 1163322193,
'upload_date': '20061112',
'duration': 253.666,
'age_limit': 0,
@@ -55,10 +55,9 @@ class IzleseneIE(InfoExtractor):
]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
url = 'http://www.izlesene.com/video/%s' % video_id
video_id = self._match_id(url)
url = 'http://www.izlesene.com/video/%s' % video_id
webpage = self._download_webpage(url, video_id)
title = self._og_search_title(webpage)

View File

@@ -4,6 +4,7 @@ import random
import re
from .common import InfoExtractor
from ..utils import ExtractorError
class Laola1TvIE(InfoExtractor):

View File

@@ -18,7 +18,7 @@ from ..utils import (
class LivestreamIE(InfoExtractor):
IE_NAME = 'livestream'
_VALID_URL = r'http://new\.livestream\.com/.*?/(?P<event_name>.*?)(/videos/(?P<id>\d+))?/?$'
_VALID_URL = r'https?://new\.livestream\.com/.*?/(?P<event_name>.*?)(/videos/(?P<id>[0-9]+)(?:/player)?)?/?(?:$|[?#])'
_TESTS = [{
'url': 'http://new.livestream.com/CoheedandCambria/WebsterHall/videos/4719370',
'md5': '53274c76ba7754fb0e8d072716f2292b',
@@ -37,6 +37,9 @@ class LivestreamIE(InfoExtractor):
'title': 'TEDCity2.0 (English)',
},
'playlist_mincount': 4,
}, {
'url': 'https://new.livestream.com/accounts/362/events/3557232/videos/67864563/player?autoPlay=false&height=360&mute=false&width=640',
'only_matching': True,
}]
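A quick standalone check of the widened _VALID_URL (the pattern is copied from the diff above; the snippet itself is illustrative):
import re
pattern = r'https?://new\.livestream\.com/.*?/(?P<event_name>.*?)(/videos/(?P<id>[0-9]+)(?:/player)?)?/?(?:$|[?#])'
m = re.match(pattern, 'http://new.livestream.com/CoheedandCambria/WebsterHall/videos/4719370')
assert m.group('id') == '4719370'
# The new (?:/player)? and (?:$|[?#]) parts additionally let the
# .../videos/67864563/player?autoPlay=false embed URL above match.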
def _parse_smil(self, video_id, smil_url):

View File

@@ -33,7 +33,7 @@ class MTVServicesInfoExtractor(InfoExtractor):
m = re.match(r'^rtmpe?://.*?/(?P<finalid>gsp\..+?/.*)$', rtmp_video_url)
if not m:
return rtmp_video_url
base = 'http://mtvnmobile.vo.llnwd.net/kip0/_pxn=1+_pxI0=Ripod-h264+_pxL0=undefined+_pxM0=+_pxK=18639+_pxE=mp4/44620/mtvnorigin/'
base = 'http://viacommtvstrmfs.fplive.net/'
return base + m.group('finalid')
def _get_feed_url(self, uri):
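Illustration of the RTMP-to-HTTP rewrite above with an invented CDN URL (only the base host comes from the diff):
import re
rtmp_video_url = 'rtmp://cp1234.edgefcs.net/ondemand/gsp.alias/mediabus/video.mp4'  # invented
m = re.match(r'^rtmpe?://.*?/(?P<finalid>gsp\..+?/.*)$', rtmp_video_url)
print('http://viacommtvstrmfs.fplive.net/' + m.group('finalid'))
# -> http://viacommtvstrmfs.fplive.net/gsp.alias/mediabus/video.mp4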

View File

@@ -13,9 +13,10 @@ class MySpassIE(InfoExtractor):
_VALID_URL = r'http://www\.myspass\.de/.*'
_TEST = {
'url': 'http://www.myspass.de/myspass/shows/tvshows/absolute-mehrheit/Absolute-Mehrheit-vom-17022013-Die-Highlights-Teil-2--/11741/',
'file': '11741.mp4',
'md5': '0b49f4844a068f8b33f4b7c88405862b',
'info_dict': {
'id': '11741',
'ext': 'mp4',
"description": "Wer kann in die Fu\u00dfstapfen von Wolfgang Kubicki treten und die Mehrheit der Zuschauer hinter sich versammeln? Wird vielleicht sogar die Absolute Mehrheit geknackt und der Jackpot von 200.000 Euro mit nach Hause genommen?",
"title": "Absolute Mehrheit vom 17.02.2013 - Die Highlights, Teil 2",
},

View File

@@ -7,11 +7,12 @@ import re
import json
from .common import InfoExtractor
from ..utils import (
from ..compat import (
compat_ord,
compat_urllib_parse,
compat_urllib_request,
)
from ..utils import (
ExtractorError,
)

View File

@@ -67,7 +67,7 @@ class NDRIE(InfoExtractor):
thumbnail = None
video_url = re.search(r'''3: \{src:'(?P<video>.+?)\.hi\.mp4', type:"video/mp4"},''', page)
video_url = re.search(r'''3: \{src:'(?P<video>.+?)\.(lo|hi|hq)\.mp4', type:"video/mp4"},''', page)
if video_url:
thumbnails = re.findall(r'''\d+: \{src: "([^"]+)"(?: \|\| '[^']+')?, quality: '([^']+)'}''', page)
if thumbnails:

View File

@@ -12,6 +12,7 @@ from ..utils import (
unified_strdate,
parse_duration,
int_or_none,
ExtractorError,
)
@@ -108,6 +109,9 @@ class NiconicoIE(InfoExtractor):
flv_info_request, video_id,
note='Downloading flv info', errnote='Unable to download flv info')
if 'deleted=' in flv_info_webpage:
raise ExtractorError('The video has been deleted.',
expected=True)
video_real_url = compat_urlparse.parse_qs(flv_info_webpage)['url'][0]
# Start extracting information
@@ -171,7 +175,8 @@ class NiconicoPlaylistIE(InfoExtractor):
entries = [{
'_type': 'url',
'ie_key': NiconicoIE.ie_key(),
'url': 'http://www.nicovideo.jp/watch/%s' % entry['item_id'],
'url': ('http://www.nicovideo.jp/watch/%s' %
entry['item_data']['video_id']),
} for entry in entries]
return {

View File

@@ -6,6 +6,7 @@ import os.path
from .common import InfoExtractor
from ..utils import (
ExtractorError,
compat_urllib_parse,
compat_urllib_request,
)
@@ -29,6 +30,12 @@ class PlayedIE(InfoExtractor):
video_id = self._match_id(url)
orig_webpage = self._download_webpage(url, video_id)
m_error = re.search(
r'(?s)Reason for deletion:.*?<b class="err"[^>]*>(?P<msg>[^<]+)</b>', orig_webpage)
if m_error:
raise ExtractorError(m_error.group('msg'), expected=True)
fields = re.findall(
r'type="hidden" name="([^"]+)"\s+value="([^"]+)">', orig_webpage)
data = dict(fields)
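Sketch of the hidden-field scraping above on a made-up page fragment (field names and values invented):
import re
orig_webpage = '<input type="hidden" name="op"  value="download1"><input type="hidden" name="id"  value="j2f2sfiiukgt">'
fields = re.findall(r'type="hidden" name="([^"]+)"\s+value="([^"]+)">', orig_webpage)
data = dict(fields)  # {'op': 'download1', 'id': 'j2f2sfiiukgt'}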

View File

@@ -1,7 +1,7 @@
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import compat_urllib_parse_unquote
from ..compat import compat_urllib_parse_unquote
class Ro220IE(InfoExtractor):

View File

@@ -0,0 +1,61 @@
from __future__ import unicode_literals
import re
from .common import InfoExtractor
class SexuIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?sexu\.com/(?P<id>\d+)'
_TEST = {
'url': 'http://sexu.com/961791/',
'md5': 'ff615aca9691053c94f8f10d96cd7884',
'info_dict': {
'id': '961791',
'ext': 'mp4',
'title': 'md5:4d05a19a5fc049a63dbbaf05fb71d91b',
'description': 'md5:c5ed8625eb386855d5a7967bd7b77a54',
'categories': list, # NSFW
'thumbnail': 're:https?://.*\.jpg$',
'age_limit': 18,
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
quality_arr = self._search_regex(
r'sources:\s*\[([^\]]+)\]', webpage, 'format string')
formats = [{
'url': fmt[0].replace('\\', ''),
'format_id': fmt[1],
'height': int(fmt[1][:3]),
} for fmt in re.findall(r'"file":"([^"]+)","label":"([^"]+)"', quality_arr)]
self._sort_formats(formats)
title = self._html_search_regex(
r'<title>([^<]+)\s*-\s*Sexu\.Com</title>', webpage, 'title')
description = self._html_search_meta(
'description', webpage, 'description')
thumbnail = self._html_search_regex(
r'image:\s*"([^"]+)"',
webpage, 'thumbnail', fatal=False)
categories_str = self._html_search_meta(
'keywords', webpage, 'categories')
categories = (
None if categories_str is None
else categories_str.split(','))
return {
'id': video_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'categories': categories,
'formats': formats,
'age_limit': 18,
}
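A hedged illustration of the formats parsing in this new extractor, using an invented sources string:
import re
quality_arr = '{"file":"http:\\/\\/video.sexu.com\\/sample\\/360.mp4","label":"360p"}'  # invented
formats = [{
    'url': fmt[0].replace('\\', ''),
    'format_id': fmt[1],
    'height': int(fmt[1][:3]),
} for fmt in re.findall(r'"file":"([^"]+)","label":"([^"]+)"', quality_arr)]
# -> [{'url': 'http://video.sexu.com/sample/360.mp4', 'format_id': '360p', 'height': 360}]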

View File

@@ -4,11 +4,11 @@ from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import compat_urlparse
from ..compat import compat_urlparse
class SpiegelIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?spiegel\.de/video/[^/]*-(?P<videoID>[0-9]+)(?:\.html)?(?:#.*)?$'
_VALID_URL = r'https?://(?:www\.)?spiegel\.de/video/[^/]*-(?P<id>[0-9]+)(?:-embed)?(?:\.html)?(?:#.*)?$'
_TESTS = [{
'url': 'http://www.spiegel.de/video/vulkan-tungurahua-in-ecuador-ist-wieder-aktiv-video-1259285.html',
'md5': '2c2754212136f35fb4b19767d242f66e',
@@ -29,16 +29,24 @@ class SpiegelIE(InfoExtractor):
'description': 'md5:c2322b65e58f385a820c10fa03b2d088',
'duration': 983,
},
}, {
'url': 'http://www.spiegel.de/video/astronaut-alexander-gerst-von-der-iss-station-beantwortet-fragen-video-1519126-embed.html',
'md5': 'd8eeca6bfc8f1cd6f490eb1f44695d51',
'info_dict': {
'id': '1519126',
'ext': 'mp4',
'description': 'SPIEGEL ONLINE-Nutzer durften den deutschen Astronauten Alexander Gerst über sein Leben auf der ISS-Station befragen. Hier kommen seine Antworten auf die besten sechs Fragen.',
'title': 'Fragen an Astronaut Alexander Gerst: "Bekommen Sie die Tageszeiten mit?"',
}
}]
def _real_extract(self, url):
m = re.match(self._VALID_URL, url)
video_id = m.group('videoID')
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
title = self._html_search_regex(
r'<div class="module-title">(.*?)</div>', webpage, 'title')
title = re.sub(r'\s+', ' ', self._html_search_regex(
r'(?s)<(?:h1|div) class="module-title"[^>]*>(.*?)</(?:h1|div)>',
webpage, 'title'))
description = self._html_search_meta('description', webpage, 'description')
base_url = self._search_regex(
@@ -79,7 +87,7 @@ class SpiegelArticleIE(InfoExtractor):
_VALID_URL = 'https?://www\.spiegel\.de/(?!video/)[^?#]*?-(?P<id>[0-9]+)\.html'
IE_NAME = 'Spiegel:Article'
IE_DESC = 'Articles on spiegel.de'
_TEST = {
_TESTS = [{
'url': 'http://www.spiegel.de/sport/sonst/badminton-wm-die-randsportart-soll-populaerer-werden-a-987092.html',
'info_dict': {
'id': '1516455',
@@ -87,20 +95,34 @@ class SpiegelArticleIE(InfoExtractor):
'title': 'Faszination Badminton: Nennt es bloß nicht Federball',
'description': 're:^Patrick Kämnitz gehört.{100,}',
},
}
}, {
'url': 'http://www.spiegel.de/wissenschaft/weltall/astronaut-alexander-gerst-antwortet-spiegel-online-lesern-a-989876.html',
'info_dict': {
},
'playlist_count': 6,
}]
def _real_extract(self, url):
m = re.match(self._VALID_URL, url)
video_id = m.group('id')
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
# Single video on top of the page
video_link = self._search_regex(
r'<a href="([^"]+)" onclick="return spOpenVideo\(this,', webpage,
'video page URL')
video_url = compat_urlparse.urljoin(
self.http_scheme() + '//spiegel.de/', video_link)
'video page URL', default=None)
if video_link:
video_url = compat_urlparse.urljoin(
self.http_scheme() + '//spiegel.de/', video_link)
return self.url_result(video_url)
return {
'_type': 'url',
'url': video_url,
}
# Multiple embedded videos
embeds = re.findall(
r'<div class="vid_holder[0-9]+.*?</div>\s*.*?url\s*=\s*"([^"]+)"',
webpage)
entries = [
self.url_result(compat_urlparse.urljoin(
self.http_scheme() + '//spiegel.de/', embed_path))
for embed_path in embeds
]
return self.playlist_result(entries)
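Sketch of the new multi-embed branch on an invented article fragment (the embed path echoes the test above):
import re
webpage = '<div class="vid_holder1">...</div> <script>var url = "/video/video-1519126-embed.html";</script>'  # invented
embeds = re.findall(r'<div class="vid_holder[0-9]+.*?</div>\s*.*?url\s*=\s*"([^"]+)"', webpage)
# -> ['/video/video-1519126-embed.html']; each path is then resolved against http://spiegel.de/ via url_result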

View File

@@ -13,7 +13,7 @@ from ..utils import (
class StreamcloudIE(InfoExtractor):
IE_NAME = 'streamcloud.eu'
_VALID_URL = r'https?://streamcloud\.eu/(?P<id>[a-zA-Z0-9_-]+)/(?P<fname>[^#?]*)\.html'
_VALID_URL = r'https?://streamcloud\.eu/(?P<id>[a-zA-Z0-9_-]+)(?:/(?P<fname>[^#?]*)\.html)?'
_TEST = {
'url': 'http://streamcloud.eu/skp9j99s4bpz/youtube-dl_test_video_____________-BaW_jenozKc.mp4.html',
@@ -27,8 +27,8 @@
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
video_id = self._match_id(url)
url = 'http://streamcloud.eu/%s' % video_id
orig_webpage = self._download_webpage(url, video_id)

View File

@@ -80,7 +80,7 @@ class SWRMediathekIE(InfoExtractor):
if media_type == 'Video':
fmt.update({
'format_note': ['144p', '288p', '544p'][quality-1],
'format_note': ['144p', '288p', '544p', '720p'][quality-1],
'vcodec': codec,
})
elif media_type == 'Audio':
@@ -101,4 +101,4 @@ class SWRMediathekIE(InfoExtractor):
'uploader': attr['channel_title'],
'uploader_id': attr['channel_idkey'],
'formats': formats,
}
}

View File

@@ -50,6 +50,7 @@ class TapelyIE(InfoExtractor):
request = compat_urllib_request.Request(playlist_url)
request.add_header('X-Requested-With', 'XMLHttpRequest')
request.add_header('Accept', 'application/json')
request.add_header('Referer', url)
playlist = self._download_json(request, display_id)

View File

@@ -38,6 +38,7 @@ class TEDIE(SubtitlesInfoExtractor):
'actively fooling us.'),
'uploader': 'Dan Dennett',
'width': 854,
'duration': 1308,
}
}, {
'url': 'http://www.ted.com/watch/ted-institute/ted-bcg/vishal-sikka-the-beauty-and-power-of-algorithms',
@@ -57,6 +58,7 @@ class TEDIE(SubtitlesInfoExtractor):
'title': 'Be passionate. Be courageous. Be your best.',
'uploader': 'Gabby Giffords and Mark Kelly',
'description': 'md5:5174aed4d0f16021b704120360f72b92',
'duration': 1128,
},
}, {
'url': 'http://www.ted.com/playlists/who_are_the_hackers',
@@ -178,6 +180,7 @@ class TEDIE(SubtitlesInfoExtractor):
'description': self._og_search_description(webpage),
'subtitles': video_subtitles,
'formats': formats,
'duration': talk_info.get('duration'),
}
def _get_available_subtitles(self, video_id, talk_info):

View File

@@ -29,7 +29,7 @@ class TruTubeIE(InfoExtractor):
# filehd is always 404
video_url = xpath_text(config, './file', 'video URL', fatal=True)
title = xpath_text(config, './title', 'title')
title = xpath_text(config, './title', 'title').strip()
thumbnail = xpath_text(config, './image', 'thumbnail')
return {

View File

@@ -4,9 +4,9 @@ from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
ExtractorError,
compat_str,
parse_iso8601,
qualities,
)
@@ -176,8 +176,7 @@ class TVPlayIE(InfoExtractor):
]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
video_id = self._match_id(url)
video = self._download_json(
'http://playapi.mtgx.tv/v1/videos/%s' % video_id, video_id, 'Downloading video JSON')
@@ -208,6 +207,10 @@ class TVPlayIE(InfoExtractor):
'app': m.group('app'),
'play_path': m.group('playpath'),
})
elif video_url.endswith('.f4m'):
formats.extend(self._extract_f4m_formats(
video_url + '?hdcore=3.5.0&plugin=aasp-3.5.0.151.81', video_id))
continue
else:
fmt.update({
'url': video_url,

View File

@@ -5,7 +5,6 @@ import re
from .common import InfoExtractor
from ..utils import (
compat_urlparse,
get_meta_content,
)
@@ -79,7 +78,7 @@ class UstreamChannelIE(InfoExtractor):
m = re.match(self._VALID_URL, url)
display_id = m.group('slug')
webpage = self._download_webpage(url, display_id)
channel_id = get_meta_content('ustream:channel_id', webpage)
channel_id = self._html_search_meta('ustream:channel_id', webpage)
BASE = 'http://www.ustream.tv'
next_url = '/ajax/socialstream/videos/%s/1.json' % channel_id

View File

@@ -0,0 +1,38 @@
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from .ooyala import OoyalaIE
from ..utils import ExtractorError
class ViceIE(InfoExtractor):
_VALID_URL = r'http://www\.vice\.com/.*?/(?P<name>.+)'
_TEST = {
'url': 'http://www.vice.com/Fringes/cowboy-capitalists-part-1',
'info_dict': {
'id': '43cW1mYzpia9IlestBjVpd23Yu3afAfp',
'ext': 'mp4',
'title': 'VICE_COWBOYCAPITALISTS_PART01_v1_VICE_WM_1080p.mov',
},
'params': {
# Requires ffmpeg (m3u8 manifest)
'skip_download': True,
},
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
name = mobj.group('name')
webpage = self._download_webpage(url, name)
try:
embed_code = self._search_regex(
r'embedCode=([^&\'"]+)', webpage,
'ooyala embed code')
ooyala_url = OoyalaIE._url_for_embed_code(embed_code)
except ExtractorError:
raise ExtractorError('The page doesn\'t contain a video', expected=True)
return self.url_result(ooyala_url, ie='Ooyala')

View File

@@ -7,11 +7,13 @@ import itertools
from .common import InfoExtractor
from .subtitles import SubtitlesInfoExtractor
from ..utils import (
from ..compat import (
compat_HTTPError,
compat_urllib_parse,
compat_urllib_request,
compat_urlparse,
)
from ..utils import (
ExtractorError,
InAdvancePagedList,
int_or_none,

View File

@@ -37,7 +37,7 @@ class WimpIE(InfoExtractor):
video_id = mobj.group(1)
webpage = self._download_webpage(url, video_id)
video_url = self._search_regex(
r"'file'\s*:\s*'([^']+)'", webpage, 'video URL')
r"[\"']file[\"']\s*[:,]\s*[\"'](.+?)[\"']", webpage, 'video URL')
if YoutubeIE.suitable(video_url):
self.to_screen('Found YouTube video')
return {

View File

@@ -27,15 +27,15 @@ class WrzutaIE(InfoExtractor):
'description': 'md5:7fb5ef3c21c5893375fda51d9b15d9cd',
},
}, {
'url': 'http://w729.wrzuta.pl/audio/9oXJqdcndqv/david_guetta_amp_showtek_ft._vassy_-_bad',
'md5': '1e546a18e1c22ac6e9adce17b8961ff5',
'url': 'http://jolka85.wrzuta.pl/audio/063jOPX5ue2/liber_natalia_szroeder_-_teraz_ty',
'md5': 'bc78077859bea7bcfe4295d7d7fc9025',
'info_dict': {
'id': '9oXJqdcndqv',
'id': '063jOPX5ue2',
'ext': 'ogg',
'title': 'David Guetta & Showtek ft. Vassy - Bad',
'duration': 270,
'uploader_id': 'w729',
'description': 'md5:4628f01c666bbaaecefa83476cfa794a',
'title': 'Liber & Natalia Szroeder - Teraz Ty',
'duration': 203,
'uploader_id': 'jolka85',
'description': 'md5:2d2b6340f9188c8c4cd891580e481096',
},
}]
@@ -49,16 +49,17 @@ class WrzutaIE(InfoExtractor):
quality = qualities(['SD', 'MQ', 'HQ', 'HD'])
audio_table = {'flv': 'mp3', 'webm': 'ogg'}
audio_table = {'flv': 'mp3', 'webm': 'ogg', '???': 'mp3'}
embedpage = self._download_json('http://www.wrzuta.pl/npp/embed/%s/%s' % (uploader, video_id), video_id)
formats = []
for media in embedpage['url']:
fmt = media['type'].split('@')[0]
if typ == 'audio':
ext = audio_table[media['type'].split('@')[0]]
ext = audio_table.get(fmt, fmt)
else:
ext = media['type'].split('@')[0]
ext = fmt
formats.append({
'format_id': '%s_%s' % (ext, media['quality'].lower()),

View File

@@ -9,40 +9,30 @@ from ..utils import (
class YouJizzIE(InfoExtractor):
_VALID_URL = r'^https?://(?:\w+\.)?youjizz\.com/videos/(?P<videoid>[^.]+)\.html$'
_VALID_URL = r'https?://(?:\w+\.)?youjizz\.com/videos/[^/#?]+-(?P<id>[0-9]+)\.html(?:$|[?#])'
_TEST = {
'url': 'http://www.youjizz.com/videos/zeichentrick-1-2189178.html',
'file': '2189178.flv',
'md5': '07e15fa469ba384c7693fd246905547c',
'info_dict': {
'id': '2189178',
'ext': 'flv',
"title": "Zeichentrick 1",
"age_limit": 18,
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('videoid')
# Get webpage content
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
age_limit = self._rta_search(webpage)
video_title = self._html_search_regex(
r'<title>\s*(.*)\s*</title>', webpage, 'title')
# Get the video title
video_title = self._html_search_regex(r'<title>(?P<title>.*)</title>',
webpage, 'title').strip()
# Get the embed page
result = re.search(r'https?://www.youjizz.com/videos/embed/(?P<videoid>[0-9]+)', webpage)
if result is None:
raise ExtractorError('ERROR: unable to extract embed page')
embed_page_url = result.group(0).strip()
video_id = result.group('videoid')
webpage = self._download_webpage(embed_page_url, video_id)
embed_page_url = self._search_regex(
r'(https?://www.youjizz.com/videos/embed/[0-9]+)',
webpage, 'embed page')
webpage = self._download_webpage(
embed_page_url, video_id, note='downloading embed page')
# Get the video URL
m_playlist = re.search(r'so.addVariable\("playlist", ?"(?P<playlist>.+?)"\);', webpage)

View File

@@ -510,7 +510,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
def _parse_sig_js(self, jscode):
funcname = self._search_regex(
r'signature=([$a-zA-Z]+)', jscode,
r'\.sig\|\|([a-zA-Z0-9]+)\(', jscode,
'Initial JS player signature function name')
jsi = JSInterpreter(jscode)
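For reference, what the new signature-function regex keys on; the player fragment is invented, and the old signature=<name> pattern is the one shown removed above:
import re
jscode = 'c=a.sig||dE(a.s)'  # invented player fragment
funcname = re.search(r'\.sig\|\|([a-zA-Z0-9]+)\(', jscode).group(1)
# funcname == 'dE'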
@@ -684,7 +684,6 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
# Get video info
self.report_video_info_webpage_download(video_id)
if re.search(r'player-age-gate-content">', video_webpage) is not None:
self.report_age_confirmation()
age_gate = True
# We simulate the access to the video from www.youtube.com/v/{video_id}
# this can be viewed without login into Youtube
@@ -692,12 +691,13 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
'video_id': video_id,
'eurl': 'https://youtube.googleapis.com/v/' + video_id,
'sts': self._search_regex(
r'"sts"\s*:\s*(\d+)', video_webpage, 'sts'),
r'"sts"\s*:\s*(\d+)', video_webpage, 'sts', default=''),
})
video_info_url = proto + '://www.youtube.com/get_video_info?' + data
video_info_webpage = self._download_webpage(video_info_url, video_id,
note=False,
errnote='unable to download video info webpage')
video_info_webpage = self._download_webpage(
video_info_url, video_id,
note='Refetching age-gated info webpage',
errnote='unable to download video info webpage')
video_info = compat_parse_qs(video_info_webpage)
else:
age_gate = False
@@ -1043,6 +1043,7 @@ class YoutubePlaylistIE(YoutubeBaseInfoExtractor):
'url': 'https://www.youtube.com/playlist?list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re',
'info_dict': {
'title': 'ytdl test PL',
'id': 'PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re',
},
'playlist_count': 3,
}, {

View File

@@ -5,9 +5,11 @@ import optparse
import shlex
import sys
from .utils import (
from .compat import (
compat_expanduser,
compat_getenv,
)
from .utils import (
get_term_width,
write_string,
)
@@ -259,7 +261,16 @@ def parseOpts(overrideArguments=None):
video_format.add_option(
'-f', '--format',
action='store', dest='format', metavar='FORMAT', default=None,
help='video format code, specify the order of preference using slashes: -f 22/17/18 . -f mp4 , -f m4a and -f flv are also supported. You can also use the special names "best", "bestvideo", "bestaudio", "worst", "worstvideo" and "worstaudio". By default, youtube-dl will pick the best quality. Use commas to download multiple audio formats, such as -f 136/137/mp4/bestvideo,140/m4a/bestaudio')
help='video format code, specify the order of preference using'
' slashes: -f 22/17/18 . -f mp4 , -f m4a and -f flv are also'
' supported. You can also use the special names "best",'
' "bestvideo", "bestaudio", "worst", "worstvideo" and'
' "worstaudio". By default, youtube-dl will pick the best quality.'
' Use commas to download multiple audio formats, such as'
' -f 136/137/mp4/bestvideo,140/m4a/bestaudio.'
' You can merge the video and audio of two formats into a single'
' file using -f <video-format>+<audio-format> (requires ffmpeg or'
' avconv), for example -f bestvideo+bestaudio.')
video_format.add_option(
'--all-formats',
action='store_const', dest='format', const='all',
@@ -479,10 +490,12 @@ def parseOpts(overrideArguments=None):
'%(format_id)s for the unique id of the format (like Youtube\'s itags: "137"), '
'%(upload_date)s for the upload date (YYYYMMDD), '
'%(extractor)s for the provider (youtube, metacafe, etc), '
'%(id)s for the video id, %(playlist)s for the playlist the video is in, '
'%(playlist_index)s for the position in the playlist and %% for a literal percent. '
'%(id)s for the video id, '
'%(playlist_title)s, %(playlist_id)s, or %(playlist)s (=title if present, ID otherwise) for the playlist the video is in, '
'%(playlist_index)s for the position in the playlist. '
'%(height)s and %(width)s for the width and height of the video format. '
'%(resolution)s for a textual description of the resolution of the video format. '
'%% for a literal percent. '
'Use - to output to stdout. Can also be used to download to a different directory, '
'for example with -o \'/my/downloads/%(uploader)s/%(title)s-%(id)s.%(ext)s\' .'))
filesystem.add_option(

View File

@@ -6,10 +6,11 @@ import os
import subprocess
from .common import PostProcessor
from ..compat import (
compat_urlretrieve,
)
from ..utils import (
check_executable,
compat_urlretrieve,
encodeFilename,
PostProcessingError,
prepend_extension,

View File

@@ -3,10 +3,8 @@ from __future__ import unicode_literals
import subprocess
from .common import PostProcessor
from ..utils import (
shlex_quote,
PostProcessingError,
)
from ..compat import shlex_quote
from ..utils import PostProcessingError
class ExecAfterDownloadPP(PostProcessor):

View File

@@ -1,5 +1,4 @@
import os
import re
import subprocess
import sys
import time
@@ -7,10 +6,13 @@ import time
from .common import AudioConversionError, PostProcessor
from ..utils import (
from ..compat import (
compat_subprocess_get_DEVNULL,
)
from ..utils import (
encodeArgument,
encodeFilename,
get_exe_version,
is_outdated_version,
PostProcessingError,
prepend_extension,
@@ -19,23 +21,6 @@ from ..utils import (
)
def get_version(executable):
""" Returns the version of the specified executable,
or False if the executable is not present """
try:
out, err = subprocess.Popen(
[executable, '-version'],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()
except OSError:
return False
firstline = out.partition(b'\n')[0].decode('ascii', 'ignore')
m = re.search(r'version\s+([0-9._-a-zA-Z]+)', firstline)
if not m:
return u'present'
else:
return m.group(1)
class FFmpegPostProcessorError(PostProcessingError):
pass
@@ -61,7 +46,7 @@ class FFmpegPostProcessor(PostProcessor):
@staticmethod
def get_versions():
programs = ['avprobe', 'avconv', 'ffmpeg', 'ffprobe']
return dict((program, get_version(program)) for program in programs)
return dict((p, get_exe_version(p, args=['-version'])) for p in programs)
@property
def _executable(self):

View File

@@ -3,10 +3,12 @@ import subprocess
import sys
from .common import PostProcessor
from ..compat import (
subprocess_check_output
)
from ..utils import (
check_executable,
hyphenate_date,
subprocess_check_output
)

View File

@@ -1,6 +1,8 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import calendar
import codecs
import contextlib
@@ -8,7 +10,6 @@ import ctypes
import datetime
import email.utils
import errno
import getpass
import gzip
import itertools
import io
@@ -29,254 +30,18 @@ import traceback
import xml.etree.ElementTree
import zlib
try:
import urllib.request as compat_urllib_request
except ImportError: # Python 2
import urllib2 as compat_urllib_request
try:
import urllib.error as compat_urllib_error
except ImportError: # Python 2
import urllib2 as compat_urllib_error
try:
import urllib.parse as compat_urllib_parse
except ImportError: # Python 2
import urllib as compat_urllib_parse
try:
from urllib.parse import urlparse as compat_urllib_parse_urlparse
except ImportError: # Python 2
from urlparse import urlparse as compat_urllib_parse_urlparse
try:
import urllib.parse as compat_urlparse
except ImportError: # Python 2
import urlparse as compat_urlparse
try:
import http.cookiejar as compat_cookiejar
except ImportError: # Python 2
import cookielib as compat_cookiejar
try:
import html.entities as compat_html_entities
except ImportError: # Python 2
import htmlentitydefs as compat_html_entities
try:
import html.parser as compat_html_parser
except ImportError: # Python 2
import HTMLParser as compat_html_parser
try:
import http.client as compat_http_client
except ImportError: # Python 2
import httplib as compat_http_client
try:
from urllib.error import HTTPError as compat_HTTPError
except ImportError: # Python 2
from urllib2 import HTTPError as compat_HTTPError
try:
from urllib.request import urlretrieve as compat_urlretrieve
except ImportError: # Python 2
from urllib import urlretrieve as compat_urlretrieve
try:
from subprocess import DEVNULL
compat_subprocess_get_DEVNULL = lambda: DEVNULL
except ImportError:
compat_subprocess_get_DEVNULL = lambda: open(os.path.devnull, 'w')
try:
from urllib.parse import unquote as compat_urllib_parse_unquote
except ImportError:
def compat_urllib_parse_unquote(string, encoding='utf-8', errors='replace'):
if string == '':
return string
res = string.split('%')
if len(res) == 1:
return string
if encoding is None:
encoding = 'utf-8'
if errors is None:
errors = 'replace'
# pct_sequence: contiguous sequence of percent-encoded bytes, decoded
pct_sequence = b''
string = res[0]
for item in res[1:]:
try:
if not item:
raise ValueError
pct_sequence += item[:2].decode('hex')
rest = item[2:]
if not rest:
# This segment was just a single percent-encoded character.
# May be part of a sequence of code units, so delay decoding.
# (Stored in pct_sequence).
continue
except ValueError:
rest = '%' + item
# Encountered non-percent-encoded characters. Flush the current
# pct_sequence.
string += pct_sequence.decode(encoding, errors) + rest
pct_sequence = b''
if pct_sequence:
# Flush the final pct_sequence
string += pct_sequence.decode(encoding, errors)
return string
try:
from urllib.parse import parse_qs as compat_parse_qs
except ImportError: # Python 2
# HACK: The following is the correct parse_qs implementation from cpython 3's stdlib.
# Python 2's version is apparently totally broken
def _parse_qsl(qs, keep_blank_values=False, strict_parsing=False,
encoding='utf-8', errors='replace'):
qs, _coerce_result = qs, unicode
pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
r = []
for name_value in pairs:
if not name_value and not strict_parsing:
continue
nv = name_value.split('=', 1)
if len(nv) != 2:
if strict_parsing:
raise ValueError("bad query field: %r" % (name_value,))
# Handle case of a control-name with no equal sign
if keep_blank_values:
nv.append('')
else:
continue
if len(nv[1]) or keep_blank_values:
name = nv[0].replace('+', ' ')
name = compat_urllib_parse_unquote(
name, encoding=encoding, errors=errors)
name = _coerce_result(name)
value = nv[1].replace('+', ' ')
value = compat_urllib_parse_unquote(
value, encoding=encoding, errors=errors)
value = _coerce_result(value)
r.append((name, value))
return r
def compat_parse_qs(qs, keep_blank_values=False, strict_parsing=False,
encoding='utf-8', errors='replace'):
parsed_result = {}
pairs = _parse_qsl(qs, keep_blank_values, strict_parsing,
encoding=encoding, errors=errors)
for name, value in pairs:
if name in parsed_result:
parsed_result[name].append(value)
else:
parsed_result[name] = [value]
return parsed_result
try:
compat_str = unicode # Python 2
except NameError:
compat_str = str
try:
compat_chr = unichr # Python 2
except NameError:
compat_chr = chr
try:
from xml.etree.ElementTree import ParseError as compat_xml_parse_error
except ImportError: # Python 2.6
from xml.parsers.expat import ExpatError as compat_xml_parse_error
try:
from shlex import quote as shlex_quote
except ImportError: # Python < 3.3
def shlex_quote(s):
return "'" + s.replace("'", "'\"'\"'") + "'"
def compat_ord(c):
if type(c) is int: return c
else: return ord(c)
if sys.version_info >= (3, 0):
compat_getenv = os.getenv
compat_expanduser = os.path.expanduser
else:
# Environment variables should be decoded with filesystem encoding.
# Otherwise it will fail if any non-ASCII characters present (see #3854 #3217 #2918)
def compat_getenv(key, default=None):
env = os.getenv(key, default)
if env:
env = env.decode(get_filesystem_encoding())
return env
# HACK: The default implementations of os.path.expanduser from cpython do not decode
# environment variables with filesystem encoding. We will work around this by
# providing adjusted implementations.
# The following are os.path.expanduser implementations from cpython 2.7.8 stdlib
# for different platforms with correct environment variables decoding.
if os.name == 'posix':
def compat_expanduser(path):
"""Expand ~ and ~user constructions. If user or $HOME is unknown,
do nothing."""
if not path.startswith('~'):
return path
i = path.find('/', 1)
if i < 0:
i = len(path)
if i == 1:
if 'HOME' not in os.environ:
import pwd
userhome = pwd.getpwuid(os.getuid()).pw_dir
else:
userhome = compat_getenv('HOME')
else:
import pwd
try:
pwent = pwd.getpwnam(path[1:i])
except KeyError:
return path
userhome = pwent.pw_dir
userhome = userhome.rstrip('/')
return (userhome + path[i:]) or '/'
elif os.name == 'nt' or os.name == 'ce':
def compat_expanduser(path):
"""Expand ~ and ~user constructs.
If user or $HOME is unknown, do nothing."""
if path[:1] != '~':
return path
i, n = 1, len(path)
while i < n and path[i] not in '/\\':
i = i + 1
if 'HOME' in os.environ:
userhome = compat_getenv('HOME')
elif 'USERPROFILE' in os.environ:
userhome = compat_getenv('USERPROFILE')
elif not 'HOMEPATH' in os.environ:
return path
else:
try:
drive = compat_getenv('HOMEDRIVE')
except KeyError:
drive = ''
userhome = os.path.join(drive, compat_getenv('HOMEPATH'))
if i != 1: #~user
userhome = os.path.join(os.path.dirname(userhome), path[1:i])
return userhome + path[i:]
else:
compat_expanduser = os.path.expanduser
from .compat import (
compat_chr,
compat_getenv,
compat_html_entities,
compat_parse_qs,
compat_str,
compat_urllib_error,
compat_urllib_parse,
compat_urllib_parse_urlparse,
compat_urllib_request,
compat_urlparse,
)
# This is not clearly defined otherwise
@@ -304,14 +69,6 @@ def preferredencoding():
return pref
if sys.version_info < (3,0):
def compat_print(s):
print(s.encode(preferredencoding(), 'xmlcharrefreplace'))
else:
def compat_print(s):
assert type(s) == type(u'')
print(s)
def write_json_file(obj, fn):
""" Encode obj as JSON and write it to fn, atomically """
@@ -394,127 +151,32 @@ def xpath_text(node, xpath, name=None, fatal=False):
return n.text
compat_html_parser.locatestarttagend = re.compile(r"""<[a-zA-Z][-.a-zA-Z0-9:_]*(?:\s+(?:(?<=['"\s])[^\s/>][^\s/=>]*(?:\s*=+\s*(?:'[^']*'|"[^"]*"|(?!['"])[^>\s]*))?\s*)*)?\s*""", re.VERBOSE) # backport bugfix
class BaseHTMLParser(compat_html_parser.HTMLParser):
def __init__(self):
compat_html_parser.HTMLParser.__init__(self)
self.html = None
def loads(self, html):
self.html = html
self.feed(html)
self.close()
class AttrParser(BaseHTMLParser):
"""Modified HTMLParser that isolates a tag with the specified attribute"""
def __init__(self, attribute, value):
self.attribute = attribute
self.value = value
self.result = None
self.started = False
self.depth = {}
self.watch_startpos = False
self.error_count = 0
BaseHTMLParser.__init__(self)
def error(self, message):
if self.error_count > 10 or self.started:
raise compat_html_parser.HTMLParseError(message, self.getpos())
self.rawdata = '\n'.join(self.html.split('\n')[self.getpos()[0]:]) # skip one line
self.error_count += 1
self.goahead(1)
def handle_starttag(self, tag, attrs):
attrs = dict(attrs)
if self.started:
self.find_startpos(None)
if self.attribute in attrs and attrs[self.attribute] == self.value:
self.result = [tag]
self.started = True
self.watch_startpos = True
if self.started:
if not tag in self.depth: self.depth[tag] = 0
self.depth[tag] += 1
def handle_endtag(self, tag):
if self.started:
if tag in self.depth: self.depth[tag] -= 1
if self.depth[self.result[0]] == 0:
self.started = False
self.result.append(self.getpos())
def find_startpos(self, x):
"""Needed to put the start position of the result (self.result[1])
after the opening tag with the requested id"""
if self.watch_startpos:
self.watch_startpos = False
self.result.append(self.getpos())
handle_entityref = handle_charref = handle_data = handle_comment = \
handle_decl = handle_pi = unknown_decl = find_startpos
def get_result(self):
if self.result is None:
return None
if len(self.result) != 3:
return None
lines = self.html.split('\n')
lines = lines[self.result[1][0]-1:self.result[2][0]]
lines[0] = lines[0][self.result[1][1]:]
if len(lines) == 1:
lines[-1] = lines[-1][:self.result[2][1]-self.result[1][1]]
lines[-1] = lines[-1][:self.result[2][1]]
return '\n'.join(lines).strip()
# Hack for https://github.com/rg3/youtube-dl/issues/662
if sys.version_info < (2, 7, 3):
AttrParser.parse_endtag = (lambda self, i:
i + len("</scr'+'ipt>")
if self.rawdata[i:].startswith("</scr'+'ipt>")
else compat_html_parser.HTMLParser.parse_endtag(self, i))
def get_element_by_id(id, html):
"""Return the content of the tag with the specified ID in the passed HTML document"""
return get_element_by_attribute("id", id, html)
def get_element_by_attribute(attribute, value, html):
"""Return the content of the tag with the specified attribute in the passed HTML document"""
parser = AttrParser(attribute, value)
try:
parser.loads(html)
except compat_html_parser.HTMLParseError:
pass
return parser.get_result()
class MetaParser(BaseHTMLParser):
"""
Modified HTMLParser that isolates a meta tag with the specified name
attribute.
"""
def __init__(self, name):
BaseHTMLParser.__init__(self)
self.name = name
self.content = None
self.result = None
m = re.search(r'''(?xs)
<([a-zA-Z0-9:._-]+)
(?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]+|="[^"]+"|='[^']+'))*?
\s+%s=['"]?%s['"]?
(?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]+|="[^"]+"|='[^']+'))*?
\s*>
(?P<content>.*?)
</\1>
''' % (re.escape(attribute), re.escape(value)), html)
def handle_starttag(self, tag, attrs):
if tag != 'meta':
return
attrs = dict(attrs)
if attrs.get('name') == self.name:
self.result = attrs.get('content')
if not m:
return None
res = m.group('content')
def get_result(self):
return self.result
if res.startswith('"') or res.startswith("'"):
res = res[1:-1]
def get_meta_content(name, html):
"""
Return the content attribute from the meta tag with the given name attribute.
"""
parser = MetaParser(name)
try:
parser.loads(html)
except compat_html_parser.HTMLParseError:
pass
return parser.get_result()
return unescapeHTML(res)
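The HTMLParser machinery above is replaced by a single regex; the public helpers keep their signatures, so existing callers are unaffected. A small check (HTML invented):
get_element_by_attribute('itemprop', 'description', '<span itemprop="description">An example clip</span>')
# -> 'An example clip'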
def clean_html(html):
@@ -1181,10 +843,7 @@ def bytes_to_intlist(bs):
def intlist_to_bytes(xs):
if not xs:
return b''
if isinstance(chr(0), bytes): # Python 2
return ''.join([chr(x) for x in xs])
else:
return bytes(xs)
return struct.pack('%dB' % len(xs), *xs)
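struct.pack returns str on Python 2 and bytes on Python 3, which is exactly what the deleted branch emulated by hand (cf. the intlist_to_bytes fix in this release). Quick check:
import struct
xs = [104, 101, 108, 108, 111]
struct.pack('%dB' % len(xs), *xs)  # b'hello' on Python 3, 'hello' (str) on Python 2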
# Cross-platform file locking
@@ -1472,6 +1131,25 @@ def check_executable(exe, args=[]):
return exe
def get_exe_version(exe, args=['--version'],
version_re=r'version\s+([0-9._-a-zA-Z]+)',
unrecognized=u'present'):
""" Returns the version of the specified executable,
or False if the executable is not present """
try:
out, err = subprocess.Popen(
[exe] + args,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()
except OSError:
return False
firstline = out.partition(b'\n')[0].decode('ascii', 'ignore')
m = re.search(version_re, firstline)
if m:
return m.group(1)
else:
return unrecognized
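Typical use, matching the ffmpeg postprocessor change above (the version string is illustrative):
get_exe_version('ffmpeg', args=['-version'])  # e.g. '2.4.2'; 'present' if the version line is unparseable; False if not installed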
class PagedList(object):
def __len__(self):
# This is only useful for tests
@@ -1562,7 +1240,7 @@ def escape_rfc3986(s):
"""Escape non-ASCII characters as suggested by RFC 3986"""
if sys.version_info < (3, 0) and isinstance(s, unicode):
s = s.encode('utf-8')
return compat_urllib_parse.quote(s, "%/;:@&=+$,!~*'()?#[]")
return compat_urllib_parse.quote(s, b"%/;:@&=+$,!~*'()?#[]")
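With unicode_literals now in effect at the top of this file, the plain safe-characters literal would become unicode; the b prefix keeps it a byte string, so Python 2's quote() receives bytes for both arguments once s has been UTF-8-encoded. For instance (illustrative):
escape_rfc3986('http://de.wikipedia.org/wiki/ä')  # -> 'http://de.wikipedia.org/wiki/%C3%A4'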
def escape_url(url):
@@ -1636,15 +1314,6 @@ def parse_xml(s):
return tree
if sys.version_info < (3, 0) and sys.platform == 'win32':
def compat_getpass(prompt, *args, **kwargs):
if isinstance(prompt, compat_str):
prompt = prompt.encode(preferredencoding())
return getpass.getpass(prompt, *args, **kwargs)
else:
compat_getpass = getpass.getpass
US_RATINGS = {
'G': 0,
'PG': 10,
@@ -1702,18 +1371,6 @@ def qualities(quality_ids):
DEFAULT_OUTTMPL = '%(title)s-%(id)s.%(ext)s'
try:
subprocess_check_output = subprocess.check_output
except AttributeError:
def subprocess_check_output(*args, **kwargs):
assert 'input' not in kwargs
p = subprocess.Popen(*args, stdout=subprocess.PIPE, **kwargs)
output, _ = p.communicate()
ret = p.poll()
if ret:
raise subprocess.CalledProcessError(ret, p.args, output=output)
return output
def limit_length(s, length):
""" Add ellipses to overly long strings """

View File

@@ -1,2 +1,2 @@
__version__ = '2014.11.02.1'
__version__ = '2014.11.13.2'