Compare commits

...

117 Commits

Author SHA1 Message Date
10606050bc release 2014.10.02 2014-10-02 15:30:19 +02:00
d9bf465200 [bandcamp] Fix id extraction and modernize (Closes #3866) 2014-10-02 20:22:46 +07:00
01384d6e4b [jpopsuki] More modernize 2014-10-02 19:58:28 +07:00
08d5230945 [jpopsukitv] Improve _VALID_URL and modernize 2014-10-02 19:43:25 +07:00
852f8641e8 Merge pull request #3865 from diffycat/jpopsuki
[jpopsuki] Support category links
2014-10-02 19:38:29 +07:00
18937a50a4 [dropbox] Fix the video url query (fixes #3868)
Previously it would convert urls without a query into 'something.mp4&dl=1'
2014-10-01 23:19:56 +02:00
d5feab9aaa [jpopsuki] Support category links 2014-10-01 23:24:23 +04:00
9e77c60c9a [tapely] Catch SoundCloud tracks 2014-10-01 21:53:45 +03:00
1414df5ce2 [izlesene] Prepend scheme to thumbnails 2014-10-01 21:11:38 +03:00
e80f40e5ca [tapely] Add new extractor (closes #3861) 2014-10-01 17:26:09 +03:00
d3c9af84fc [spankwire] Fix extraction 2014-10-01 20:53:58 +07:00
59d206ca2d [sunporno] Fix duration extraction and make more robust 2014-10-01 20:44:43 +07:00
e7b6d12254 [utils] Improve and test js_to_json 2014-10-01 00:08:34 +02:00
410f3e73ab [utils] Fix js_to_json 2014-10-01 00:08:28 +02:00
07e764439a [generic] Delete test case
The page is not available any more.
2014-10-01 00:08:19 +02:00
f8fb4a7ca8 [nfl] Use compatible urlparse 2014-09-30 20:01:37 +03:00
e497a7f2ca [tvigle] Extract format file sizes 2014-09-30 20:00:21 +07:00
a3b6be104d [tvigle] Replace 404 test 2014-09-30 19:55:30 +07:00
b7bb0df21e [vgtv] Fix tests' exts 2014-09-30 19:50:14 +07:00
4dc19c0982 [lrt] Add new extractor 2014-09-30 02:26:16 +03:00
58ea7ec81e [vimeo] Fix description extraction 2014-09-29 22:23:21 +02:00
c0f64ac689 [test/helper] Improve output of missing test definition dictionaries 2014-09-29 22:19:11 +02:00
7a08ad7d59 [test/helper] Modernize 2014-09-29 22:11:24 +02:00
2d29ac4f23 [vuclip] Fix regexp 2014-09-29 21:48:44 +02:00
a7a747d687 [vuclip] Remove test code 2014-09-29 21:47:57 +02:00
fdb4d278bf [spankwire] Fix extraction and modernize 2014-09-29 20:11:51 +07:00
59c03a9bfb [vuclip] Fix extraction 2014-09-29 13:07:58 +02:00
e7db973328 [yahoo] Remove test case
This video seems to have been removed entirely
2014-09-29 12:45:57 +02:00
99b67fecc5 [arte] Fix upload date extraction 2014-09-29 12:45:18 +02:00
89294b5f50 [README] Explain updating in detail (Fixes #3850) 2014-09-29 12:33:29 +02:00
72d53356f6 [internetvideoarchive] Fix test case 2014-09-29 12:24:48 +02:00
9e1e67fc15 [internetvideoarchive] Modernize 2014-09-29 12:23:52 +02:00
1e60e5546e [funnyordie] Fix test case md5 2014-09-29 12:20:25 +02:00
457749a703 [prosiebensat1] Fix test case 2014-09-29 12:18:49 +02:00
937f935db0 [jukebox] Remove md5 sum, it fluctuates 2014-09-29 12:15:49 +02:00
80bcefcd77 [cliphunter] Remove duration 2014-09-29 06:22:54 +02:00
8c23945c72 [eporner] Adapt to changed default format 2014-09-29 06:19:18 +02:00
989b4b2b86 [utils:YoutubeDLHandler] Work around brain-dead Python 2.6 httplib
In 2.6, the httplib sends fragments! Remove those (fixes generic_26 on 2.6).
2014-09-29 06:15:46 +02:00
2a7b4681c6 [godtube] Fix on Python 2.6 2014-09-29 05:51:41 +02:00
8157ae3904 [golem] Fix under 2.6
It's a sad story; 2.6 does not support any non-trivial xpaths.
2014-09-29 05:48:56 +02:00
e50e2fcd4d [br] fix test case 2014-09-29 05:40:20 +02:00
6be451f422 [youtube] Remove swf signature test cases
These files are now 0 Bytes
2014-09-29 05:24:00 +02:00
5e4f06197f [facebook] Fix test case 2014-09-29 05:19:56 +02:00
761e1645e0 [generic] Remove unstable test checksum 2014-09-29 05:18:45 +02:00
8ff14175e2 [sportdeutschland] Fix testcase 2014-09-29 05:17:16 +02:00
dbe3043cd6 [ynet] Fix test checksums 2014-09-29 05:15:42 +02:00
a8eb5a8e61 [generic] Fix testcases 2014-09-29 05:12:57 +02:00
6043f1df4e [ign] Return proper playlist object 2014-09-29 05:05:06 +02:00
12548cd933 [worldstarhiphop] Correct title extraction 2014-09-29 05:02:58 +02:00
2593039522 [vimeo] Use regexps to find description
This fixes descriptions on 2.6 and makes the code simpler.
2014-09-29 04:58:31 +02:00
35d3e63d24 release 2014.09.29.2 2014-09-29 04:49:11 +02:00
27aede9074 [pbs] Add support for series/jwplayer type video (Fixes #3849) 2014-09-29 04:48:50 +02:00
f5b7e6a842 release 2014.09.29.1 2014-09-29 02:04:28 +02:00
a1f934b171 [youtube] Correct language cookie handling 2014-09-29 02:04:16 +02:00
a43ee88c6f release 2014.09.29 2014-09-29 01:51:53 +02:00
e2dce53781 [youtube] Always request webpage in English (Fixes #3844) 2014-09-29 01:39:26 +02:00
1770ed9e86 [thvideo] Simplify (#3848) 2014-09-29 00:38:37 +02:00
457ac58cc7 Merge remote-tracking branch 'diffycat/thvideo-update' 2014-09-29 00:36:55 +02:00
9c44d2429b [vimeo:likes] Support large like lists (Fixes #3847) 2014-09-29 00:36:06 +02:00
d2e32f7df5 Do not use HTML characters in output
This messes up the format when people paste it outside of code tags.
2014-09-29 00:23:43 +02:00
67077b182b [thvideo] Add support for playlists 2014-09-28 23:36:55 +04:00
5f4c318844 [nfl] Support team micro-sites (fixes #3831) 2014-09-28 21:48:26 +03:00
dfee83234b [nfl] Prefer progressive downloads 2014-09-28 19:25:28 +03:00
7f5c0c4a19 [README] Clarify test's md5 filesize (#3846) 2014-09-28 22:10:20 +07:00
4bc77c8417 [README] Use _match_id helper function 2014-09-28 13:52:21 +02:00
22dd3fad86 release 2014.09.28.1 2014-09-28 12:14:25 +02:00
d6e6a42256 [vimeo:likes] Add new extractor (Fixes #3835) 2014-09-28 12:14:16 +02:00
76e7d1e74b [played] Remove unused import 2014-09-28 10:56:36 +02:00
38c4d41b74 [played] Simplify (#3798) 2014-09-28 10:55:27 +02:00
f0b8e3607d Merge remote-tracking branch 'r4mos/played' 2014-09-28 10:52:23 +02:00
51ee08c4bb Remove unused imports 2014-09-28 10:50:43 +02:00
c841789772 [muenchentv] Add thumbnail 2014-09-28 10:49:58 +02:00
c121a75b36 [heise] Add support for description 2014-09-28 10:49:12 +02:00
5a8b77551d [heise] Simplify (#3842) 2014-09-28 10:47:25 +02:00
0217aee154 Merge remote-tracking branch 'd912e3/heise' 2014-09-28 10:36:44 +02:00
b14f3a4c1d [golem] Simplify (#3828) 2014-09-28 10:35:19 +02:00
92f7963f6e Merge remote-tracking branch 'd912e3/golem' 2014-09-28 10:10:34 +02:00
88fbe4c2cc release 2014.09.28 2014-09-28 09:49:42 +02:00
394599f422 [oktoberfesttv] Add new extractor (Fixes #3845) 2014-09-28 09:49:21 +02:00
ed9266db90 [common] Add new helper function _match_id 2014-09-28 09:31:58 +02:00
f4b1c7adb8 [muenchentv] Move live title generation to common 2014-09-28 08:53:52 +02:00
c95eeb7b80 [eitb] Modernize 2014-09-28 08:49:03 +02:00
5e43e3803c Credit @lenaten for ynet (#3840) and sport5 (#3841) 2014-09-28 03:45:15 +07:00
a89435a7a8 [ynet] Improve _VALID_URL 2014-09-28 03:30:41 +07:00
a0a90b3ba1 Merge branch 'lenaten-ynet' 2014-09-28 03:26:47 +07:00
c664182323 [ynet] Remove unused stuff, simplify and improve 2014-09-28 03:26:38 +07:00
6be1cd4ddb Merge branch 'ynet' of https://github.com/lenaten/youtube-dl into lenaten-ynet 2014-09-28 02:56:51 +07:00
ee0d90707a [YoutubeDL] Fix string check for python3 2014-09-28 02:48:41 +07:00
f776d8f608 [sport5] Keep alphanumeric order 2014-09-28 02:35:46 +07:00
b3ac3a51ac Merge branch 'lenaten-sport5' 2014-09-28 02:32:52 +07:00
0b75c2a88b [sport5] Capture error message and improve 2014-09-28 02:31:14 +07:00
7b7518124e [heise] Don't check string type
Before Python 3, strings could also be unicode, so don't check the type at all.
2014-09-27 21:12:23 +02:00
68b0973046 [YoutubeDL] Expect all kind of strings in urlopen
Now it doesn't fail if req is python2's str
2014-09-28 02:07:42 +07:00
3a203b8bfa Merge branch 'sport5' of https://github.com/lenaten/youtube-dl into lenaten-sport5 2014-09-28 00:55:16 +07:00
70752ccefd [golem] Don't omit positional argument specifiers
Required by Python 2.6.
2014-09-27 19:35:55 +02:00
0155549d6c [heise] Add new extractor 2014-09-27 19:28:01 +02:00
net b66745288e [sport5] Add new extractor 2014-09-27 20:21:46 +03:00
net 2a1325fdde [ynet] Add new extractor 2014-09-27 20:11:22 +03:00
2f9e8776df [extremetube] Fix extraction 2014-09-27 22:36:53 +07:00
497339fa0e [anysex] Fix extraction 2014-09-27 22:29:27 +07:00
8e6f8051f0 [vbox7] Don't set the extension to 'flv' (fixes #3836) 2014-09-27 10:53:02 +02:00
11b3ce8509 [crunchyroll] Allow to list subtitles (fixes #3805) 2014-09-25 17:57:38 +03:00
6a5af6acb9 [golem] Add new extractor 2014-09-25 16:25:53 +02:00
9a0d98bb40 [vube] Update tests 2014-09-25 20:57:18 +07:00
fbd3162e49 [vube] Add DMCA notice 2014-09-25 20:48:54 +07:00
54e9a4af95 [wat] Skip test 2014-09-25 20:33:11 +07:00
8a32b82e46 [youku] Modernize somewhat 2014-09-25 09:58:09 +02:00
fec02bcc90 [hlsnative] Correct handling when remaining_bytes is None 2014-09-25 09:21:45 +02:00
c6e90caaa6 Merge remote-tracking branch 'naglis/wistia' 2014-09-25 02:03:49 +02:00
4bbf157794 release 2014.09.25 2014-09-25 01:59:45 +02:00
6b08cdf626 [youtube] Support for embedded /p players (Fixes #3821) 2014-09-25 01:59:02 +02:00
b686fc18da [hlsnative] Support test parameter 2014-09-24 14:38:40 +02:00
0b97f3a936 release 2014.09.24.1 2014-09-24 14:17:42 +02:00
eb73f2649f [vevo] Skip SMIL download 2014-09-24 14:17:33 +02:00
f0b5d6af74 [vevo] Support 1080p videos (Fixes #3656) 2014-09-24 14:16:56 +02:00
746c67d72f [wistia] Use API and make more generic 2014-09-20 03:02:11 +03:00
5aa38e75b2 [played] Add new extractor 2014-09-19 22:46:57 +02:00
67 changed files with 1450 additions and 422 deletions

View File

@ -348,21 +348,34 @@ $ youtube-dl --dateafter 20000101 --datebefore 20091231
# FAQ
### I'm getting an error `Unable to extract OpenGraph title` on YouTube playlists
### How do I update youtube-dl?
YouTube changed their playlist format in March 2014 and later on, so you'll need at least youtube-dl 2014.07.25 to download all YouTube videos.
If you've followed [our manual installation instructions](http://rg3.github.io/youtube-dl/download.html), you can simply run `youtube-dl -U` (or, on Linux, `sudo youtube-dl -U`).
If you have installed youtube-dl with a package manager, pip, setup.py or a tarball, please use that to update. Note that Ubuntu packages do not seem to get updated anymore. Since we are not affiliated with Ubuntu, there is little we can do. Feel free to report bugs to the Ubuntu packaging guys - all they have to do is update the package to a somewhat recent version.
If you have used pip, a simple `sudo pip install -U youtube-dl` is sufficient to update.
Alternatively, uninstall the youtube-dl package and follow [our manual installation instructions](http://rg3.github.io/youtube-dl/download.html). In a pinch, this should do if you used `apt-get` before to install youtube-dl:
If you have installed youtube-dl using a package manager like *apt-get* or *yum*, use the standard system update mechanism to update. Note that distribution packages are often outdated. As a rule of thumb, youtube-dl releases at least once a month, and often weekly or even daily. Simply go to http://yt-dl.org/ to find out the current version. Unfortunately, there is nothing we youtube-dl developers can do if your distribution serves a really outdated version. You can (and should) complain to your distribution in their bugtracker or support forum.
As a last resort, you can also uninstall the version installed by your package manager and follow our manual installation instructions. For that, remove the distribution's package, with a line like
sudo apt-get remove -y youtube-dl
Afterwards, simply follow [our manual installation instructions](http://rg3.github.io/youtube-dl/download.html):
```
sudo apt-get remove -y youtube-dl
sudo wget https://yt-dl.org/latest/youtube-dl -O /usr/local/bin/youtube-dl
sudo chmod a+x /usr/local/bin/youtube-dl
hash -r
```
Again, from then on you'll be able to update with `sudo youtube-dl -U`.
### I'm getting an error `Unable to extract OpenGraph title` on YouTube playlists
YouTube changed their playlist format in March 2014 and later on, so you'll need at least youtube-dl 2014.07.25 to download all YouTube videos.
If you have installed youtube-dl with a package manager, pip, setup.py or a tarball, please use that to update. Note that Ubuntu packages do not seem to get updated anymore. Since we are not affiliated with Ubuntu, there is little we can do. Feel free to report bugs to the Ubuntu packaging guys - all they have to do is update the package to a somewhat recent version. See above for a way to update.
### Do I always have to pass in `--max-quality FORMAT`, or `-citw`?
By default, youtube-dl intends to have the best options (incidentally, if you have a convincing case that these should be different, [please file an issue where you explain that](https://yt-dl.org/bug)). Therefore, it is unnecessary and sometimes harmful to copy long option strings from webpages. In particular, `--max-quality` *limits* the video quality (so if you want the best quality, do NOT pass it in), and the only option out of `-citw` that is regularly useful is `-i`.
@ -442,8 +455,6 @@ If you want to add support for a new site, you can follow this quick list (assum
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
@ -451,7 +462,7 @@ If you want to add support for a new site, you can follow this quick list (assum
_VALID_URL = r'https?://(?:www\.)?yourextractor\.com/watch/(?P<id>[0-9]+)'
_TEST = {
'url': 'http://yourextractor.com/watch/42',
'md5': 'TODO: md5 sum of the first 10KiB of the video file',
'md5': 'TODO: md5 sum of the first 10241 bytes of the video file (use --test)',
'info_dict': {
'id': '42',
'ext': 'mp4',
@ -466,8 +477,7 @@ If you want to add support for a new site, you can follow this quick list (assum
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
video_id = self._match_id(url)
# TODO more code goes here, for example ...
webpage = self._download_webpage(url, video_id)
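
Taken together, the tutorial's fragments amount to an extractor like the following hedged sketch (`YourExtractorIE` and its URLs are the tutorial's placeholders, not a real site; the title regex is an invented example):

```python
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor


class YourExtractorIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?yourextractor\.com/watch/(?P<id>[0-9]+)'

    def _real_extract(self, url):
        # _match_id() replaces the old re.match(self._VALID_URL, url)
        # boilerplate and returns the named 'id' group.
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        title = self._html_search_regex(
            r'<h1>(.+?)</h1>', webpage, 'title')  # invented page layout
        return {
            'id': video_id,
            'title': title,
        }
```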

View File

@ -1,3 +1,5 @@
from __future__ import unicode_literals
import errno
import io
import hashlib
@ -12,6 +14,7 @@ from youtube_dl import YoutubeDL
from youtube_dl.utils import (
compat_str,
preferredencoding,
write_string,
)
@ -40,10 +43,10 @@ def report_warning(message):
If stderr is a tty file the 'WARNING:' will be colored
'''
if sys.stderr.isatty() and os.name != 'nt':
_msg_header = u'\033[0;33mWARNING:\033[0m'
_msg_header = '\033[0;33mWARNING:\033[0m'
else:
_msg_header = u'WARNING:'
output = u'%s %s\n' % (_msg_header, message)
_msg_header = 'WARNING:'
output = '%s %s\n' % (_msg_header, message)
if 'b' in getattr(sys.stderr, 'mode', '') or sys.version_info[0] < 3:
output = output.encode(preferredencoding())
sys.stderr.write(output)
@ -103,22 +106,22 @@ def expect_info_dict(self, expected_dict, got_dict):
self.assertTrue(
isinstance(got, compat_str),
u'Expected a %s object, but got %s for field %s' % (
'Expected a %s object, but got %s for field %s' % (
compat_str.__name__, type(got).__name__, info_field))
self.assertTrue(
match_rex.match(got),
u'field %s (value: %r) should match %r' % (info_field, got, match_str))
'field %s (value: %r) should match %r' % (info_field, got, match_str))
elif isinstance(expected, type):
got = got_dict.get(info_field)
self.assertTrue(isinstance(got, expected),
u'Expected type %r for field %s, but got value %r of type %r' % (expected, info_field, got, type(got)))
'Expected type %r for field %s, but got value %r of type %r' % (expected, info_field, got, type(got)))
else:
if isinstance(expected, compat_str) and expected.startswith('md5:'):
got = 'md5:' + md5(got_dict.get(info_field))
else:
got = got_dict.get(info_field)
self.assertEqual(expected, got,
u'invalid value for field %s, expected %r, got %r' % (info_field, expected, got))
'invalid value for field %s, expected %r, got %r' % (info_field, expected, got))
# Check for the presence of mandatory fields
if got_dict.get('_type') != 'playlist':
@ -126,7 +129,7 @@ def expect_info_dict(self, expected_dict, got_dict):
self.assertTrue(got_dict.get(key), 'Missing mandatory field %s' % key)
# Check for mandatory fields that are automatically set by YoutubeDL
for key in ['webpage_url', 'extractor', 'extractor_key']:
self.assertTrue(got_dict.get(key), u'Missing field: %s' % key)
self.assertTrue(got_dict.get(key), 'Missing field: %s' % key)
# Are checkable fields missing from the test case definition?
test_info_dict = dict((key, value if not isinstance(value, compat_str) or len(value) < 250 else 'md5:' + md5(value))
@ -134,7 +137,15 @@ def expect_info_dict(self, expected_dict, got_dict):
if value and key in ('title', 'description', 'uploader', 'upload_date', 'timestamp', 'uploader_id', 'location'))
missing_keys = set(test_info_dict.keys()) - set(expected_dict.keys())
if missing_keys:
sys.stderr.write(u'\n"info_dict": ' + json.dumps(test_info_dict, ensure_ascii=False, indent=4) + u'\n')
def _repr(v):
if isinstance(v, compat_str):
return "'%s'" % v.replace('\\', '\\\\').replace("'", "\\'")
else:
return repr(v)
info_dict_str = ''.join(
' %s: %s,\n' % (_repr(k), _repr(v))
for k, v in test_info_dict.items())
write_string('\n"info_dict": {' + info_dict_str + '}\n', out=sys.stderr)
self.assertFalse(
missing_keys,
'Missing keys in test definition: %s' % (

View File

@ -139,7 +139,9 @@ def generator(test_case):
if is_playlist:
self.assertEqual(res_dict['_type'], 'playlist')
self.assertTrue('entries' in res_dict)
expect_info_dict(self, test_case.get('info_dict', {}), res_dict)
if 'playlist_mincount' in test_case:
assertGreaterEqual(
self,
@ -188,7 +190,7 @@ def generator(test_case):
expect_info_dict(self, tc.get('info_dict', {}), info_dict)
finally:
try_rm_tcs_files()
if is_playlist and res_dict is not None:
if is_playlist and res_dict is not None and res_dict.get('entries'):
# Remove all other files that may have been extracted if the
# extractor returns full results even with extract_flat
res_tcs = [{'info_dict': e} for e in res_dict['entries']]

View File

@ -22,7 +22,8 @@ from youtube_dl.utils import (
fix_xml_ampersands,
get_meta_content,
orderedSet,
PagedList,
OnDemandPagedList,
InAdvancePagedList,
parse_duration,
read_batch_urls,
sanitize_filename,
@ -43,6 +44,7 @@ from youtube_dl.utils import (
limit_length,
escape_rfc3986,
escape_url,
js_to_json,
)
@ -137,6 +139,7 @@ class TestUtil(unittest.TestCase):
self.assertEqual(unified_strdate('Dec 14, 2012'), '20121214')
self.assertEqual(unified_strdate('2012/10/11 01:56:38 +0000'), '20121011')
self.assertEqual(unified_strdate('1968-12-10'), '19681210')
self.assertEqual(unified_strdate('28/01/2014 21:00:00 +0100'), '20140128')
def test_find_xpath_attr(self):
testxml = '''<root>
@ -246,10 +249,14 @@ class TestUtil(unittest.TestCase):
for i in range(firstid, upto):
yield i
pl = PagedList(get_page, pagesize)
pl = OnDemandPagedList(get_page, pagesize)
got = pl.getslice(*sliceargs)
self.assertEqual(got, expected)
iapl = InAdvancePagedList(get_page, size // pagesize + 1, pagesize)
got = iapl.getslice(*sliceargs)
self.assertEqual(got, expected)
testPL(5, 2, (), [0, 1, 2, 3, 4])
testPL(5, 2, (1,), [1, 2, 3, 4])
testPL(5, 2, (2,), [2, 3, 4])
@ -325,5 +332,28 @@ class TestUtil(unittest.TestCase):
)
self.assertEqual(escape_url('http://vimeo.com/56015672#at=0'), 'http://vimeo.com/56015672#at=0')
def test_js_to_json_realworld(self):
inp = '''{
'clip':{'provider':'pseudo'}
}'''
self.assertEqual(js_to_json(inp), '''{
"clip":{"provider":"pseudo"}
}''')
json.loads(js_to_json(inp))
inp = '''{
'playlist':[{'controls':{'all':null}}]
}'''
self.assertEqual(js_to_json(inp), '''{
"playlist":[{"controls":{"all":null}}]
}''')
def test_js_to_json_edgecases(self):
on = js_to_json("{abc_def:'1\\'\\\\2\\\\\\'3\"4'}")
self.assertEqual(json.loads(on), {"abc_def": "1'\\2\\'3\"4"})
on = js_to_json('{"abc": true}')
self.assertEqual(json.loads(on), {'abc': True})
if __name__ == '__main__':
unittest.main()
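
For context, a short usage sketch of the helper these tests exercise: js_to_json converts the JavaScript-style object literals found in embedded player configs into strict JSON. The input below is taken from the real-world test above:

```python
import json

from youtube_dl.utils import js_to_json

js_config = "{'playlist':[{'controls':{'all':null}}]}"
config = json.loads(js_to_json(js_config))
print(config['playlist'][0]['controls'])  # -> {'all': None}
```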

View File

@ -47,18 +47,6 @@ _TESTS = [
'2ACFC7A61CA478CD21425E5A57EBD73DDC78E22A.2094302436B2D377D14A3BBA23022D023B8BC25AA',
'A52CB8B320D22032ABB3A41D773D2B6342034902.A22E87CDD37DBE75A5E52412DC874AC16A7CFCA2',
),
(
'http://s.ytimg.com/yts/swfbin/player-vfl5vIhK2/watch_as3.swf',
'swf',
86,
'O1I3456789abcde0ghijklmnopqrstuvwxyzABCDEFGHfJKLMN2PQRSTUVWXY\\!"#$%&\'()*+,-./:;<=>?'
),
(
'http://s.ytimg.com/yts/swfbin/player-vflmDyk47/watch_as3.swf',
'swf',
'F375F75BF2AFDAAF2666E43868D46816F83F13E81C46.3725A8218E446A0DECD33F79DC282994D6AA92C92C9',
'9C29AA6D499282CD97F33DCED0A644E8128A5273.64C18E31F38361864D86834E6662FAADFA2FB57F'
),
(
'https://s.ytimg.com/yts/jsbin/html5player-en_US-vflBb0OQx.js',
'js',

View File

@ -1250,12 +1250,13 @@ class YoutubeDL(object):
# urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991)
# To work around aforementioned issue we will replace request's original URL with
# percent-encoded one
url = req if isinstance(req, compat_str) else req.get_full_url()
req_is_string = isinstance(req, basestring if sys.version_info < (3, 0) else compat_str)
url = req if req_is_string else req.get_full_url()
url_escaped = escape_url(url)
# Substitute URL if any change after escaping
if url != url_escaped:
if isinstance(req, compat_str):
if req_is_string:
req = url_escaped
else:
req = compat_urllib_request.Request(
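
The heavy lifting here is done by escape_url, which is covered in test_utils earlier in this diff: URLs that are already RFC 3986-clean come back unchanged, so the request is only rebuilt when escaping actually altered something. A minimal sketch:

```python
from youtube_dl.utils import escape_url

# Grounded in the test_utils assertion above: an already-escaped URL
# passes through untouched, so no new Request object is needed.
assert escape_url('http://vimeo.com/56015672#at=0') == 'http://vimeo.com/56015672#at=0'
```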

View File

@ -78,6 +78,7 @@ __authors__ = (
'Hari Padmanaban',
'Carlos Ramos',
'5moufl',
'lenaten',
)
__license__ = 'Public Domain'

View File

@ -2,6 +2,7 @@ from __future__ import unicode_literals
from .common import FileDownloader
from .hls import HlsFD
from .hls import NativeHlsFD
from .http import HttpFD
from .mplayer import MplayerFD
from .rtmp import RtmpFD
@ -19,6 +20,8 @@ def get_suitable_downloader(info_dict):
if url.startswith('rtmp'):
return RtmpFD
if protocol == 'm3u8_native':
return NativeHlsFD
if (protocol == 'm3u8') or (protocol is None and determine_ext(url) == 'm3u8'):
return HlsFD
if url.startswith('mms') or url.startswith('rtsp'):
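
For reference, a hedged sketch of a format dict that selects the new downloader (all values invented): because its protocol is 'm3u8_native', get_suitable_downloader() returns NativeHlsFD instead of the ffmpeg-based HlsFD:

```python
from youtube_dl.downloader import get_suitable_downloader

fmt = {
    'format_id': 'm3u8-1500',
    'url': 'http://example.com/stream/index.m3u8',  # invented URL
    'ext': 'mp4',
    'protocol': 'm3u8_native',
}
get_suitable_downloader(fmt)  # -> NativeHlsFD
```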

View File

@ -42,6 +42,7 @@ class FileDownloader(object):
Subclasses of this one must re-define the real_download method.
"""
_TEST_FILE_SIZE = 10241
params = None
def __init__(self, ydl, params):
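
_TEST_FILE_SIZE is the constant behind the README clarification above: test md5 sums cover the first 10241 bytes, which is what --test mode downloads. A hedged sketch of reproducing such a checksum (the file name is hypothetical):

```python
import hashlib

def test_md5(path, size=10241):  # size mirrors _TEST_FILE_SIZE
    """md5 of the first `size` bytes, as compared against _TEST md5s."""
    with open(path, 'rb') as f:
        return hashlib.md5(f.read(size)).hexdigest()

# print(test_md5('downloaded-test-video.mp4'))  # hypothetical file
```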

View File

@ -1,8 +1,13 @@
from __future__ import unicode_literals
import os
import re
import subprocess
from .common import FileDownloader
from ..utils import (
compat_urlparse,
compat_urllib_request,
check_executable,
encodeFilename,
)
@ -43,3 +48,57 @@ class HlsFD(FileDownloader):
self.to_stderr(u"\n")
self.report_error(u'%s exited with code %d' % (program, retval))
return False
class NativeHlsFD(FileDownloader):
""" A more limited implementation that does not require ffmpeg """
def real_download(self, filename, info_dict):
url = info_dict['url']
self.report_destination(filename)
tmpfilename = self.temp_name(filename)
self.to_screen(
'[hlsnative] %s: Downloading m3u8 manifest' % info_dict['id'])
data = self.ydl.urlopen(url).read()
s = data.decode('utf-8', 'ignore')
segment_urls = []
for line in s.splitlines():
line = line.strip()
if line and not line.startswith('#'):
segment_url = (
line
if re.match(r'^https?://', line)
else compat_urlparse.urljoin(url, line))
segment_urls.append(segment_url)
is_test = self.params.get('test', False)
remaining_bytes = self._TEST_FILE_SIZE if is_test else None
byte_counter = 0
with open(tmpfilename, 'wb') as outf:
for i, segurl in enumerate(segment_urls):
self.to_screen(
'[hlsnative] %s: Downloading segment %d / %d' %
(info_dict['id'], i + 1, len(segment_urls)))
seg_req = compat_urllib_request.Request(segurl)
if remaining_bytes is not None:
seg_req.add_header('Range', 'bytes=0-%d' % (remaining_bytes - 1))
segment = self.ydl.urlopen(seg_req).read()
if remaining_bytes is not None:
segment = segment[:remaining_bytes]
remaining_bytes -= len(segment)
outf.write(segment)
byte_counter += len(segment)
if remaining_bytes is not None and remaining_bytes <= 0:
break
self._hook_progress({
'downloaded_bytes': byte_counter,
'total_bytes': byte_counter,
'filename': filename,
'status': 'finished',
})
self.try_rename(tmpfilename, filename)
return True

View File

@ -14,8 +14,6 @@ from ..utils import (
class HttpFD(FileDownloader):
_TEST_FILE_SIZE = 10241
def real_download(self, filename, info_dict):
url = info_dict['url']
tmpfilename = self.temp_name(filename)

View File

@ -135,12 +135,14 @@ from .gametrailers import GametrailersIE
from .gdcvault import GDCVaultIE
from .generic import GenericIE
from .godtube import GodTubeIE
from .golem import GolemIE
from .googleplus import GooglePlusIE
from .googlesearch import GoogleSearchIE
from .gorillavid import GorillaVidIE
from .goshgay import GoshgayIE
from .grooveshark import GroovesharkIE
from .hark import HarkIE
from .heise import HeiseIE
from .helsinki import HelsinkiIE
from .hentaistigma import HentaiStigmaIE
from .hornbunny import HornBunnyIE
@ -188,6 +190,7 @@ from .livestream import (
LivestreamOriginalIE,
LivestreamShortenerIE,
)
from .lrt import LRTIE
from .lynda import (
LyndaIE,
LyndaCourseIE
@ -261,6 +264,7 @@ from .nrk import (
from .ntv import NTVIE
from .nytimes import NYTimesIE
from .nuvid import NuvidIE
from .oktoberfesttv import OktoberfestTVIE
from .ooyala import OoyalaIE
from .orf import (
ORFTVthekIE,
@ -271,6 +275,7 @@ from .parliamentliveuk import ParliamentLiveUKIE
from .patreon import PatreonIE
from .pbs import PBSIE
from .photobucket import PhotobucketIE
from .played import PlayedIE
from .playfm import PlayFMIE
from .playvid import PlayvidIE
from .podomatic import PodomaticIE
@ -339,6 +344,7 @@ from .spankwire import SpankwireIE
from .spiegel import SpiegelIE, SpiegelArticleIE
from .spiegeltv import SpiegeltvIE
from .spike import SpikeIE
from .sport5 import Sport5IE
from .sportdeutschland import SportDeutschlandIE
from .stanfordoc import StanfordOpenClassroomIE
from .steam import SteamIE
@ -349,6 +355,7 @@ from .swrmediathek import SWRMediathekIE
from .syfy import SyfyIE
from .sztvhu import SztvHuIE
from .tagesschau import TagesschauIE
from .tapely import TapelyIE
from .teachertube import (
TeacherTubeIE,
TeacherTubeUserIE,
@ -366,7 +373,10 @@ from .thisav import ThisAVIE
from .tinypic import TinyPicIE
from .tlc import TlcIE, TlcDeIE
from .tnaflix import TNAFlixIE
from .thvideo import THVideoIE
from .thvideo import (
THVideoIE,
THVideoPlaylistIE
)
from .toutv import TouTvIE
from .toypics import ToypicsUserIE, ToypicsIE
from .traileraddict import TrailerAddictIE
@ -407,11 +417,12 @@ from .videoweed import VideoWeedIE
from .vidme import VidmeIE
from .vimeo import (
VimeoIE,
VimeoChannelIE,
VimeoUserIE,
VimeoAlbumIE,
VimeoChannelIE,
VimeoGroupsIE,
VimeoLikesIE,
VimeoReviewIE,
VimeoUserIE,
VimeoWatchLaterIE,
)
from .vimple import VimpleIE
@ -450,6 +461,7 @@ from .yahoo import (
YahooNewsIE,
YahooSearchIE,
)
from .ynet import YnetIE
from .youjizz import YouJizzIE
from .youku import YoukuIE
from .youporn import YouPornIE

View File

@ -22,8 +22,7 @@ class ABCIE(InfoExtractor):
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
urls_info_json = self._search_regex(

View File

@ -35,7 +35,7 @@ class AnySexIE(InfoExtractor):
title = self._html_search_regex(r'<title>(.*?)</title>', webpage, 'title')
description = self._html_search_regex(
r'<div class="description">([^<]+)</div>', webpage, 'description', fatal=False)
r'<div class="description"[^>]*>([^<]+)</div>', webpage, 'description', fatal=False)
thumbnail = self._html_search_regex(
r'preview_url\s*:\s*\'(.*?)\'', webpage, 'thumbnail', fatal=False)
@ -43,7 +43,7 @@ class AnySexIE(InfoExtractor):
r'<a href="http://anysex\.com/categories/[^"]+" title="[^"]*">([^<]+)</a>', webpage)
duration = parse_duration(self._search_regex(
r'<b>Duration:</b> (\d+:\d+)', webpage, 'duration', fatal=False))
r'<b>Duration:</b> (?:<q itemprop="duration">)?(\d+:\d+)', webpage, 'duration', fatal=False))
view_count = int_or_none(self._html_search_regex(
r'<b>Views:</b> (\d+)', webpage, 'view count', fatal=False))

View File

@ -8,8 +8,6 @@ from ..utils import (
determine_ext,
ExtractorError,
qualities,
compat_urllib_parse_urlparse,
compat_urllib_parse,
int_or_none,
parse_duration,
unified_strdate,

View File

@ -86,11 +86,15 @@ class ArteTVPlus7IE(InfoExtractor):
info = self._download_json(json_url, video_id)
player_info = info['videoJsonPlayer']
upload_date_str = player_info.get('shootingDate')
if not upload_date_str:
upload_date_str = player_info.get('VDA', '').split(' ')[0]
info_dict = {
'id': player_info['VID'],
'title': player_info['VTI'],
'description': player_info.get('VDE'),
'upload_date': unified_strdate(player_info.get('VDA', '').split(' ')[0]),
'upload_date': unified_strdate(upload_date_str),
'thumbnail': player_info.get('programImage') or player_info.get('VTU', {}).get('IUR'),
}

View File

@ -15,13 +15,23 @@ class BandcampIE(InfoExtractor):
_VALID_URL = r'https?://.*?\.bandcamp\.com/track/(?P<title>.*)'
_TESTS = [{
'url': 'http://youtube-dl.bandcamp.com/track/youtube-dl-test-song',
'file': '1812978515.mp3',
'md5': 'c557841d5e50261777a6585648adf439',
'info_dict': {
"title": "youtube-dl \"'/\\\u00e4\u21ad - youtube-dl test song \"'/\\\u00e4\u21ad",
"duration": 9.8485,
'id': '1812978515',
'ext': 'mp3',
'title': "youtube-dl \"'/\\\u00e4\u21ad - youtube-dl test song \"'/\\\u00e4\u21ad",
'duration': 9.8485,
},
'_skip': 'There is a limit of 200 free downloads / month for the test song'
}, {
'url': 'http://benprunty.bandcamp.com/track/lanius-battle',
'md5': '2b68e5851514c20efdff2afc5603b8b4',
'info_dict': {
'id': '2650410135',
'ext': 'mp3',
'title': 'Lanius (Battle)',
'uploader': 'Ben Prunty Music',
},
}]
def _real_extract(self, url):
@ -59,9 +69,9 @@ class BandcampIE(InfoExtractor):
raise ExtractorError('No free songs found')
download_link = m_download.group(1)
video_id = re.search(
r'var TralbumData = {(.*?)id: (?P<id>\d*?)$',
webpage, re.MULTILINE | re.DOTALL).group('id')
video_id = self._search_regex(
r'var TralbumData = {.*?id: (?P<id>\d+),?$',
webpage, 'video id', flags=re.MULTILINE | re.DOTALL)
download_webpage = self._download_webpage(download_link, video_id, 'Downloading free downloads page')
# We get the dictionary of the track from some javascript code

View File

@ -26,6 +26,8 @@ class BRIE(InfoExtractor):
'title': 'Wenn das Traditions-Theater wackelt',
'description': 'Heimatsound-Festival 2014: Wenn das Traditions-Theater wackelt',
'duration': 34,
'uploader': 'BR',
'upload_date': '20140802',
}
},
{
@ -66,8 +68,7 @@ class BRIE(InfoExtractor):
]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
display_id = mobj.group('id')
display_id = self._match_id(url)
page = self._download_webpage(url, display_id)
xml_url = self._search_regex(
r"return BRavFramework\.register\(BRavFramework\('avPlayer_(?:[a-f0-9-]{36})'\)\.setup\({dataURL:'(/(?:[a-z0-9\-]+/)+[a-z0-9/~_.-]+)'}\)\);", page, 'XMLURL')

View File

@ -35,7 +35,6 @@ class CliphunterIE(InfoExtractor):
'title': 'Fun Jynx Maze solo',
'thumbnail': 're:^https?://.*\.jpg$',
'age_limit': 18,
'duration': 1317,
}
}
@ -86,14 +85,11 @@ class CliphunterIE(InfoExtractor):
thumbnail = self._search_regex(
r"var\s+mov_thumb\s*=\s*'([^']+)';",
webpage, 'thumbnail', fatal=False)
duration = int_or_none(self._search_regex(
r'pl_dur\s*=\s*([0-9]+)', webpage, 'duration', fatal=False))
return {
'id': video_id,
'title': video_title,
'formats': formats,
'duration': duration,
'age_limit': self._rta_search(webpage),
'thumbnail': thumbnail,
}

View File

@ -1,6 +1,7 @@
from __future__ import unicode_literals
import base64
import datetime
import hashlib
import json
import netrc
@ -15,11 +16,13 @@ from ..utils import (
compat_http_client,
compat_urllib_error,
compat_urllib_parse_urlparse,
compat_urlparse,
compat_str,
clean_html,
compiled_regex_type,
ExtractorError,
float_or_none,
int_or_none,
RegexNotFoundError,
sanitize_filename,
@ -163,6 +166,14 @@ class InfoExtractor(object):
cls._VALID_URL_RE = re.compile(cls._VALID_URL)
return cls._VALID_URL_RE.match(url) is not None
@classmethod
def _match_id(cls, url):
if '_VALID_URL_RE' not in cls.__dict__:
cls._VALID_URL_RE = re.compile(cls._VALID_URL)
m = cls._VALID_URL_RE.match(url)
assert m
return m.group('id')
@classmethod
def working(cls):
"""Getter method for _WORKING."""
@ -323,7 +334,11 @@ class InfoExtractor(object):
try:
return json.loads(json_string)
except ValueError as ve:
raise ExtractorError('Failed to download JSON', cause=ve)
errmsg = '%s: Failed to parse JSON ' % video_id
if fatal:
raise ExtractorError(errmsg, cause=ve)
else:
self.report_warning(errmsg + str(ve))
def report_warning(self, msg, video_id=None):
idstr = '' if video_id is None else '%s: ' % video_id
@ -640,7 +655,9 @@ class InfoExtractor(object):
return formats
def _extract_m3u8_formats(self, m3u8_url, video_id, ext=None):
def _extract_m3u8_formats(self, m3u8_url, video_id, ext=None,
entry_protocol='m3u8', preference=None):
formats = [{
'format_id': 'm3u8-meta',
'url': m3u8_url,
@ -651,6 +668,11 @@ class InfoExtractor(object):
'format_note': 'Quality selection URL',
}]
format_url = lambda u: (
u
if re.match(r'^https?://', u)
else compat_urlparse.urljoin(m3u8_url, u))
m3u8_doc = self._download_webpage(m3u8_url, video_id)
last_info = None
kv_rex = re.compile(
@ -667,15 +689,17 @@ class InfoExtractor(object):
continue
else:
if last_info is None:
formats.append({'url': line})
formats.append({'url': format_url(line)})
continue
tbr = int_or_none(last_info.get('BANDWIDTH'), scale=1000)
f = {
'format_id': 'm3u8-%d' % (tbr if tbr else len(formats)),
'url': line.strip(),
'url': format_url(line.strip()),
'tbr': tbr,
'ext': ext,
'protocol': entry_protocol,
'preference': preference,
}
codecs = last_info.get('CODECS')
if codecs:
@ -695,6 +719,34 @@ class InfoExtractor(object):
self._sort_formats(formats)
return formats
def _live_title(self, name):
""" Generate the title for a live video """
now = datetime.datetime.now()
now_str = now.strftime("%Y-%m-%d %H:%M")
return name + ' ' + now_str
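
A hedged sketch of how an extractor can combine the new hooks (everything about ExampleLiveIE is invented): entry_protocol='m3u8_native' routes the formats through the new native HLS downloader, preference ranks them relative to other formats, and _live_title timestamps the title of a live stream:

```python
class ExampleLiveIE(InfoExtractor):  # hypothetical extractor
    _VALID_URL = r'https?://example\.com/live/(?P<id>[0-9]+)'

    def _real_extract(self, url):
        video_id = self._match_id(url)
        formats = self._extract_m3u8_formats(
            'http://example.com/live/%s/master.m3u8' % video_id,
            video_id, ext='mp4',
            entry_protocol='m3u8_native',  # use NativeHlsFD, not ffmpeg
            preference=-1)                 # rank below progressive formats
        return {
            'id': video_id,
            'title': self._live_title('Example live stream'),
            'is_live': True,
            'formats': formats,
        }
```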
def _int(self, v, name, fatal=False, **kwargs):
res = int_or_none(v, **kwargs)
if 'get_attr' in kwargs:
print(getattr(v, kwargs['get_attr']))
if res is None:
msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
if fatal:
raise ExtractorError(msg)
else:
self._downloader.report_warning(msg)
return res
def _float(self, v, name, fatal=False, **kwargs):
res = float_or_none(v, **kwargs)
if res is None:
msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
if fatal:
raise ExtractorError(msg)
else:
self._downloader.report_warning(msg)
return res
class SearchInfoExtractor(InfoExtractor):
"""

View File

@ -9,7 +9,7 @@ import xml.etree.ElementTree
from hashlib import sha1
from math import pow, sqrt, floor
from .common import InfoExtractor
from .subtitles import SubtitlesInfoExtractor
from ..utils import (
ExtractorError,
compat_urllib_parse,
@ -26,7 +26,7 @@ from ..aes import (
)
class CrunchyrollIE(InfoExtractor):
class CrunchyrollIE(SubtitlesInfoExtractor):
_VALID_URL = r'https?://(?:(?P<prefix>www|m)\.)?(?P<url>crunchyroll\.com/(?:[^/]*/[^/?&]*?|media/\?id=)(?P<video_id>[0-9]+))(?:[/?&]|$)'
_TEST = {
'url': 'http://www.crunchyroll.com/wanna-be-the-strongest-in-the-world/episode-1-an-idol-wrestler-is-born-645513',
@ -271,6 +271,10 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
else:
subtitles[lang_code] = self._convert_subtitles_to_srt(subtitle)
if self._downloader.params.get('listsubtitles', False):
self._list_available_subtitles(video_id, subtitles)
return
return {
'id': video_id,
'title': video_title,

View File

@ -29,9 +29,8 @@ class DropboxIE(InfoExtractor):
video_id = mobj.group('id')
fn = compat_urllib_parse_unquote(url_basename(url))
title = os.path.splitext(fn)[0]
video_url = (
re.sub(r'[?&]dl=0', '', url) +
('?' if '?' in url else '&') + 'dl=1')
video_url = re.sub(r'[?&]dl=0', '', url)
video_url += ('?' if '?' not in video_url else '&') + 'dl=1'
return {
'id': video_id,
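
The old code chose between '?' and '&' by looking at the original URL, so a URL without a query came out as 'something.mp4&dl=1' (see the commit message above). A standalone rendering of the fixed logic, with invented URLs:

```python
import re

def dropbox_direct_url(url):
    # Strip any existing dl=0 flag first, then pick '?' vs. '&' based
    # on the stripped URL rather than the original one.
    video_url = re.sub(r'[?&]dl=0', '', url)
    video_url += ('?' if '?' not in video_url else '&') + 'dl=1'
    return video_url

print(dropbox_direct_url('https://www.dropbox.com/s/abc123/video.mp4'))
# https://www.dropbox.com/s/abc123/video.mp4?dl=1
print(dropbox_direct_url('https://www.dropbox.com/s/abc123/video.mp4?dl=0'))
# https://www.dropbox.com/s/abc123/video.mp4?dl=1
```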

View File

@ -1,4 +1,6 @@
# encoding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
@ -7,20 +9,20 @@ from ..utils import ExtractorError
class EitbIE(InfoExtractor):
IE_NAME = u'eitb.tv'
IE_NAME = 'eitb.tv'
_VALID_URL = r'https?://www\.eitb\.tv/(eu/bideoa|es/video)/[^/]+/(?P<playlist_id>\d+)/(?P<chapter_id>\d+)'
_TEST = {
u'add_ie': ['Brightcove'],
u'url': u'http://www.eitb.tv/es/video/60-minutos-60-minutos-2013-2014/2677100210001/2743577154001/lasa-y-zabala-30-anos/',
u'md5': u'edf4436247185adee3ea18ce64c47998',
u'info_dict': {
u'id': u'2743577154001',
u'ext': u'mp4',
u'title': u'60 minutos (Lasa y Zabala, 30 años)',
'add_ie': ['Brightcove'],
'url': 'http://www.eitb.tv/es/video/60-minutos-60-minutos-2013-2014/2677100210001/2743577154001/lasa-y-zabala-30-anos/',
'md5': 'edf4436247185adee3ea18ce64c47998',
'info_dict': {
'id': '2743577154001',
'ext': 'mp4',
'title': '60 minutos (Lasa y Zabala, 30 años)',
# All videos from eitb have this description in the brightcove info
u'description': u'.',
u'uploader': u'Euskal Telebista',
'description': '.',
'uploader': 'Euskal Telebista',
},
}
@ -30,7 +32,7 @@ class EitbIE(InfoExtractor):
webpage = self._download_webpage(url, chapter_id)
bc_url = BrightcoveIE._extract_brightcove_url(webpage)
if bc_url is None:
raise ExtractorError(u'Could not extract the Brightcove url')
raise ExtractorError('Could not extract the Brightcove url')
# The BrightcoveExperience object doesn't contain the video id, we set
# it manually
bc_url += '&%40videoPlayer={0}'.format(chapter_id)

View File

@ -14,11 +14,11 @@ class EpornerIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?eporner\.com/hd-porn/(?P<id>\d+)/(?P<display_id>[\w-]+)'
_TEST = {
'url': 'http://www.eporner.com/hd-porn/95008/Infamous-Tiffany-Teen-Strip-Tease-Video/',
'md5': '3b427ae4b9d60619106de3185c2987cd',
'md5': '39d486f046212d8e1b911c52ab4691f8',
'info_dict': {
'id': '95008',
'display_id': 'Infamous-Tiffany-Teen-Strip-Tease-Video',
'ext': 'flv',
'ext': 'mp4',
'title': 'Infamous Tiffany Teen Strip Tease Video',
'duration': 194,
'view_count': int,

View File

@ -7,6 +7,7 @@ from ..utils import (
compat_urllib_parse_urlparse,
compat_urllib_request,
compat_urllib_parse,
str_to_int,
)
@ -20,6 +21,7 @@ class ExtremeTubeIE(InfoExtractor):
'ext': 'mp4',
'title': 'Music Video 14 british euro brit european cumshots swallow',
'uploader': 'unknown',
'view_count': int,
'age_limit': 18,
}
}, {
@ -39,8 +41,12 @@ class ExtremeTubeIE(InfoExtractor):
video_title = self._html_search_regex(
r'<h1 [^>]*?title="([^"]+)"[^>]*>', webpage, 'title')
uploader = self._html_search_regex(
r'>Posted by:(?=<)(?:\s|<[^>]*>)*(.+?)\|', webpage, 'uploader',
fatal=False)
r'Uploaded by:\s*</strong>\s*(.+?)\s*</div>',
webpage, 'uploader', fatal=False)
view_count = str_to_int(self._html_search_regex(
r'Views:\s*</strong>\s*<span>([\d,\.]+)</span>',
webpage, 'view count', fatal=False))
video_url = compat_urllib_parse.unquote(self._html_search_regex(
r'video_url=(.+?)&amp;', webpage, 'video_url'))
path = compat_urllib_parse_urlparse(video_url).path
@ -51,6 +57,7 @@ class ExtremeTubeIE(InfoExtractor):
'id': video_id,
'title': video_title,
'uploader': uploader,
'view_count': view_count,
'url': video_url,
'format': format,
'format_id': format,

View File

@ -35,7 +35,7 @@ class FacebookIE(InfoExtractor):
'id': '637842556329505',
'ext': 'mp4',
'duration': 38,
'title': 'Did you know Kei Nishikori is the first Asian man to ever reach a Grand Slam fin...',
'title': 're:Did you know Kei Nishikori is the first Asian man to ever reach a Grand Slam',
}
}, {
'note': 'Video without discernible title',

View File

@ -21,7 +21,7 @@ class FunnyOrDieIE(InfoExtractor):
},
}, {
'url': 'http://www.funnyordie.com/embed/e402820827',
'md5': 'ff4d83318f89776ed0250634cfaa8d36',
'md5': '29f4c5e5a61ca39dfd7e8348a75d0aad',
'info_dict': {
'id': 'e402820827',
'ext': 'mp4',

View File

@ -155,7 +155,6 @@ class GenericIE(InfoExtractor):
# funnyordie embed
{
'url': 'http://www.theguardian.com/world/2014/mar/11/obama-zach-galifianakis-between-two-ferns',
'md5': '7cf780be104d40fea7bae52eed4a470e',
'info_dict': {
'id': '18e820ec3f',
'ext': 'mp4',
@ -180,13 +179,13 @@ class GenericIE(InfoExtractor):
# Embedded TED video
{
'url': 'http://en.support.wordpress.com/videos/ted-talks/',
'md5': 'deeeabcc1085eb2ba205474e7235a3d5',
'md5': '65fdff94098e4a607385a60c5177c638',
'info_dict': {
'id': '981',
'id': '1969',
'ext': 'mp4',
'title': 'My web playroom',
'uploader': 'Ze Frank',
'description': 'md5:ddb2a40ecd6b6a147e400e535874947b',
'title': 'Hidden miracles of the natural world',
'uploader': 'Louie Schwartzberg',
'description': 'md5:8145d19d320ff3e52f28401f4c4283b9',
}
},
# Embeded Ustream video
@ -226,21 +225,6 @@ class GenericIE(InfoExtractor):
'skip_download': 'Requires rtmpdump'
}
},
# smotri embed
{
'url': 'http://rbctv.rbc.ru/archive/news/562949990879132.shtml',
'md5': 'ec40048448e9284c9a1de77bb188108b',
'info_dict': {
'id': 'v27008541fad',
'ext': 'mp4',
'title': 'Крым и Севастополь вошли в состав России',
'description': 'md5:fae01b61f68984c7bd2fa741e11c3175',
'duration': 900,
'upload_date': '20140318',
'uploader': 'rbctv_2012_4',
'uploader_id': 'rbctv_2012_4',
},
},
# Condé Nast embed
{
'url': 'http://www.wired.com/2014/04/honda-asimo/',
@ -295,13 +279,13 @@ class GenericIE(InfoExtractor):
{
'url': 'https://play.google.com/store/apps/details?id=com.gameloft.android.ANMP.GloftA8HM',
'info_dict': {
'id': 'jpSGZsgga_I',
'id': '4vAffPZIT44',
'ext': 'mp4',
'title': 'Asphalt 8: Airborne - Launch Trailer',
'title': 'Asphalt 8: Airborne - Update - Welcome to Dubai!',
'uploader': 'Gameloft',
'uploader_id': 'gameloft',
'upload_date': '20130821',
'description': 'md5:87bd95f13d8be3e7da87a5f2c443106a',
'upload_date': '20140828',
'description': 'md5:c80da9ed3d83ae6d1876c834de03e1c4',
},
'params': {
'skip_download': True,
@ -382,14 +366,21 @@ class GenericIE(InfoExtractor):
'thumbnail': 're:^https?://.*\.jpg$',
},
},
# Wistia embed
{
'url': 'http://education-portal.com/academy/lesson/north-american-exploration-failed-colonies-of-spain-france-england.html#lesson',
'md5': '8788b683c777a5cf25621eaf286d0c23',
'info_dict': {
'id': '1cfaf6b7ea',
'ext': 'mov',
'title': 'md5:51364a8d3d009997ba99656004b5e20d',
'duration': 643.0,
'filesize': 182808282,
'uploader': 'education-portal.com',
},
},
]
def report_download_webpage(self, video_id):
"""Report webpage download."""
if not self._downloader.params.get('test', False):
self._downloader.report_warning('Falling back on generic information extractor.')
super(GenericIE, self).report_download_webpage(video_id)
def report_following_redirect(self, new_url):
"""Report information extraction."""
self._downloader.to_screen('[redirect] Following redirect to %s' % new_url)
@ -489,6 +480,7 @@ class GenericIE(InfoExtractor):
url, smuggled_data = unsmuggle_url(url)
force_videoid = None
is_intentional = smuggled_data and smuggled_data.get('to_generic')
if smuggled_data and 'force_videoid' in smuggled_data:
force_videoid = smuggled_data['force_videoid']
video_id = force_videoid
@ -531,6 +523,9 @@ class GenericIE(InfoExtractor):
'upload_date': upload_date,
}
if not self._downloader.params.get('test', False) and not is_intentional:
self._downloader.report_warning('Falling back on generic information extractor.')
try:
webpage = self._download_webpage(url, video_id)
except ValueError:
@ -631,7 +626,7 @@ class GenericIE(InfoExtractor):
)
(["\'])
(?P<url>(?:https?:)?//(?:www\.)?youtube(?:-nocookie)?\.com/
(?:embed|v)/.+?)
(?:embed|v|p)/.+?)
\1''', webpage)
if matches:
return _playlist_from_matches(
@ -656,6 +651,16 @@ class GenericIE(InfoExtractor):
'title': video_title,
'id': video_id,
}
match = re.search(r'(?:id=["\']wistia_|data-wistiaid=["\']|Wistia\.embed\(["\'])(?P<id>[^"\']+)', webpage)
if match:
return {
'_type': 'url_transparent',
'url': 'http://fast.wistia.net/embed/iframe/{0:}'.format(match.group('id')),
'ie_key': 'Wistia',
'uploader': video_uploader,
'title': video_title,
'id': match.group('id')
}
# Look for embedded blip.tv player
mobj = re.search(r'<meta\s[^>]*https?://api\.blip\.tv/\w+/redirect/\w+/(\d+)', webpage)

View File

@ -36,16 +36,16 @@ class GodTubeIE(InfoExtractor):
'http://www.godtube.com/resource/mediaplayer/%s.xml' % video_id.lower(),
video_id, 'Downloading player config XML')
video_url = config.find('.//file').text
uploader = config.find('.//author').text
timestamp = parse_iso8601(config.find('.//date').text)
duration = parse_duration(config.find('.//duration').text)
thumbnail = config.find('.//image').text
video_url = config.find('file').text
uploader = config.find('author').text
timestamp = parse_iso8601(config.find('date').text)
duration = parse_duration(config.find('duration').text)
thumbnail = config.find('image').text
media = self._download_xml(
'http://www.godtube.com/media/xml/?v=%s' % video_id, video_id, 'Downloading media XML')
title = media.find('.//title').text
title = media.find('title').text
return {
'id': video_id,
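
This mirrors the golem commit message above: Python 2.6's bundled ElementTree only understands trivial paths, and since these elements are direct children of the root, a bare tag name is both sufficient and portable. A minimal illustration with an invented config document:

```python
import xml.etree.ElementTree as ET

config = ET.fromstring(
    '<config><file>http://example.com/video.mp4</file></config>')
# A bare tag searches direct children only, which works on 2.6 too.
print(config.find('file').text)  # http://example.com/video.mp4
```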

View File

@ -0,0 +1,69 @@
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
compat_urlparse,
determine_ext,
)
class GolemIE(InfoExtractor):
_VALID_URL = r'^https?://video\.golem\.de/.+?/(?P<id>.+?)/'
_TEST = {
'url': 'http://video.golem.de/handy/14095/iphone-6-und-6-plus-test.html',
'md5': 'c1a2c0a3c863319651c7c992c5ee29bf',
'info_dict': {
'id': '14095',
'format_id': 'high',
'ext': 'mp4',
'title': 'iPhone 6 und 6 Plus - Test',
'duration': 300.44,
'filesize': 65309548,
}
}
_PREFIX = 'http://video.golem.de'
def _real_extract(self, url):
video_id = self._match_id(url)
config = self._download_xml(
'https://video.golem.de/xml/{0}.xml'.format(video_id), video_id)
info = {
'id': video_id,
'title': config.findtext('./title', 'golem'),
'duration': self._float(config.findtext('./playtime'), 'duration'),
}
formats = []
for e in config:
url = e.findtext('./url')
if not url:
continue
formats.append({
'format_id': e.tag,
'url': compat_urlparse.urljoin(self._PREFIX, url),
'height': self._int(e.get('height'), 'height'),
'width': self._int(e.get('width'), 'width'),
'filesize': self._int(e.findtext('filesize'), 'filesize'),
'ext': determine_ext(e.findtext('./filename')),
})
self._sort_formats(formats)
info['formats'] = formats
thumbnails = []
for e in config.findall('.//teaser'):
url = e.findtext('./url')
if not url:
continue
thumbnails.append({
'url': compat_urlparse.urljoin(self._PREFIX, url),
'width': self._int(e.get('width'), 'thumbnail width'),
'height': self._int(e.get('height'), 'thumbnail height'),
})
info['thumbnails'] = thumbnails
return info

View File

@ -0,0 +1,81 @@
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
get_meta_content,
parse_iso8601,
)
class HeiseIE(InfoExtractor):
_VALID_URL = r'''(?x)
https?://(?:www\.)?heise\.de/video/artikel/
.+?(?P<id>[0-9]+)\.html(?:$|[?#])
'''
_TEST = {
'url': (
'http://www.heise.de/video/artikel/Podcast-c-t-uplink-3-3-Owncloud-Tastaturen-Peilsender-Smartphone-2404147.html'
),
'md5': 'ffed432483e922e88545ad9f2f15d30e',
'info_dict': {
'id': '2404147',
'ext': 'mp4',
'title': (
"Podcast: c't uplink 3.3 Owncloud / Tastaturen / Peilsender Smartphone"
),
'format_id': 'mp4_720',
'timestamp': 1411812600,
'upload_date': '20140927',
'description': 'In uplink-Episode 3.3 geht es darum, wie man sich von Cloud-Anbietern emanzipieren kann, worauf man beim Kauf einer Tastatur achten sollte und was Smartphones über uns verraten.',
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
json_url = self._search_regex(
r'json_url:\s*"([^"]+)"', webpage, 'json URL')
config = self._download_json(json_url, video_id)
info = {
'id': video_id,
'thumbnail': config.get('poster'),
'timestamp': parse_iso8601(get_meta_content('date', webpage)),
'description': self._og_search_description(webpage),
}
title = get_meta_content('fulltitle', webpage)
if title:
info['title'] = title
elif config.get('title'):
info['title'] = config['title']
else:
info['title'] = self._og_search_title(webpage)
formats = []
for t, rs in config['formats'].items():
if not rs or not hasattr(rs, 'items'):
self._downloader.report_warning(
'formats: {0}: no resolutions'.format(t))
continue
for height_str, obj in rs.items():
format_id = '{0}_{1}'.format(t, height_str)
if not obj or not obj.get('url'):
self._downloader.report_warning(
'formats: {0}: no url'.format(format_id))
continue
formats.append({
'url': obj['url'],
'format_id': format_id,
'height': self._int(height_str, 'height'),
})
self._sort_formats(formats)
info['formats'] = formats
return info

View File

@ -89,7 +89,12 @@ class IGNIE(InfoExtractor):
'<param name="flashvars"[^>]*value="[^"]*?url=(https?://www\.ign\.com/videos/.*?)["&]',
webpage)
if multiple_urls:
return [self.url_result(u, ie='IGN') for u in multiple_urls]
entries = [self.url_result(u, ie='IGN') for u in multiple_urls]
return {
'_type': 'playlist',
'id': name_or_id,
'entries': entries,
}
video_id = self._find_video_id(webpage)
result = self._get_video_info(video_id)

View File

@ -1,3 +1,5 @@
from __future__ import unicode_literals
import re
from .common import InfoExtractor
@ -12,12 +14,13 @@ class InternetVideoArchiveIE(InfoExtractor):
_VALID_URL = r'https?://video\.internetvideoarchive\.net/flash/players/.*?\?.*?publishedid.*?'
_TEST = {
u'url': u'http://video.internetvideoarchive.net/flash/players/flashconfiguration.aspx?customerid=69249&publishedid=452693&playerid=247',
u'file': u'452693.mp4',
u'info_dict': {
u'title': u'SKYFALL',
u'description': u'In SKYFALL, Bond\'s loyalty to M is tested as her past comes back to haunt her. As MI6 comes under attack, 007 must track down and destroy the threat, no matter how personal the cost.',
u'duration': 153,
'url': 'http://video.internetvideoarchive.net/flash/players/flashconfiguration.aspx?customerid=69249&publishedid=452693&playerid=247',
'info_dict': {
'id': '452693',
'ext': 'mp4',
'title': 'SKYFALL',
'description': 'In SKYFALL, Bond\'s loyalty to M is tested as her past comes back to haunt her. As MI6 comes under attack, 007 must track down and destroy the threat, no matter how personal the cost.',
'duration': 149,
},
}
@ -42,7 +45,7 @@ class InternetVideoArchiveIE(InfoExtractor):
url = self._build_url(query)
flashconfiguration = self._download_xml(url, video_id,
u'Downloading flash configuration')
'Downloading flash configuration')
file_url = flashconfiguration.find('file').text
file_url = file_url.replace('/playlist.aspx', '/mrssplaylist.aspx')
# Replace some of the parameters in the query to get the best quality
@ -51,7 +54,7 @@ class InternetVideoArchiveIE(InfoExtractor):
lambda m: self._clean_query(m.group()),
file_url)
info = self._download_xml(file_url, video_id,
u'Downloading video info')
'Downloading video info')
item = info.find('channel/item')
def _bp(p):

View File

@ -63,7 +63,8 @@ class IzleseneIE(InfoExtractor):
title = self._og_search_title(webpage)
description = self._og_search_description(webpage)
thumbnail = self._og_search_thumbnail(webpage)
thumbnail = self._proto_relative_url(
self._og_search_thumbnail(webpage), scheme='http:')
uploader = self._html_search_regex(
r"adduserUsername\s*=\s*'([^']+)';",

View File

@ -1,8 +1,6 @@
# coding=utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
int_or_none,
@ -12,14 +10,14 @@ from ..utils import (
class JpopsukiIE(InfoExtractor):
IE_NAME = 'jpopsuki.tv'
_VALID_URL = r'https?://(?:www\.)?jpopsuki\.tv/video/(.*?)/(?P<id>\S+)'
_VALID_URL = r'https?://(?:www\.)?jpopsuki\.tv/(?:category/)?video/[^/]+/(?P<id>\S+)'
_TEST = {
'url': 'http://www.jpopsuki.tv/video/ayumi-hamasaki---evolution/00be659d23b0b40508169cdee4545771',
'md5': '88018c0c1a9b1387940e90ec9e7e198e',
'file': '00be659d23b0b40508169cdee4545771.mp4',
'info_dict': {
'id': '00be659d23b0b40508169cdee4545771',
'ext': 'mp4',
'title': 'ayumi hamasaki - evolution',
'description': 'Release date: 2001.01.31\r\n浜崎あゆみ - evolution',
'thumbnail': 'http://www.jpopsuki.tv/cache/89722c74d2a2ebe58bcac65321c115b2.jpg',
@ -30,8 +28,7 @@ class JpopsukiIE(InfoExtractor):
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
@ -47,11 +44,9 @@ class JpopsukiIE(InfoExtractor):
uploader_id = self._html_search_regex(
r'<li>from: <a href="/user/view/user/\S*?/uid/(\d*)',
webpage, 'video uploader_id', fatal=False)
upload_date = self._html_search_regex(
upload_date = unified_strdate(self._html_search_regex(
r'<li>uploaded: (.*?)</li>', webpage, 'video upload_date',
fatal=False)
if upload_date is not None:
upload_date = unified_strdate(upload_date)
fatal=False))
view_count_str = self._html_search_regex(
r'<li>Hits: ([0-9]+?)</li>', webpage, 'video view_count',
fatal=False)

View File

@ -11,10 +11,9 @@ from ..utils import (
class JukeboxIE(InfoExtractor):
_VALID_URL = r'^http://www\.jukebox?\..+?\/.+[,](?P<video_id>[a-z0-9\-]+)\.html'
_VALID_URL = r'^http://www\.jukebox?\..+?\/.+[,](?P<id>[a-z0-9\-]+)\.html'
_TEST = {
'url': 'http://www.jukebox.es/kosheen/videoclip,pride,r303r.html',
'md5': '1574e9b4d6438446d5b7dbcdf2786276',
'info_dict': {
'id': 'r303r',
'ext': 'flv',
@ -24,8 +23,7 @@ class JukeboxIE(InfoExtractor):
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('video_id')
video_id = self._match_id(url)
html = self._download_webpage(url, video_id)
iframe_url = unescapeHTML(self._search_regex(r'<iframe .*src="([^"]*)"', html, 'iframe url'))

View File

@ -0,0 +1,69 @@
# coding: utf-8
from __future__ import unicode_literals
import re
import json
from .common import InfoExtractor
from ..utils import (
determine_ext,
js_to_json,
parse_duration,
remove_end,
)
class LRTIE(InfoExtractor):
IE_NAME = 'lrt.lt'
_VALID_URL = r'https?://(?:www\.)?lrt\.lt/mediateka/irasas/(?P<id>[0-9]+)'
_TEST = {
'url': 'http://www.lrt.lt/mediateka/irasas/54391/',
'info_dict': {
'id': '54391',
'ext': 'mp4',
'title': 'Septynios Kauno dienos',
'description': 'Kauno miesto ir apskrities naujienos',
'duration': 1783,
},
'params': {
'skip_download': True, # HLS download
},
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
webpage = self._download_webpage(url, video_id)
title = remove_end(self._og_search_title(webpage), ' - LRT')
thumbnail = self._og_search_thumbnail(webpage)
description = self._og_search_description(webpage)
duration = parse_duration(self._search_regex(
r"'duration':\s*'([^']+)',", webpage,
'duration', fatal=False, default=None))
formats = []
for js in re.findall(r'(?s)config:\s*(\{.*?\})', webpage):
data = json.loads(js_to_json(js))
if data['provider'] == 'rtmp':
formats.append({
'format_id': 'rtmp',
'ext': determine_ext(data['file']),
'url': data['streamer'],
'play_path': 'mp4:%s' % data['file'],
'preference': -1,
})
else:
formats.extend(
self._extract_m3u8_formats(data['file'], video_id, 'mp4'))
return {
'id': video_id,
'title': title,
'formats': formats,
'thumbnail': thumbnail,
'description': description,
'duration': duration,
}

View File

@ -1,7 +1,6 @@
# coding: utf-8
from __future__ import unicode_literals
import datetime
import json
from .common import InfoExtractor
@ -23,6 +22,7 @@ class MuenchenTVIE(InfoExtractor):
'ext': 'mp4',
'title': 're:^münchen.tv-Livestream [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
'is_live': True,
'thumbnail': 're:^https?://.*\.jpg$'
},
'params': {
'skip_download': True,
@ -33,9 +33,7 @@ class MuenchenTVIE(InfoExtractor):
display_id = 'live'
webpage = self._download_webpage(url, display_id)
now = datetime.datetime.now()
now_str = now.strftime("%Y-%m-%d %H:%M")
title = self._og_search_title(webpage) + ' ' + now_str
title = self._live_title(self._og_search_title(webpage))
data_js = self._search_regex(
r'(?s)\nplaylist:\s*(\[.*?}\]),related:',
@ -73,5 +71,6 @@ class MuenchenTVIE(InfoExtractor):
'title': title,
'formats': formats,
'is_live': True,
'thumbnail': thumbnail,
}

View File

@ -6,6 +6,7 @@ import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
compat_urllib_parse_urlparse,
int_or_none,
remove_end,
)
@ -13,76 +14,116 @@ from ..utils import (
class NFLIE(InfoExtractor):
IE_NAME = 'nfl.com'
_VALID_URL = r'(?x)https?://(?:www\.)?nfl\.com/(?:videos/(?:.+)/|.*?\#video=)(?P<id>\d..[0-9]+)'
_PLAYER_CONFIG_URL = 'http://www.nfl.com/static/content/static/config/video/config.json'
_TEST = {
'url': 'http://www.nfl.com/videos/nfl-game-highlights/0ap3000000398478/Week-3-Redskins-vs-Eagles-highlights',
# 'md5': '5eb8c40a727dda106d510e5d6ffa79e5', # md5 checksum fluctuates
'info_dict': {
'id': '0ap3000000398478',
'ext': 'mp4',
'title': 'Week 3: Washington Redskins vs. Philadelphia Eagles highlights',
'description': 'md5:56323bfb0ac4ee5ab24bd05fdf3bf478',
'upload_date': '20140921',
'timestamp': 1411337580,
'thumbnail': 're:^https?://.*\.jpg$',
_VALID_URL = r'''(?x)https?://
(?P<host>(?:www\.)?(?:nfl\.com|.*?\.clubs\.nfl\.com))/
(?:.+?/)*
(?P<id>(?:\d[a-z]{2}\d{13}|\w{8}\-(?:\w{4}\-){3}\w{12}))'''
_TESTS = [
{
'url': 'http://www.nfl.com/videos/nfl-game-highlights/0ap3000000398478/Week-3-Redskins-vs-Eagles-highlights',
'md5': '394ef771ddcd1354f665b471d78ec4c6',
'info_dict': {
'id': '0ap3000000398478',
'ext': 'mp4',
'title': 'Week 3: Redskins vs. Eagles highlights',
'description': 'md5:56323bfb0ac4ee5ab24bd05fdf3bf478',
'upload_date': '20140921',
'timestamp': 1411337580,
'thumbnail': 're:^https?://.*\.jpg$',
}
},
{
'url': 'http://prod.www.steelers.clubs.nfl.com/video-and-audio/videos/LIVE_Post_Game_vs_Browns/9d72f26a-9e2b-4718-84d3-09fb4046c266',
'md5': 'cf85bdb4bc49f6e9d3816d130c78279c',
'info_dict': {
'id': '9d72f26a-9e2b-4718-84d3-09fb4046c266',
'ext': 'mp4',
'title': 'LIVE: Post Game vs. Browns',
'description': 'md5:6a97f7e5ebeb4c0e69a418a89e0636e8',
'upload_date': '20131229',
'timestamp': 1388354455,
'thumbnail': 're:^https?://.*\.jpg$',
}
}
]
@staticmethod
def prepend_host(host, url):
if not url.startswith('http'):
if not url.startswith('/'):
url = '/%s' % url
url = 'http://{0:}{1:}'.format(host, url)
return url
@staticmethod
def format_from_stream(stream, protocol, host, path_prefix='',
preference=0, note=None):
url = '{protocol:}://{host:}/{prefix:}{path:}'.format(
protocol=protocol,
host=host,
prefix=path_prefix,
path=stream.get('path'),
)
return {
'url': url,
'vbr': int_or_none(stream.get('rate', 0), 1000),
'preference': preference,
'format_note': note,
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
video_id, host = mobj.group('id'), mobj.group('host')
config = self._download_json(self._PLAYER_CONFIG_URL, video_id,
webpage = self._download_webpage(url, video_id)
config_url = NFLIE.prepend_host(host, self._search_regex(
r'(?:config|configURL)\s*:\s*"([^"]+)"', webpage, 'config URL'))
config = self._download_json(config_url, video_id,
note='Downloading player config')
url_template = 'http://nfl.com{contentURLTemplate:s}'.format(**config)
video_data = self._download_json(url_template.format(id=video_id), video_id)
cdns = config.get('cdns')
if not cdns:
raise ExtractorError('Failed to get CDN data', expected=True)
url_template = NFLIE.prepend_host(
host, '{contentURLTemplate:}'.format(**config))
video_data = self._download_json(
url_template.format(id=video_id), video_id)
formats = []
streams = video_data.get('cdnData', {}).get('bitrateInfo', [])
for name, cdn in cdns.items():
# LimeLight streams don't seem to work
if cdn.get('name') == 'LIMELIGHT':
continue
protocol = cdn.get('protocol')
host = remove_end(cdn.get('host', ''), '/')
if not (protocol and host):
continue
path_prefix = cdn.get('pathprefix', '')
if path_prefix and not path_prefix.endswith('/'):
path_prefix = '%s/' % path_prefix
get_url = lambda p: '{protocol:s}://{host:s}/{prefix:s}{path:}'.format(
protocol=protocol,
host=host,
prefix=path_prefix,
path=p,
)
if protocol == 'rtmp':
preference = -2
elif 'prog' in name.lower():
preference = -1
else:
preference = 0
cdn_data = video_data.get('cdnData', {})
streams = cdn_data.get('bitrateInfo', [])
if cdn_data.get('format') == 'EXTERNAL_HTTP_STREAM':
parts = compat_urllib_parse_urlparse(cdn_data.get('uri'))
protocol, host = parts.scheme, parts.netloc
for stream in streams:
path = stream.get('path')
if not path:
formats.append(
NFLIE.format_from_stream(stream, protocol, host))
else:
cdns = config.get('cdns')
if not cdns:
raise ExtractorError('Failed to get CDN data', expected=True)
for name, cdn in cdns.items():
# LimeLight streams don't seem to work
if cdn.get('name') == 'LIMELIGHT':
continue
formats.append({
'url': get_url(path),
'vbr': int_or_none(stream.get('rate', 0), 1000),
'preference': preference,
'format_note': name,
})
protocol = cdn.get('protocol')
host = remove_end(cdn.get('host', ''), '/')
if not (protocol and host):
continue
prefix = cdn.get('pathprefix', '')
if prefix and not prefix.endswith('/'):
prefix = '%s/' % prefix
preference = 0
if protocol == 'rtmp':
preference = -2
elif 'prog' in name.lower():
preference = 1
for stream in streams:
formats.append(
NFLIE.format_from_stream(stream, protocol, host,
prefix, preference, name))
self._sort_formats(formats)
@ -94,7 +135,7 @@ class NFLIE(InfoExtractor):
return {
'id': video_id,
'title': video_data.get('storyHeadline'),
'title': video_data.get('headline'),
'formats': formats,
'description': video_data.get('caption'),
'duration': video_data.get('duration'),

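Pulled out of the extractor, the two new static helpers are plain string assembly. A self-contained sketch with invented host and stream values:

```python
def prepend_host(host, url):
    # Same logic as NFLIE.prepend_host(): leave absolute URLs alone,
    # otherwise root the path at the page's own host.
    if not url.startswith('http'):
        if not url.startswith('/'):
            url = '/%s' % url
        url = 'http://{0}{1}'.format(host, url)
    return url

def format_from_stream(stream, protocol, host, path_prefix='',
                       preference=0, note=None):
    # Same shape as NFLIE.format_from_stream(); 'rate' arrives in bits/s.
    return {
        'url': '{0}://{1}/{2}{3}'.format(protocol, host, path_prefix,
                                         stream.get('path')),
        'vbr': int(stream.get('rate', 0)) // 1000,
        'preference': preference,
        'format_note': note,
    }

print(prepend_host('www.nfl.com', 'static/config/video/config.json'))
# -> http://www.nfl.com/static/config/video/config.json
print(format_from_stream({'path': 'vod/clip.mp4', 'rate': 4500000},
                         'http', 'cdn.example.com', note='prog'))
```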
View File

@ -0,0 +1,47 @@
# encoding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
class OktoberfestTVIE(InfoExtractor):
_VALID_URL = r'https?://www\.oktoberfest-tv\.de/[^/]+/[^/]+/video/(?P<id>[^/?#]+)'
_TEST = {
'url': 'http://www.oktoberfest-tv.de/de/kameras/video/hb-zelt',
'info_dict': {
'id': 'hb-zelt',
'ext': 'mp4',
'title': 're:^Live-Kamera: Hofbräuzelt [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
'thumbnail': 're:^https?://.*\.jpg$',
'is_live': True,
},
'params': {
'skip_download': True,
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
title = self._live_title(self._html_search_regex(
r'<h1><strong>.*?</strong>(.*?)</h1>', webpage, 'title'))
clip = self._search_regex(
r"clip:\s*\{\s*url:\s*'([^']+)'", webpage, 'clip')
ncurl = self._search_regex(
r"netConnectionUrl:\s*'([^']+)'", webpage, 'rtmp base')
video_url = ncurl + clip
thumbnail = self._search_regex(
r"canvas:\s*\{\s*backgroundImage:\s*'url\(([^)]+)\)'", webpage,
'thumbnail', fatal=False)
return {
'id': video_id,
'title': title,
'url': video_url,
'ext': 'mp4',
'is_live': True,
'thumbnail': thumbnail,
}

View File

@ -4,6 +4,7 @@ import re
from .common import InfoExtractor
from ..utils import (
unified_strdate,
US_RATINGS,
)
@ -11,10 +12,10 @@ from ..utils import (
class PBSIE(InfoExtractor):
_VALID_URL = r'''(?x)https?://
(?:
# Direct video URL
video\.pbs\.org/(?:viralplayer|video)/(?P<id>[0-9]+)/? |
# Article with embedded player
(?:www\.)?pbs\.org/(?:[^/]+/){2,5}(?P<presumptive_id>[^/]+)/?(?:$|[?\#]) |
# Direct video URL
video\.pbs\.org/(?:viralplayer|video)/(?P<id>[0-9]+)/? |
# Article with embedded player (or direct video)
(?:www\.)?pbs\.org/(?:[^/]+/){2,5}(?P<presumptive_id>[^/]+?)(?:\.html)?/?(?:$|[?\#]) |
# Player
video\.pbs\.org/(?:widget/)?partnerplayer/(?P<player_id>[^/]+)/
)
@ -65,10 +66,25 @@ class PBSIE(InfoExtractor):
'duration': 6559,
'thumbnail': 're:^https?://.*\.jpg$',
}
},
{
'url': 'http://www.pbs.org/wgbh/nova/earth/killer-typhoon.html',
'md5': '908f3e5473a693b266b84e25e1cf9703',
'info_dict': {
'id': '2365160389',
'display_id': 'killer-typhoon',
'ext': 'mp4',
'description': 'md5:c741d14e979fc53228c575894094f157',
'title': 'Killer Typhoon',
'duration': 3172,
'thumbnail': 're:^https?://.*\.jpg$',
'upload_date': '20140122',
}
}
]
def _extract_ids(self, url):
def _extract_webpage(self, url):
mobj = re.match(self._VALID_URL, url)
presumptive_id = mobj.group('presumptive_id')
@ -76,15 +92,20 @@ class PBSIE(InfoExtractor):
if presumptive_id:
webpage = self._download_webpage(url, display_id)
upload_date = unified_strdate(self._search_regex(
r'<input type="hidden" id="air_date_[0-9]+" value="([^"]+)"',
webpage, 'upload date', default=None))
MEDIA_ID_REGEXES = [
r"div\s*:\s*'videoembed'\s*,\s*mediaid\s*:\s*'(\d+)'", # frontline video embed
r'class="coveplayerid">([^<]+)<', # coveplayer
r'<input type="hidden" id="pbs_video_id_[0-9]+" value="([0-9]+)"/>', # jwplayer
]
media_id = self._search_regex(
MEDIA_ID_REGEXES, webpage, 'media ID', fatal=False, default=None)
if media_id:
return media_id, presumptive_id
return media_id, presumptive_id, upload_date
url = self._search_regex(
r'<iframe\s+(?:class|id)=["\']partnerPlayer["\'].*?\s+src=["\'](.*?)["\']>',
@ -104,10 +125,10 @@ class PBSIE(InfoExtractor):
video_id = mobj.group('id')
display_id = video_id
return video_id, display_id
return video_id, display_id, None
def _real_extract(self, url):
video_id, display_id = self._extract_ids(url)
video_id, display_id, upload_date = self._extract_webpage(url)
info_url = 'http://video.pbs.org/videoInfo/%s?format=json' % video_id
info = self._download_json(info_url, display_id)
@ -119,6 +140,7 @@ class PBSIE(InfoExtractor):
return {
'id': video_id,
'display_id': display_id,
'title': info['title'],
'url': info['alternate_encoding']['url'],
'ext': 'mp4',
@ -126,4 +148,5 @@ class PBSIE(InfoExtractor):
'thumbnail': info.get('image_url'),
'duration': info.get('duration'),
'age_limit': age_limit,
'upload_date': upload_date,
}
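The new `MEDIA_ID_REGEXES` list relies on `_search_regex()` accepting several patterns at once. A minimal stand-in, assuming youtube-dl simply tries the patterns in order and returns the first capture:

```python
import re

def search_regex(patterns, text, default=None):
    # Assumed behaviour of _search_regex() with a list: first match wins.
    for pattern in patterns:
        mobj = re.search(pattern, text)
        if mobj:
            return mobj.group(1)
    return default

MEDIA_ID_REGEXES = [
    r"div\s*:\s*'videoembed'\s*,\s*mediaid\s*:\s*'(\d+)'",  # frontline embed
    r'class="coveplayerid">([^<]+)<',                       # coveplayer
    r'<input type="hidden" id="pbs_video_id_[0-9]+" value="([0-9]+)"/>',
]

page = '<input type="hidden" id="pbs_video_id_1" value="2365160389"/>'
print(search_regex(MEDIA_ID_REGEXES, page))  # -> 2365160389
```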

View File

@ -0,0 +1,55 @@
# coding: utf-8
from __future__ import unicode_literals
import re
import os.path
from .common import InfoExtractor
from ..utils import (
compat_urllib_parse,
compat_urllib_request,
)
class PlayedIE(InfoExtractor):
IE_NAME = 'played.to'
_VALID_URL = r'https?://(?:www\.)?played\.to/(?P<id>[a-zA-Z0-9_-]+)'
_TEST = {
'url': 'http://played.to/j2f2sfiiukgt',
'md5': 'c2bd75a368e82980e7257bf500c00637',
'info_dict': {
'id': 'j2f2sfiiukgt',
'ext': 'flv',
'title': 'youtube-dl_test_video.mp4',
},
}
def _real_extract(self, url):
video_id = self._match_id(url)
orig_webpage = self._download_webpage(url, video_id)
fields = re.findall(
r'type="hidden" name="([^"]+)"\s+value="([^"]+)">', orig_webpage)
data = dict(fields)
self._sleep(2, video_id)
post = compat_urllib_parse.urlencode(data)
headers = {
b'Content-Type': b'application/x-www-form-urlencoded',
}
req = compat_urllib_request.Request(url, post, headers)
webpage = self._download_webpage(
req, video_id, note='Downloading video page ...')
title = os.path.splitext(data['fname'])[0]
video_url = self._search_regex(
r'file: "?(.+?)",', webpage, 'video URL')
return {
'id': video_id,
'title': title,
'url': video_url,
}
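The played.to flow above is: scrape the hidden `<input>` fields, wait (the site rejects instant submissions), then POST them back to the same URL. The same steps with plain urllib and an invented HTML snippet; no request is actually sent here:

```python
import re
import time
try:
    from urllib.parse import urlencode       # Python 3
    from urllib.request import Request
except ImportError:
    from urllib import urlencode              # Python 2
    from urllib2 import Request

orig_webpage = '''
<input type="hidden" name="op" value="download1">
<input type="hidden" name="fname" value="youtube-dl_test_video.mp4">
'''
fields = re.findall(
    r'type="hidden" name="([^"]+)"\s+value="([^"]+)">', orig_webpage)
data = dict(fields)
time.sleep(2)  # mirrors self._sleep(2, video_id)
req = Request('http://played.to/j2f2sfiiukgt',
              urlencode(data).encode('utf-8'),
              {'Content-Type': 'application/x-www-form-urlencoded'})
print(data['fname'])  # -> youtube-dl_test_video.mp4
```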

View File

@ -144,7 +144,7 @@ class ProSiebenSat1IE(InfoExtractor):
'id': '2156342',
'ext': 'mp4',
'title': 'Kurztrips zum Valentinstag',
'description': 'md5:8ba6301e70351ae0bedf8da00f7ba528',
'description': 'Romantischer Kurztrip zum Valentinstag? Wir verraten, was sich hier wirklich lohnt.',
'duration': 307.24,
},
'params': {
@ -180,12 +180,10 @@ class ProSiebenSat1IE(InfoExtractor):
]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
page = self._download_webpage(url, video_id, 'Downloading page')
clip_id = self._html_search_regex(self._CLIPID_REGEXES, page, 'clip id')
clip_id = self._html_search_regex(self._CLIPID_REGEXES, webpage, 'clip id')
access_token = 'testclient'
client_name = 'kolibri-1.2.5'
@ -234,12 +232,12 @@ class ProSiebenSat1IE(InfoExtractor):
urls = self._download_json(url_api_url, clip_id, 'Downloading urls JSON')
title = self._html_search_regex(self._TITLE_REGEXES, page, 'title')
description = self._html_search_regex(self._DESCRIPTION_REGEXES, page, 'description', fatal=False)
thumbnail = self._og_search_thumbnail(page)
title = self._html_search_regex(self._TITLE_REGEXES, webpage, 'title')
description = self._html_search_regex(self._DESCRIPTION_REGEXES, webpage, 'description', fatal=False)
thumbnail = self._og_search_thumbnail(webpage)
upload_date = unified_strdate(self._html_search_regex(
self._UPLOAD_DATE_REGEXES, page, 'upload date', default=None))
self._UPLOAD_DATE_REGEXES, webpage, 'upload date', default=None))
formats = []

View File

@ -9,7 +9,6 @@ from ..utils import (
compat_urllib_parse,
unified_strdate,
str_to_int,
int_or_none,
)
from ..aes import aes_decrypt_text
@ -40,31 +39,42 @@ class SpankwireIE(InfoExtractor):
req.add_header('Cookie', 'age_verified=1')
webpage = self._download_webpage(req, video_id)
title = self._html_search_regex(r'<h1>([^<]+)', webpage, 'title')
title = self._html_search_regex(
r'<h1>([^<]+)', webpage, 'title')
description = self._html_search_regex(
r'<div\s+id="descriptionContent">([^<]+)<', webpage, 'description', fatal=False)
r'<div\s+id="descriptionContent">([^<]+)<',
webpage, 'description', fatal=False)
thumbnail = self._html_search_regex(
r'flashvars\.image_url = "([^"]+)', webpage, 'thumbnail', fatal=False)
r'playerData\.screenShot\s*=\s*["\']([^"\']+)["\']',
webpage, 'thumbnail', fatal=False)
uploader = self._html_search_regex(
r'by:\s*<a [^>]*>(.+?)</a>', webpage, 'uploader', fatal=False)
r'by:\s*<a [^>]*>(.+?)</a>',
webpage, 'uploader', fatal=False)
uploader_id = self._html_search_regex(
r'by:\s*<a href="/Profile\.aspx\?.*?UserId=(\d+).*?"', webpage, 'uploader id', fatal=False)
upload_date = self._html_search_regex(r'</a> on (.+?) at \d+:\d+', webpage, 'upload date', fatal=False)
if upload_date:
upload_date = unified_strdate(upload_date)
view_count = self._html_search_regex(
r'<div id="viewsCounter"><span>([^<]+)</span> views</div>', webpage, 'view count', fatal=False)
if view_count:
view_count = str_to_int(view_count)
comment_count = int_or_none(self._html_search_regex(
r'<span id="spCommentCount">\s*(\d+)</span> Comments</div>', webpage, 'comment count', fatal=False))
r'by:\s*<a href="/Profile\.aspx\?.*?UserId=(\d+).*?"',
webpage, 'uploader id', fatal=False)
upload_date = unified_strdate(self._html_search_regex(
r'</a> on (.+?) at \d+:\d+',
webpage, 'upload date', fatal=False))
video_urls = list(map(compat_urllib_parse.unquote , re.findall(r'flashvars\.quality_[0-9]{3}p = "([^"]+)', webpage)))
view_count = str_to_int(self._html_search_regex(
r'<div id="viewsCounter"><span>([\d,\.]+)</span> views</div>',
webpage, 'view count', fatal=False))
comment_count = str_to_int(self._html_search_regex(
r'Comments<span[^>]+>\s*\(([\d,\.]+)\)</span>',
webpage, 'comment count', fatal=False))
video_urls = list(map(
compat_urllib_parse.unquote,
re.findall(r'playerData\.cdnPath[0-9]{3,}\s*=\s*["\']([^"\']+)["\']', webpage)))
if re.search(r'flashvars\.encrypted\s*=\s*"true"', webpage):
password = self._html_search_regex(r'flashvars\.video_title = "([^"]+)', webpage, 'password').replace('+', ' ')
video_urls = list(map(lambda s: aes_decrypt_text(s, password, 32).decode('utf-8'), video_urls))
password = self._html_search_regex(
r'flashvars\.video_title = "([^"]+)',
webpage, 'password').replace('+', ' ')
video_urls = list(map(
lambda s: aes_decrypt_text(s, password, 32).decode('utf-8'),
video_urls))
formats = []
for video_url in video_urls:

View File

@ -0,0 +1,92 @@
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import ExtractorError
class Sport5IE(InfoExtractor):
_VALID_URL = r'http://(?:(?:www|vod)\.)?sport5\.co\.il/.*\b(?:Vi|docID)=(?P<id>\d+)'
_TESTS = [
{
'url': 'http://vod.sport5.co.il/?Vc=147&Vi=176331&Page=1',
'info_dict': {
'id': 's5-Y59xx1-GUh2',
'ext': 'mp4',
'title': 'ולנסיה-קורדובה 0:3',
'description': 'אלקאסר, גאייה ופגולי סידרו לקבוצה של נונו ניצחון על קורדובה ואת המקום הראשון בליגה',
'duration': 228,
'categories': list,
},
'skip': 'Blocked outside of Israel',
}, {
'url': 'http://www.sport5.co.il/articles.aspx?FolderID=3075&docID=176372&lang=HE',
'info_dict': {
'id': 's5-SiXxx1-hKh2',
'ext': 'mp4',
'title': 'GOALS_CELTIC_270914.mp4',
'description': '',
'duration': 87,
'categories': list,
},
'skip': 'Blocked outside of Israel',
}
]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
media_id = mobj.group('id')
webpage = self._download_webpage(url, media_id)
video_id = self._html_search_regex(r'clipId=([\w-]+)', webpage, 'video id')
metadata = self._download_xml(
'http://sport5-metadata-rr-d.nsacdn.com/vod/vod/%s/HDS/metadata.xml' % video_id,
video_id)
error = metadata.find('./Error')
if error is not None:
raise ExtractorError(
'%s returned error: %s - %s' % (
self.IE_NAME,
error.find('./Name').text,
error.find('./Description').text),
expected=True)
title = metadata.find('./Title').text
description = metadata.find('./Description').text
duration = int(metadata.find('./Duration').text)
posters_el = metadata.find('./PosterLinks')
thumbnails = [{
'url': thumbnail.text,
'width': int(thumbnail.get('width')),
'height': int(thumbnail.get('height')),
} for thumbnail in posters_el.findall('./PosterIMG')] if posters_el is not None else []
categories_el = metadata.find('./Categories')
categories = [
cat.get('name') for cat in categories_el.findall('./Category')
] if categories_el is not None else []
formats = [{
'url': fmt.text,
'ext': 'mp4',
'vbr': int(fmt.get('bitrate')),
'width': int(fmt.get('width')),
'height': int(fmt.get('height')),
} for fmt in metadata.findall('./PlaybackLinks/FileURL')]
self._sort_formats(formats)
return {
'id': video_id,
'title': title,
'description': description,
'thumbnails': thumbnails,
'duration': duration,
'categories': categories,
'formats': formats,
}
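An offline sketch of the Sport5 metadata handling: the same ElementTree lookups as above, run against a tiny invented metadata.xml document:

```python
import xml.etree.ElementTree as ET

metadata = ET.fromstring('''
<Content>
  <Title>GOALS_CELTIC_270914.mp4</Title>
  <Duration>87</Duration>
  <PlaybackLinks>
    <FileURL bitrate="1200" width="1280" height="720">http://cdn.example/hd.mp4</FileURL>
    <FileURL bitrate="600" width="640" height="360">http://cdn.example/sd.mp4</FileURL>
  </PlaybackLinks>
</Content>
''')

assert metadata.find('./Error') is None  # same guard as in the extractor
formats = [{
    'url': fmt.text,
    'vbr': int(fmt.get('bitrate')),
    'width': int(fmt.get('width')),
    'height': int(fmt.get('height')),
} for fmt in metadata.findall('./PlaybackLinks/FileURL')]
print(metadata.find('./Title').text, len(formats))  # -> GOALS_CELTIC_270914.mp4 2
```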

View File

@ -17,11 +17,11 @@ class SportDeutschlandIE(InfoExtractor):
'info_dict': {
'id': 'live-li-ning-badminton-weltmeisterschaft-2014-kopenhagen',
'ext': 'mp4',
'title': 'LIVE: Li-Ning Badminton Weltmeisterschaft 2014 Kopenhagen',
'title': 're:Li-Ning Badminton Weltmeisterschaft 2014 Kopenhagen',
'categories': ['Badminton'],
'view_count': int,
'thumbnail': 're:^https?://.*\.jpg$',
'description': 're:^Die Badminton-WM 2014 aus Kopenhagen LIVE',
'description': 're:Die Badminton-WM 2014 aus Kopenhagen bei Sportdeutschland\.TV',
'timestamp': int,
'upload_date': 're:^201408[23][0-9]$',
},

View File

@ -39,10 +39,10 @@ class SunPornoIE(InfoExtractor):
r'poster="([^"]+)"', webpage, 'thumbnail', fatal=False)
duration = parse_duration(self._search_regex(
r'<span>Duration: (\d+:\d+)</span>', webpage, 'duration', fatal=False))
r'Duration:\s*(\d+:\d+)\s*<', webpage, 'duration', fatal=False))
view_count = int_or_none(self._html_search_regex(
r'<span class="views">(\d+)</span>', webpage, 'view count', fatal=False))
r'class="views">\s*(\d+)\s*<', webpage, 'view count', fatal=False))
comment_count = int_or_none(self._html_search_regex(
r'(\d+)</b> Comments?', webpage, 'comment count', fatal=False))

View File

@ -0,0 +1,104 @@
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
clean_html,
compat_urllib_request,
float_or_none,
parse_iso8601,
)
class TapelyIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?tape\.ly/(?P<id>[A-Za-z0-9\-_]+)(?:/(?P<songnr>\d+))?'
_API_URL = 'http://tape.ly/showtape?id={0:}'
_S3_SONG_URL = 'http://mytape.s3.amazonaws.com/{0:}'
_SOUNDCLOUD_SONG_URL = 'http://api.soundcloud.com{0:}'
_TESTS = [
{
'url': 'http://tape.ly/my-grief-as-told-by-water',
'info_dict': {
'id': 23952,
'title': 'my grief as told by water',
'thumbnail': 're:^https?://.*\.png$',
'uploader_id': 16484,
'timestamp': 1411848286,
'description': 'For Robin and Ponkers, whom the tides of life have taken out to sea.',
},
'playlist_count': 13,
},
{
'url': 'http://tape.ly/my-grief-as-told-by-water/1',
'md5': '79031f459fdec6530663b854cbc5715c',
'info_dict': {
'id': 258464,
'title': 'Dreaming Awake (My Brightest Diamond)',
'ext': 'm4a',
},
},
]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
display_id = mobj.group('id')
playlist_url = self._API_URL.format(display_id)
request = compat_urllib_request.Request(playlist_url)
request.add_header('X-Requested-With', 'XMLHttpRequest')
request.add_header('Accept', 'application/json')
playlist = self._download_json(request, display_id)
tape = playlist['tape']
entries = []
for s in tape['songs']:
song = s['song']
entry = {
'id': song['id'],
'duration': float_or_none(song.get('songduration'), 1000),
'title': song['title'],
}
if song['source'] == 'S3':
entry.update({
'url': self._S3_SONG_URL.format(song['filename']),
})
entries.append(entry)
elif song['source'] == 'YT':
self.to_screen('YouTube video detected')
yt_id = song['filename'].replace('/youtube/', '')
entry.update(self.url_result(yt_id, 'Youtube', video_id=yt_id))
entries.append(entry)
elif song['source'] == 'SC':
self.to_screen('SoundCloud song detected')
sc_url = self._SOUNDCLOUD_SONG_URL.format(song['filename'])
entry.update(self.url_result(sc_url, 'Soundcloud'))
entries.append(entry)
else:
self.report_warning('Unknown song source: %s' % song['source'])
if mobj.group('songnr'):
songnr = int(mobj.group('songnr')) - 1
try:
return entries[songnr]
except IndexError:
raise ExtractorError(
'No song with index: %s' % mobj.group('songnr'),
expected=True)
return {
'_type': 'playlist',
'id': tape['id'],
'display_id': display_id,
'title': tape['name'],
'entries': entries,
'thumbnail': tape.get('image_url'),
'description': clean_html(tape.get('subtext')),
'like_count': tape.get('likescount'),
'uploader_id': tape.get('user_id'),
'timestamp': parse_iso8601(tape.get('published_at')),
}
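The per-song dispatch in the loop above, distilled: a tape mixes self-hosted (S3), YouTube and SoundCloud entries, and each source needs a different URL shape. Sample song dicts are invented:

```python
_S3_SONG_URL = 'http://mytape.s3.amazonaws.com/{0:}'
_SOUNDCLOUD_SONG_URL = 'http://api.soundcloud.com{0:}'

def song_url(song):
    if song['source'] == 'S3':
        return _S3_SONG_URL.format(song['filename'])
    if song['source'] == 'YT':
        # filename holds '/youtube/<id>'; the bare ID goes to the
        # YouTube extractor via url_result()
        return song['filename'].replace('/youtube/', '')
    if song['source'] == 'SC':
        return _SOUNDCLOUD_SONG_URL.format(song['filename'])
    return None  # the extractor only warns about unknown sources

print(song_url({'source': 'S3', 'filename': 'song.m4a'}))
print(song_url({'source': 'YT', 'filename': '/youtube/dQw4w9WgXcQ'}))
print(song_url({'source': 'SC', 'filename': '/tracks/12345'}))
```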

View File

@ -149,7 +149,7 @@ class TEDIE(SubtitlesInfoExtractor):
thumbnail = 'http://' + thumbnail
return {
'id': video_id,
'title': talk_info['title'],
'title': talk_info['title'].strip(),
'uploader': talk_info['speaker'],
'thumbnail': thumbnail,
'description': self._og_search_description(webpage),

View File

@ -26,8 +26,7 @@ class THVideoIE(InfoExtractor):
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
video_id = self._match_id(url)
# extract download link from mobile player page
webpage_player = self._download_webpage(
@ -57,3 +56,29 @@ class THVideoIE(InfoExtractor):
'description': description,
'upload_date': upload_date
}
class THVideoPlaylistIE(InfoExtractor):
_VALID_URL = r'http?://(?:www\.)?thvideo\.tv/mylist(?P<id>[0-9]+)'
_TEST = {
'url': 'http://thvideo.tv/mylist2',
'info_dict': {
'id': '2',
'title': '幻想万華鏡',
},
'playlist_mincount': 23,
}
def _real_extract(self, url):
playlist_id = self._match_id(url)
webpage = self._download_webpage(url, playlist_id)
list_title = self._html_search_regex(
r'<h1 class="show_title">(.*?)<b id', webpage, 'playlist title',
fatal=False)
entries = [
self.url_result('http://thvideo.tv/v/th' + video_id, 'THVideo')
for video_id in re.findall(r'<dd><a href="http://thvideo.tv/v/th(\d+)/" target=', webpage)]
return self.playlist_result(entries, playlist_id, list_title)

View File

@ -17,16 +17,16 @@ class TvigleIE(InfoExtractor):
_TESTS = [
{
'url': 'http://www.tvigle.ru/video/brat-2/',
'md5': '72cb7eab33e54314e1790da402d3c9c3',
'url': 'http://www.tvigle.ru/video/brat/',
'md5': 'ff4344a4894b0524441fb6f8218dc716',
'info_dict': {
'id': '5119390',
'display_id': 'brat-2',
'id': '5118490',
'display_id': 'brat',
'ext': 'mp4',
'title': 'Брат 2 ',
'description': 'md5:5751f4fe345a58e1692585c361294bd8',
'duration': 7356.369,
'age_limit': 0,
'title': 'Брат',
'description': 'md5:d16ac7c0b47052ea51fddb92c4e413eb',
'duration': 5722.6,
'age_limit': 16,
},
},
{
@ -71,6 +71,7 @@ class TvigleIE(InfoExtractor):
'format_id': '%s-%s' % (vcodec, quality),
'vcodec': vcodec,
'height': int(quality[:-1]),
'filesize': item['video_files_size'][vcodec][quality],
})
self._sort_formats(formats)

View File

@ -19,7 +19,7 @@ class Vbox7IE(InfoExtractor):
'md5': '99f65c0c9ef9b682b97313e052734c3f',
'info_dict': {
'id': '249bb972c2',
'ext': 'flv',
'ext': 'mp4',
'title': 'Смях! Чудо - чист за секунди - Скрита камера',
},
}
@ -50,7 +50,6 @@ class Vbox7IE(InfoExtractor):
return {
'id': video_id,
'url': final_url,
'ext': 'flv',
'title': title,
'thumbnail': thumbnail_url,
}

View File

@ -5,7 +5,7 @@ import xml.etree.ElementTree
from .common import InfoExtractor
from ..utils import (
compat_HTTPError,
compat_urllib_request,
ExtractorError,
)
@ -24,7 +24,7 @@ class VevoIE(InfoExtractor):
_TESTS = [{
'url': 'http://www.vevo.com/watch/hurts/somebody-to-die-for/GB1101300280',
"md5": "06bea460acb744eab74a9d7dcb4bfd61",
"md5": "95ee28ee45e70130e3ab02b0f579ae23",
'info_dict': {
'id': 'GB1101300280',
'ext': 'mp4',
@ -40,7 +40,7 @@ class VevoIE(InfoExtractor):
}, {
'note': 'v3 SMIL format',
'url': 'http://www.vevo.com/watch/cassadee-pope/i-wish-i-could-break-your-heart/USUV71302923',
'md5': '893ec0e0d4426a1d96c01de8f2bdff58',
'md5': 'f6ab09b034f8c22969020b042e5ac7fc',
'info_dict': {
'id': 'USUV71302923',
'ext': 'mp4',
@ -69,6 +69,21 @@ class VevoIE(InfoExtractor):
}]
_SMIL_BASE_URL = 'http://smil.lvl3.vevo.com/'
def _real_initialize(self):
req = compat_urllib_request.Request(
'http://www.vevo.com/auth', data=b'')
webpage = self._download_webpage(
req, None,
note='Retrieving oauth token',
errnote='Unable to retrieve oauth token',
fatal=False)
if webpage is False:
self._oauth_token = None
else:
self._oauth_token = self._search_regex(
r'access_token":\s*"([^"]+)"',
webpage, 'access token', fatal=False)
def _formats_from_json(self, video_info):
last_version = {'version': -1}
for version in video_info['videoVersions']:
@ -129,6 +144,26 @@ class VevoIE(InfoExtractor):
})
return formats
def _download_api_formats(self, video_id):
if not self._oauth_token:
self._downloader.report_warning(
'No oauth token available, skipping API HLS download')
return []
api_url = 'https://apiv2.vevo.com/video/%s/streams/hls?token=%s' % (
video_id, self._oauth_token)
api_data = self._download_json(
api_url, video_id,
note='Downloading HLS formats',
errnote='Failed to download HLS format list', fatal=False)
if api_data is None:
return []
m3u8_url = api_data[0]['url']
return self._extract_m3u8_formats(
m3u8_url, video_id, entry_protocol='m3u8_native', ext='mp4',
preference=0)
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
@ -152,30 +187,8 @@ class VevoIE(InfoExtractor):
else:
age_limit = None
# Download SMIL
smil_blocks = sorted((
f for f in video_info['videoVersions']
if f['sourceType'] == 13),
key=lambda f: f['version'])
smil_url = '%s/Video/V2/VFILE/%s/%sr.smil' % (
self._SMIL_BASE_URL, video_id, video_id.lower())
if smil_blocks:
smil_url_m = self._search_regex(
r'url="([^"]+)"', smil_blocks[-1]['data'], 'SMIL URL',
fatal=False)
if smil_url_m is not None:
smil_url = smil_url_m
try:
smil_xml = self._download_webpage(smil_url, video_id,
'Downloading SMIL info')
formats.extend(self._formats_from_smil(smil_xml))
except ExtractorError as ee:
if not isinstance(ee.cause, compat_HTTPError):
raise
self._downloader.report_warning(
'Cannot download SMIL information, falling back to JSON ..')
# Download via HLS API
formats.extend(self._download_api_formats(video_id))
self._sort_formats(formats)
timestamp_ms = int(self._search_regex(

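The oauth bootstrap in `_real_initialize()` above, exercised against a canned response body; the regex is the one from the diff, while the JSON shape of the /auth reply is an assumption:

```python
import re

auth_response = '{"access_token":"abc123-example","expires_in":3600}'  # invented
mobj = re.search(r'access_token":\s*"([^"]+)"', auth_response)
oauth_token = mobj.group(1) if mobj else None
print(oauth_token)  # -> abc123-example
```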
View File

@ -31,7 +31,7 @@ class VGTVIE(InfoExtractor):
'url': 'http://www.vgtv.no/#!/live/100764/opptak-vgtv-foelger-em-kvalifiseringen',
'info_dict': {
'id': '100764',
'ext': 'mp4',
'ext': 'flv',
'title': 'OPPTAK: VGTV følger EM-kvalifiseringen',
'description': 'md5:3772d9c0dc2dff92a886b60039a7d4d3',
'thumbnail': 're:^https?://.*\.jpg',
@ -50,7 +50,7 @@ class VGTVIE(InfoExtractor):
'url': 'http://www.vgtv.no/#!/live/100015/direkte-her-kan-du-se-laksen-live-fra-suldalslaagen',
'info_dict': {
'id': '100015',
'ext': 'mp4',
'ext': 'flv',
'title': 'DIREKTE: Her kan du se laksen live fra Suldalslågen!',
'description': 'md5:9a60cc23fa349f761628924e56eeec2d',
'thumbnail': 're:^https?://.*\.jpg',

View File

@ -8,17 +8,19 @@ import itertools
from .common import InfoExtractor
from .subtitles import SubtitlesInfoExtractor
from ..utils import (
clean_html,
compat_HTTPError,
compat_urllib_parse,
compat_urllib_request,
clean_html,
get_element_by_attribute,
compat_urlparse,
ExtractorError,
get_element_by_attribute,
InAdvancePagedList,
int_or_none,
RegexNotFoundError,
std_headers,
unsmuggle_url,
urlencode_postdata,
int_or_none,
)
@ -89,6 +91,7 @@ class VimeoIE(VimeoBaseInfoExtractor, SubtitlesInfoExtractor):
'uploader_id': 'openstreetmapus',
'uploader': 'OpenStreetMap US',
'title': 'Andy Allan - Putting the Carto into OpenStreetMap Cartography',
'description': 'md5:380943ec71b89736ff4bf27183233d09',
'duration': 1595,
},
},
@ -103,6 +106,7 @@ class VimeoIE(VimeoBaseInfoExtractor, SubtitlesInfoExtractor):
'uploader': 'The BLN & Business of Software',
'uploader_id': 'theblnbusinessofsoftware',
'duration': 3610,
'description': None,
},
},
{
@ -117,6 +121,7 @@ class VimeoIE(VimeoBaseInfoExtractor, SubtitlesInfoExtractor):
'uploader_id': 'user18948128',
'uploader': 'Jaime Marquínez Ferrándiz',
'duration': 10,
'description': 'This is "youtube-dl password protected test video" by Jaime Marquínez Ferrándiz on Vimeo, the home for high quality videos and the people who love them.',
},
'params': {
'videopassword': 'youtube-dl',
@ -203,6 +208,7 @@ class VimeoIE(VimeoBaseInfoExtractor, SubtitlesInfoExtractor):
# Extract ID from URL
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
orig_url = url
if mobj.group('pro') or mobj.group('player'):
url = 'http://player.vimeo.com/video/' + video_id
@ -273,18 +279,23 @@ class VimeoIE(VimeoBaseInfoExtractor, SubtitlesInfoExtractor):
_, video_thumbnail = sorted((int(width if width.isdigit() else 0), t_url) for (width, t_url) in video_thumbs.items())[-1]
# Extract video description
video_description = None
try:
video_description = get_element_by_attribute("class", "description_wrapper", webpage)
if video_description:
video_description = clean_html(video_description)
except AssertionError as err:
# On some pages like (http://player.vimeo.com/video/54469442) the
# html tags are not closed, python 2.6 cannot handle it
if err.args[0] == 'we should not get here!':
pass
else:
raise
video_description = self._html_search_regex(
r'(?s)<div\s+class="[^"]*description[^"]*"[^>]*>(.*?)</div>',
webpage, 'description', default=None)
if not video_description:
video_description = self._html_search_meta(
'description', webpage, default=None)
if not video_description and mobj.group('pro'):
orig_webpage = self._download_webpage(
orig_url, video_id,
note='Downloading webpage for description',
fatal=False)
if orig_webpage:
video_description = self._html_search_meta(
'description', orig_webpage, default=None)
if not video_description and not mobj.group('player'):
self._downloader.report_warning('Cannot find video description')
# Extract video duration
video_duration = int_or_none(config["video"].get("duration"))
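The new description logic is a cascade of fallbacks: the description block in the page body first, then the `<meta name="description">` tag. Reduced to a plain function with invented sample HTML; the body regex is the one from the hunk above, while the meta regex is a simple stand-in for `_html_search_meta()`:

```python
import re

def search_description(webpage):
    # First choice: the description <div> in the page body.
    m = re.search(
        r'(?s)<div\s+class="[^"]*description[^"]*"[^>]*>(.*?)</div>', webpage)
    if m:
        return m.group(1).strip()
    # Fallback: the <meta name="description"> tag.
    m = re.search(r'<meta\s+name="description"\s+content="([^"]*)"', webpage)
    return m.group(1) if m else None

print(search_description('<div class="description_wrapper">A fine film.</div>'))
print(search_description('<meta name="description" content="Fallback text">'))
```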
@ -529,3 +540,58 @@ class VimeoWatchLaterIE(VimeoBaseInfoExtractor, VimeoChannelIE):
def _real_extract(self, url):
return self._extract_videos('watchlater', 'https://vimeo.com/home/watchlater')
class VimeoLikesIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?vimeo\.com/user(?P<id>[0-9]+)/likes/?(?:$|[?#]|sort:)'
IE_NAME = 'vimeo:likes'
IE_DESC = 'Vimeo user likes'
_TEST = {
'url': 'https://vimeo.com/user755559/likes/',
'playlist_mincount': 293,
"info_dict": {
"description": "See all the videos urza likes",
"title": 'Videos urza likes',
},
}
def _real_extract(self, url):
user_id = self._match_id(url)
webpage = self._download_webpage(url, user_id)
page_count = self._int(
self._search_regex(
r'''(?x)<li><a\s+href="[^"]+"\s+data-page="([0-9]+)">
.*?</a></li>\s*<li\s+class="pagination_next">
''', webpage, 'page count'),
'page count', fatal=True)
PAGE_SIZE = 12
title = self._html_search_regex(
r'(?s)<h1>(.+?)</h1>', webpage, 'title', fatal=False)
description = self._html_search_meta('description', webpage)
def _get_page(idx):
page_url = '%s//vimeo.com/user%s/likes/page:%d/sort:date' % (
self.http_scheme(), user_id, idx + 1)
webpage = self._download_webpage(
page_url, user_id,
note='Downloading page %d/%d' % (idx + 1, page_count))
video_list = self._search_regex(
r'(?s)<ol class="js-browse_list[^"]+"[^>]*>(.*?)</ol>',
webpage, 'video content')
paths = re.findall(
r'<li[^>]*>\s*<a\s+href="([^"]+)"', video_list)
for path in paths:
yield {
'_type': 'url',
'url': compat_urlparse.urljoin(page_url, path),
}
pl = InAdvancePagedList(_get_page, page_count, PAGE_SIZE)
return {
'_type': 'playlist',
'id': 'user%s_likes' % user_id,
'title': title,
'description': description,
'entries': pl,
}

View File

@ -6,6 +6,7 @@ from .common import InfoExtractor
from ..utils import (
int_or_none,
compat_str,
ExtractorError,
)
@ -16,6 +17,24 @@ class VubeIE(InfoExtractor):
_TESTS = [
{
'url': 'http://vube.com/trending/William+Wei/Y8NUZ69Tf7?t=s',
'md5': 'e7aabe1f8f1aa826b9e4735e1f9cee42',
'info_dict': {
'id': 'Y8NUZ69Tf7',
'ext': 'mp4',
'title': 'Best Drummer Ever [HD]',
'description': 'md5:2d63c4b277b85c2277761c2cf7337d71',
'thumbnail': 're:^https?://.*\.jpg',
'uploader': 'William',
'timestamp': 1406876915,
'upload_date': '20140801',
'duration': 258.051,
'like_count': int,
'dislike_count': int,
'comment_count': int,
'categories': ['amazing', 'hd', 'best drummer ever', 'william wei', 'bucket drumming', 'street drummer', 'epic street drumming'],
},
}, {
'url': 'http://vube.com/Chiara+Grispo+Video+Channel/YL2qNPkqon',
'md5': 'db7aba89d4603dadd627e9d1973946fe',
'info_dict': {
@ -32,7 +51,8 @@ class VubeIE(InfoExtractor):
'dislike_count': int,
'comment_count': int,
'categories': ['pop', 'music', 'cover', 'singing', 'jessie j', 'price tag', 'chiara grispo'],
}
},
'skip': 'Removed due to DMCA',
},
{
'url': 'http://vube.com/SerainaMusic/my-7-year-old-sister-and-i-singing-alive-by-krewella/UeBhTudbfS?t=s&n=1',
@ -51,7 +71,8 @@ class VubeIE(InfoExtractor):
'dislike_count': int,
'comment_count': int,
'categories': ['seraina', 'jessica', 'krewella', 'alive'],
}
},
'skip': 'Removed due to DMCA',
}, {
'url': 'http://vube.com/vote/Siren+Gene/0nmsMY5vEq?n=2&t=s',
'md5': '0584fc13b50f887127d9d1007589d27f',
@ -69,7 +90,8 @@ class VubeIE(InfoExtractor):
'dislike_count': int,
'comment_count': int,
'categories': ['let it go', 'cover', 'idina menzel', 'frozen', 'singing', 'disney', 'siren gene'],
}
},
'skip': 'Removed due to DMCA',
}
]
@ -102,6 +124,11 @@ class VubeIE(InfoExtractor):
self._sort_formats(formats)
if not formats and video.get('vst') == 'dmca':
raise ExtractorError(
'This video has been removed in response to a complaint received under the US Digital Millennium Copyright Act.',
expected=True)
title = video['title']
description = video.get('description')
thumbnail = self._proto_relative_url(video.get('thumbnail_src'), scheme='http:')

View File

@ -5,6 +5,7 @@ import re
from .common import InfoExtractor
from ..utils import (
compat_urllib_parse_urlparse,
ExtractorError,
parse_duration,
qualities,
)
@ -14,13 +15,12 @@ class VuClipIE(InfoExtractor):
_VALID_URL = r'http://(?:m\.)?vuclip\.com/w\?.*?cid=(?P<id>[0-9]+)'
_TEST = {
'url': 'http://m.vuclip.com/w?cid=843902317&fid=63532&z=1007&nvar&frm=index.html&bu=4757321434',
'md5': '92ac9d1ccefec4f0bb474661ab144fcf',
'url': 'http://m.vuclip.com/w?cid=922692425&fid=70295&z=1010&nvar&frm=index.html',
'info_dict': {
'id': '843902317',
'id': '922692425',
'ext': '3gp',
'title': 'Movie Trailer: Noah',
'duration': 139,
'title': 'The Toy Soldiers - Hollywood Movie Trailer',
'duration': 180,
}
}
@ -37,16 +37,32 @@ class VuClipIE(InfoExtractor):
webpage = self._download_webpage(
adfree_url, video_id, note='Download post-ad page')
error_msg = self._html_search_regex(
r'<p class="message">(.*?)</p>', webpage, 'error message',
default=None)
if error_msg:
raise ExtractorError(
'%s said: %s' % (self.IE_NAME, error_msg), expected=True)
# These clowns alternate between two page types
links_code = self._search_regex(
r'(?s)<div class="social align_c".*?>(.*?)<hr\s*/?>', webpage,
'links')
r'''(?xs)
(?:
<img\s+src="/im/play.gif".*?>|
<!--\ player\ end\ -->\s*</div><!--\ thumb\ end-->
)
(.*?)
(?:
<a\s+href="fblike|<div\s+class="social">
)
''', webpage, 'links')
title = self._html_search_regex(
r'<title>(.*?)-\s*Vuclip</title>', webpage, 'title').strip()
quality_order = qualities(['Reg', 'Hi'])
formats = []
for url, q in re.findall(
r'<a href="(?P<url>[^"]+)".*?>(?P<q>[^<]+)</a>', links_code):
r'<a\s+href="(?P<url>[^"]+)".*?>(?:<button[^>]*>)?(?P<q>[^<]+)(?:</button>)?</a>', links_code):
format_id = compat_urllib_parse_urlparse(url).scheme + '-' + q
formats.append({
'format_id': format_id,
@ -56,7 +72,7 @@ class VuClipIE(InfoExtractor):
self._sort_formats(formats)
duration = parse_duration(self._search_regex(
r'\(([0-9:]+)\)</span></h1>', webpage, 'duration', fatal=False))
r'\(([0-9:]+)\)</span>', webpage, 'duration', fatal=False))
return {
'id': video_id,

View File

@ -40,6 +40,7 @@ class WatIE(InfoExtractor):
'upload_date': '20140816',
'duration': 2910,
},
'skip': "Ce contenu n'est pas disponible pour l'instant.",
},
]

View File

@ -1,13 +1,14 @@
from __future__ import unicode_literals
import json
import re
from .common import InfoExtractor
from ..utils import ExtractorError, compat_urllib_request
class WistiaIE(InfoExtractor):
_VALID_URL = r'https?://(?:fast\.)?wistia\.net/embed/iframe/(?P<id>[a-z0-9]+)'
_API_URL = 'http://fast.wistia.com/embed/medias/{0:}.json'
_TEST = {
'url': 'http://fast.wistia.net/embed/iframe/sh7fpupwlt',
@ -24,11 +25,13 @@ class WistiaIE(InfoExtractor):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
webpage = self._download_webpage(url, video_id)
data_json = self._html_search_regex(
r'Wistia\.iframeInit\((.*?), {}\);', webpage, 'video data')
data = json.loads(data_json)
request = compat_urllib_request.Request(self._API_URL.format(video_id))
request.add_header('Referer', url) # Some videos require this.
data_json = self._download_json(request, video_id)
if data_json.get('error'):
raise ExtractorError('Error while getting the playlist',
expected=True)
data = data_json['media']
formats = []
thumbnails = []

View File

@ -13,37 +13,35 @@ class WorldStarHipHopIE(InfoExtractor):
"info_dict": {
"id": "wshh6a7q1ny0G34ZwuIO",
"ext": "mp4",
"title": "Video: KO Of The Week: MMA Fighter Gets Knocked Out By Swift Head Kick!"
"title": "KO Of The Week: MMA Fighter Gets Knocked Out By Swift Head Kick!"
}
}
def _real_extract(self, url):
m = re.match(self._VALID_URL, url)
video_id = m.group('id')
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
webpage_src = self._download_webpage(url, video_id)
m_vevo_id = re.search(r'videoId=(.*?)&amp?',
webpage_src)
m_vevo_id = re.search(r'videoId=(.*?)&amp?', webpage)
if m_vevo_id is not None:
return self.url_result('vevo:%s' % m_vevo_id.group(1), ie='Vevo')
video_url = self._search_regex(
r'so\.addVariable\("file","(.*?)"\)', webpage_src, 'video URL')
r'so\.addVariable\("file","(.*?)"\)', webpage, 'video URL')
if 'youtube' in video_url:
return self.url_result(video_url, ie='Youtube')
video_title = self._html_search_regex(
r"<title>(.*)</title>", webpage_src, 'title')
r'(?s)<div class="content-heading">\s*<h1>(.*?)</h1>',
webpage, 'title')
# Get the thumbnail; if it is missing, fall back to the correct title for WSHH candy videos.
thumbnail = self._html_search_regex(
r'rel="image_src" href="(.*)" />', webpage_src, 'thumbnail',
r'rel="image_src" href="(.*)" />', webpage, 'thumbnail',
fatal=False)
if not thumbnail:
_title = r"""candytitles.*>(.*)</span>"""
mobj = re.search(_title, webpage_src)
_title = r'candytitles.*>(.*)</span>'
mobj = re.search(_title, webpage)
if mobj is not None:
video_title = mobj.group(1)

View File

@ -37,16 +37,6 @@ class YahooIE(InfoExtractor):
'description': 'Agent Topple\'s mustache does its dirty work, and Nicole brokers a deal for peace. But why is the NSA collecting millions of Instagram brunch photos? And if your waffles have nothing to hide, what are they so worried about?',
},
},
{
'url': 'https://movies.yahoo.com/video/world-loves-spider-man-190819223.html',
'md5': '410b7104aa9893b765bc22787a22f3d9',
'info_dict': {
'id': '516ed8e2-2c4f-339f-a211-7a8b49d30845',
'ext': 'mp4',
'title': 'The World Loves Spider-Man',
'description': '''People all over the world are celebrating the release of \"The Amazing Spider-Man 2.\" We're taking a look at the enthusiastic response Spider-Man has received from viewers all over the world.''',
}
},
{
'url': 'https://screen.yahoo.com/community/community-sizzle-reel-203225340.html?format=embed',
'md5': '60e8ac193d8fb71997caa8fce54c6460',

View File

@ -0,0 +1,52 @@
# coding: utf-8
from __future__ import unicode_literals
import re
import json
from .common import InfoExtractor
from ..utils import compat_urllib_parse
class YnetIE(InfoExtractor):
_VALID_URL = r'http://(?:.+?\.)?ynet\.co\.il/(?:.+?/)?0,7340,(?P<id>L(?:-[0-9]+)+),00\.html'
_TESTS = [
{
'url': 'http://hot.ynet.co.il/home/0,7340,L-11659-99244,00.html',
'md5': '4b29cb57c3dddd57642b3f051f535b07',
'info_dict': {
'id': 'L-11659-99244',
'ext': 'flv',
'title': 'איש לא יודע מאיפה באנו',
'thumbnail': 're:^https?://.*\.jpg',
}
}, {
'url': 'http://hot.ynet.co.il/home/0,7340,L-8859-84418,00.html',
'md5': '8194c2ea221e9a639cac96b6b0753dc5',
'info_dict': {
'id': 'L-8859-84418',
'ext': 'flv',
'title': "צפו: הנשיקה הלוהטת של תורגי' ויוליה פלוטקין",
'thumbnail': 're:^https?://.*\.jpg',
}
}
]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
content = compat_urllib_parse.unquote_plus(self._og_search_video_url(webpage))
config = json.loads(self._search_regex(r'config=({.+?})$', content, 'video config'))
f4m_url = config['clip']['url']
title = self._og_search_title(webpage)
m = re.search(r'ynet - HOT -- (["\']+)(?P<title>.+?)\1', title)
if m:
title = m.group('title')
return {
'id': video_id,
'title': title,
'formats': self._extract_f4m_formats(f4m_url, video_id),
'thumbnail': self._og_search_thumbnail(webpage),
}
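An offline sketch of the Ynet flow: the og:video URL carries a URL-encoded `config=` JSON blob whose `clip.url` field is the f4m manifest. Sample values invented:

```python
import json
import re
try:
    from urllib.parse import unquote_plus   # Python 3
except ImportError:
    from urllib import unquote_plus          # Python 2

og_video = ('http://player.example/swf?config='
            '%7B%22clip%22%3A%7B%22url%22%3A%22http%3A//cdn.example/video.f4m%22%7D%7D')
content = unquote_plus(og_video)
config = json.loads(re.search(r'config=({.+?})$', content).group(1))
print(config['clip']['url'])  # -> http://cdn.example/video.f4m
```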

View File

@ -1,6 +1,7 @@
# coding: utf-8
import json
from __future__ import unicode_literals
import math
import random
import re
@ -13,18 +14,25 @@ from ..utils import (
class YoukuIE(InfoExtractor):
_VALID_URL = r'(?:(?:http://)?(?:v|player)\.youku\.com/(?:v_show/id_|player\.php/sid/)|youku:)(?P<ID>[A-Za-z0-9]+)(?:\.html|/v\.swf|)'
_TEST = {
u"url": u"http://v.youku.com/v_show/id_XNDgyMDQ2NTQw.html",
u"file": u"XNDgyMDQ2NTQw_part00.flv",
u"md5": u"ffe3f2e435663dc2d1eea34faeff5b5b",
u"params": {u"test": False},
u"info_dict": {
u"title": u"youtube-dl test video \"'/\\ä↭𝕐"
_VALID_URL = r'''(?x)
(?:
http://(?:v|player)\.youku\.com/(?:v_show/id_|player\.php/sid/)|
youku:)
(?P<id>[A-Za-z0-9]+)(?:\.html|/v\.swf|)
'''
_TEST = {
'url': 'http://v.youku.com/v_show/id_XNDgyMDQ2NTQw.html',
'md5': 'ffe3f2e435663dc2d1eea34faeff5b5b',
'params': {
'test': False
},
'info_dict': {
'id': 'XNDgyMDQ2NTQw_part00',
'ext': 'flv',
'title': 'youtube-dl test video "\'/\\ä↭𝕐'
}
}
def _gen_sid(self):
nowTime = int(time.time() * 1000)
random1 = random.randint(1000,1998)
@ -55,49 +63,42 @@ class YoukuIE(InfoExtractor):
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
if mobj is None:
raise ExtractorError(u'Invalid URL: %s' % url)
video_id = mobj.group('ID')
video_id = mobj.group('id')
info_url = 'http://v.youku.com/player/getPlayList/VideoIDS/' + video_id
jsondata = self._download_webpage(info_url, video_id)
config = self._download_json(info_url, video_id)
self.report_extraction(video_id)
try:
config = json.loads(jsondata)
error_code = config['data'][0].get('error_code')
if error_code:
# -8 means blocked outside China.
error = config['data'][0].get('error') # Chinese and English, separated by newline.
raise ExtractorError(error or u'Server reported error %i' % error_code,
expected=True)
error_code = config['data'][0].get('error_code')
if error_code:
# -8 means blocked outside China.
error = config['data'][0].get('error') # Chinese and English, separated by newline.
raise ExtractorError(error or 'Server reported error %i' % error_code,
expected=True)
video_title = config['data'][0]['title']
seed = config['data'][0]['seed']
video_title = config['data'][0]['title']
seed = config['data'][0]['seed']
format = self._downloader.params.get('format', None)
supported_format = list(config['data'][0]['streamfileids'].keys())
format = self._downloader.params.get('format', None)
supported_format = list(config['data'][0]['streamfileids'].keys())
if format is None or format == 'best':
if 'hd2' in supported_format:
format = 'hd2'
else:
format = 'flv'
ext = u'flv'
elif format == 'worst':
format = 'mp4'
ext = u'mp4'
# TODO proper format selection
if format is None or format == 'best':
if 'hd2' in supported_format:
format = 'hd2'
else:
format = 'flv'
ext = u'flv'
ext = 'flv'
elif format == 'worst':
format = 'mp4'
ext = 'mp4'
else:
format = 'flv'
ext = 'flv'
fileid = config['data'][0]['streamfileids'][format]
keys = [s['k'] for s in config['data'][0]['segs'][format]]
# segs is usually a dictionary, but an empty *list* if an error occurred.
except (UnicodeDecodeError, ValueError, KeyError):
raise ExtractorError(u'Unable to extract info section')
fileid = config['data'][0]['streamfileids'][format]
keys = [s['k'] for s in config['data'][0]['segs'][format]]
# segs is usually a dictionary, but an empty *list* if an error occurred.
files_info = []
sid = self._gen_sid()
@ -106,9 +107,8 @@ class YoukuIE(InfoExtractor):
# Characters at indices 8-9 of the fileid carry the segment number,
# so fileid[8:10] is replaced for every segment
for index, key in enumerate(keys):
temp_fileid = '%s%02X%s' % (fileid[0:8], index, fileid[10:])
download_url = 'http://f.youku.com/player/getFlvPath/sid/%s_%02X/st/flv/fileid/%s?k=%s' % (sid, index, temp_fileid, key)
download_url = 'http://k.youku.com/player/getFlvPath/sid/%s_%02X/st/flv/fileid/%s?k=%s' % (sid, index, temp_fileid, key)
info = {
'id': '%s_part%02d' % (video_id, index),

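The segment-id arithmetic from the loop above, run on an invented fileid: the two characters at indices 8-9 encode the segment number as uppercase hex.

```python
fileid = '0300010100ABCDEF1234567890ABCDEF'  # invented
for index in range(3):
    temp_fileid = '%s%02X%s' % (fileid[0:8], index, fileid[10:])
    print(temp_fileid[:12])
# -> 0300010100AB, 0300010101AB, 0300010102AB
```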
View File

@ -26,7 +26,7 @@ from ..utils import (
get_element_by_attribute,
ExtractorError,
int_or_none,
PagedList,
OnDemandPagedList,
unescapeHTML,
unified_strdate,
orderedSet,
@ -655,6 +655,16 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
# Get video webpage
url = proto + '://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1' % video_id
pref_cookies = [
c for c in self._downloader.cookiejar
if c.domain == '.youtube.com' and c.name == 'PREF']
for pc in pref_cookies:
if 'hl=' in pc.value:
pc.value = re.sub(r'hl=[^&]+', 'hl=en', pc.value)
else:
if pc.value:
pc.value += '&'
pc.value += 'hl=en'
video_webpage = self._download_webpage(url, video_id)
# Attempt to extract SWF player URL
@ -1068,6 +1078,13 @@ class YoutubePlaylistIE(YoutubeBaseInfoExtractor):
'info_dict': {
'title': 'JODA15',
}
}, {
'note': 'Embedded SWF player',
'url': 'http://www.youtube.com/p/YN5VISEtHet5D4NEvfTd0zcgFk84NqFZ?hl=en_US&fs=1&rel=0',
'playlist_count': 4,
'info_dict': {
'title': 'JODA7',
}
}]
def _real_initialize(self):
@ -1334,7 +1351,7 @@ class YoutubeUserIE(InfoExtractor):
'id': video_id,
'title': title,
}
url_results = PagedList(download_page, self._GDATA_PAGE_SIZE)
url_results = OnDemandPagedList(download_page, self._GDATA_PAGE_SIZE)
return self.playlist_result(url_results, playlist_title=username)
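The new PREF-cookie block (first hunk of this file) forces `hl=en` so the English-only regexes keep working for users with a localized YouTube. The same string surgery, isolated from the cookiejar:

```python
import re

def force_english(pref_value):
    # Same three cases as the loop above: rewrite an existing hl=, append
    # to a non-empty cookie, or start from scratch.
    if 'hl=' in pref_value:
        return re.sub(r'hl=[^&]+', 'hl=en', pref_value)
    if pref_value:
        return pref_value + '&hl=en'
    return 'hl=en'

print(force_english('f1=50000000&hl=de'))  # -> f1=50000000&hl=en
print(force_english('f1=50000000'))        # -> f1=50000000&hl=en
print(force_english(''))                   # -> hl=en
```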

View File

@ -87,7 +87,7 @@ def parseOpts(overrideArguments=None):
for private_opt in ['-p', '--password', '-u', '--username', '--video-password']:
try:
i = opts.index(private_opt)
opts[i+1] = '<PRIVATE>'
opts[i+1] = 'PRIVATE'
except ValueError:
pass
return opts

View File

@ -673,6 +673,8 @@ class ExtractorError(Exception):
expected = True
if video_id is not None:
msg = video_id + ': ' + msg
if cause:
msg += u' (caused by %r)' % cause
if not expected:
msg = msg + u'; please report this issue on https://yt-dl.org/bug . Be sure to call youtube-dl with the --verbose flag and include its complete output. Make sure you are using the latest version; type youtube-dl -U to update.'
super(ExtractorError, self).__init__(msg)
@ -799,6 +801,12 @@ class YoutubeDLHandler(compat_urllib_request.HTTPHandler):
del req.headers['User-agent']
req.headers['User-agent'] = req.headers['Youtubedl-user-agent']
del req.headers['Youtubedl-user-agent']
if sys.version_info < (2, 7) and '#' in req.get_full_url():
# Python 2.6 is brain-dead when it comes to fragments
req._Request__original = req._Request__original.partition('#')[0]
req._Request__r_type = req._Request__r_type.partition('#')[0]
return req
def http_response(self, req, resp):
@ -884,6 +892,7 @@ def unified_strdate(date_str):
'%d/%m/%Y',
'%d/%m/%y',
'%Y/%m/%d %H:%M:%S',
'%d/%m/%Y %H:%M:%S',
'%Y-%m-%d %H:%M:%S',
'%d.%m.%Y %H:%M',
'%d.%m.%Y %H.%M',
@ -1384,14 +1393,16 @@ def check_executable(exe, args=[]):
class PagedList(object):
def __init__(self, pagefunc, pagesize):
self._pagefunc = pagefunc
self._pagesize = pagesize
def __len__(self):
# This is only useful for tests
return len(self.getslice())
class OnDemandPagedList(PagedList):
def __init__(self, pagefunc, pagesize):
self._pagefunc = pagefunc
self._pagesize = pagesize
def getslice(self, start=0, end=None):
res = []
for pagenum in itertools.count(start // self._pagesize):
@ -1430,6 +1441,35 @@ class PagedList(object):
return res
class InAdvancePagedList(PagedList):
def __init__(self, pagefunc, pagecount, pagesize):
self._pagefunc = pagefunc
self._pagecount = pagecount
self._pagesize = pagesize
def getslice(self, start=0, end=None):
res = []
start_page = start // self._pagesize
end_page = (
self._pagecount if end is None else (end // self._pagesize + 1))
skip_elems = start - start_page * self._pagesize
only_more = None if end is None else end - start
for pagenum in range(start_page, end_page):
page = list(self._pagefunc(pagenum))
if skip_elems:
page = page[skip_elems:]
skip_elems = None
if only_more is not None:
if len(page) < only_more:
only_more -= len(page)
else:
page = page[:only_more]
res.extend(page)
break
res.extend(page)
return res
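A quick, self-contained demonstration of the new `InAdvancePagedList` (logic copied from the class above; the page fetcher stands in for one network request per page):

```python
def fetch_page(pagenum):
    # Stands in for one network request per page (pages of four items).
    return range(pagenum * 4, pagenum * 4 + 4)

class InAdvancePagedList(object):
    def __init__(self, pagefunc, pagecount, pagesize):
        self._pagefunc = pagefunc
        self._pagecount = pagecount
        self._pagesize = pagesize

    def getslice(self, start=0, end=None):
        res = []
        start_page = start // self._pagesize
        end_page = (
            self._pagecount if end is None else (end // self._pagesize + 1))
        skip_elems = start - start_page * self._pagesize
        only_more = None if end is None else end - start
        for pagenum in range(start_page, end_page):
            page = list(self._pagefunc(pagenum))
            if skip_elems:
                page = page[skip_elems:]
                skip_elems = None
            if only_more is not None:
                if len(page) < only_more:
                    only_more -= len(page)
                else:
                    res.extend(page[:only_more])
                    break
            res.extend(page)
        return res

pl = InAdvancePagedList(fetch_page, pagecount=3, pagesize=4)
print(pl.getslice(2, 9))  # -> [2, 3, 4, 5, 6, 7, 8]
```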
def uppercase_escape(s):
unicode_escape = codecs.getdecoder('unicode_escape')
return re.sub(
@ -1540,27 +1580,24 @@ def strip_jsonp(code):
def js_to_json(code):
def fix_kv(m):
key = m.group(2)
if key.startswith("'"):
assert key.endswith("'")
assert '"' not in key
key = '"%s"' % key[1:-1]
elif not key.startswith('"'):
key = '"%s"' % key
value = m.group(4)
if value.startswith("'"):
assert value.endswith("'")
assert '"' not in value
value = '"%s"' % value[1:-1]
return m.group(1) + key + m.group(3) + value
v = m.group(0)
if v in ('true', 'false', 'null'):
return v
if v.startswith('"'):
return v
if v.startswith("'"):
v = v[1:-1]
v = re.sub(r"\\\\|\\'|\"", lambda m: {
'\\\\': '\\\\',
"\\'": "'",
'"': '\\"',
}[m.group(0)], v)
return '"%s"' % v
res = re.sub(r'''(?x)
([{,]\s*)
("[^"]*"|\'[^\']*\'|[a-z0-9A-Z]+)
(:\s*)
([0-9.]+|true|false|"[^"]*"|\'[^\']*\'|\[|\{)
"(?:[^"\\]*(?:\\\\|\\")?)*"|
'(?:[^'\\]*(?:\\\\|\\')?)*'|
[a-zA-Z_][a-zA-Z_0-9]*
''', fix_kv, code)
res = re.sub(r',(\s*\])', lambda m: m.group(1), res)
return res
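The rewritten `js_to_json()` above, copied verbatim and exercised on a few JavaScript-flavoured inputs (the samples are ours, not from the test suite):

```python
import json
import re

def js_to_json(code):
    def fix_kv(m):
        v = m.group(0)
        if v in ('true', 'false', 'null'):
            return v
        if v.startswith('"'):
            return v
        if v.startswith("'"):
            v = v[1:-1]
            v = re.sub(r"\\\\|\\'|\"", lambda m: {
                '\\\\': '\\\\',
                "\\'": "'",
                '"': '\\"',
            }[m.group(0)], v)
        return '"%s"' % v

    res = re.sub(r'''(?x)
        "(?:[^"\\]*(?:\\\\|\\")?)*"|
        '(?:[^'\\]*(?:\\\\|\\')?)*'|
        [a-zA-Z_][a-zA-Z_0-9]*
        ''', fix_kv, code)
    res = re.sub(r',(\s*\])', lambda m: m.group(1), res)
    return res

print(json.loads(js_to_json('{abc: true}')))          # -> {'abc': True}
print(json.loads(js_to_json("{'abc': 'def\\'g'}")))   # -> {'abc': "def'g"}
print(json.loads(js_to_json('{"abc": [1, 2, 3,]}')))  # trailing comma removed
```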

View File

@ -1,2 +1,2 @@
__version__ = '2014.09.24'
__version__ = '2014.10.02'