Compare commits


548 Commits

SHA1 Message Date
9b36dcbd65 release 2014.10.13 2014-10-13 10:12:51 +02:00
2aefb886fa [ffmpeg] Improve format merging (Closes #3935) 2014-10-13 10:12:43 +02:00
72961c2a8a Merge remote-tracking branch 'Dineshs91/KontrTube-fix' 2014-10-13 10:09:57 +02:00
4c1ce987b0 [huffpost] Modernize 2014-10-13 10:08:59 +02:00
8a2300a597 [kontrtube] Fix video title extraction 2014-10-13 10:03:55 +05:30
1cc887cbf0 [youtube] Add support for format 278 2014-10-13 00:09:19 +02:00
203fb43f36 [youtube] Download DASH manifest by default (Closes #3887) 2014-10-13 00:03:08 +02:00
4d7b03f1f2 [zsh-completion] Ignore generated file (#3890) 2014-10-12 23:31:15 +02:00
72ebb5e4b4 Merge remote-tracking branch 'xu-cheng/zsh-completion' 2014-10-12 23:30:17 +02:00
8450c15c25 [options] Consistent formatting and general niceness 2014-10-12 23:10:11 +02:00
b88b45e46c [options] Mention login in --username documentation (#3753) 2014-10-12 22:45:36 +02:00
2417dc1715 release 2014.10.12 2014-10-12 22:25:18 +02:00
23d83ad4d5 [niconico] Fix ignored --netrc flag
See issue #3753
2014-10-12 23:18:42 +03:00
772ece3571 Merge pull request #3932 from Dineshs91/ndr-fix
Description changed
2014-10-12 17:54:51 +02:00
2c9f31188b Description changed 2014-10-12 20:09:12 +05:30
d18be55533 [theonion] Fix a small mistake in string formatting 2014-10-12 15:47:31 +03:00
ac20fc047a [theonion] Add new extractor (closes #3928) 2014-10-12 15:42:35 +03:00
b4c3c8c172 [mixcloud] Fix metadata extraction (fixes #3930) 2014-10-12 13:06:31 +02:00
3357110a4c [vimeo] Make the protocol mandatory in the url (fixes #3926)
If it's missing, it will be correctly handled by the generic IE.
2014-10-11 22:26:26 +02:00
e29fdedb45 Merge pull request #3923 from Dineshs91/howstuffworks-fix
Replace 404 url
2014-10-12 01:48:11 +07:00
4828703f14 [googleplus] Modernize and extract all formats 2014-10-12 01:44:13 +07:00
afe08e0d4a Merge pull request #3924 from Dineshs91/googleplus-fix
Fix download error in GooglePlus
2014-10-12 00:48:58 +07:00
071420e136 Fix download error in GooglePlus 2014-10-11 21:10:53 +05:30
f4cf848d1d Replace 404 url 2014-10-11 15:59:42 +05:30
b7b2ca6e2b Merge pull request #3921 from Dineshs91/ndr-fix
Fix ndr.de outdated test url
2014-10-11 16:07:39 +07:00
1409704afa Fix ndr.de outdated test url 2014-10-11 12:20:13 +05:30
c8e390c2b0 Merge pull request #3911 from Dockheas23/master
KeyError on initialising YoutubeDL in python3 #3910
2014-10-10 22:28:18 +07:00
823f1e015a [yahoo] Wipe out yahoo news extractor 2014-10-10 22:18:37 +07:00
3c06d3715e [yahoo] Generalize, support arbitrary subdomains, support iframe videos, capture error message (Closes #2470) 2014-10-10 22:11:30 +07:00
762958d5af [yahoo] Add support for regional subdomains and extract duration (Closes #3915) 2014-10-10 19:50:29 +07:00
53d9009bdb KeyError on initialising YoutubeDL in python3 #3910 2014-10-10 10:03:24 +01:00
1b725173a5 Fixed typo 2014-10-10 09:35:41 +01:00
0ca41c3d9c [walla] Fix typo 2014-10-09 21:10:07 +07:00
fc6861b175 [sportbox] Add extractor (Closes #3906) 2014-10-09 21:05:39 +07:00
b097b5f246 [mlb] Remove unused import 2014-10-09 20:07:34 +07:00
385009fc44 [mlb] Fix thumbnails extraction (Closes #3905) 2014-10-09 19:56:55 +07:00
ced659bb4d [generic] Ignore some non-video file extensions during generic extraction (Closes #3900) 2014-10-09 19:26:23 +07:00
842cca7d56 [pornhd] Fix formats extraction (Closes #3898) 2014-10-08 20:08:29 +07:00
b3826f6c8d Merge branch 'lenaten-walla' 2014-10-07 22:23:22 +07:00
7bc8780c57 [walla] Fix extractor and add subtitle tests 2014-10-07 22:23:05 +07:00
c59c3c84ed Merge branch 'walla' of https://github.com/lenaten/youtube-dl into lenaten-walla 2014-10-07 20:24:52 +07:00
24f7fb5e1e add zsh completion support into Makefile 2014-10-07 13:19:59 +08:00
3b700f8d43 support zsh completion 2014-10-07 13:19:59 +08:00
31d06400ec add missed init file 2014-10-06 03:03:05 +03:00
642b76ac15 release 2014.10.05.2 2014-10-05 22:04:02 +02:00
4c4de296d4 release 2014.10.05.1 2014-10-05 22:00:16 +02:00
b10609d98c [dailymotion] Alternative title search (Fixes #3882) 2014-10-05 21:59:53 +02:00
3ae165aa10 [gorillavid] Add check for non existing videos 2014-10-06 01:48:01 +07:00
e4b85e35d0 [gorillavid] Fix title extraction and make thumbnail optional (Closes #3884) 2014-10-06 01:47:22 +07:00
bb0c206f59 release 2014.10.05 2014-10-05 07:53:25 +02:00
b81f484b60 [gorillavid] Add support for movpod.in (Fixes #3881) 2014-10-05 07:53:02 +02:00
5e69192ef7 [thesixtyone] Add new extractor (closes #3781) 2014-10-04 22:40:36 +03:00
e9be9a6acd [utils] Add additional format to unified_strdate 2014-10-04 22:38:23 +03:00
f47754f061 [globo] Initial extractor implementation (Closes #3823) 2014-10-04 18:56:36 +07:00
d838b1bd4a [utils] Default age_limit to None
If we can't parse it, it means we don't have any information, not that the content is unrestricted.
2014-10-03 20:17:12 +02:00
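The rationale above drives the parse_age_limit helper added two commits below. A minimal sketch of that behaviour, assuming the conventional 'NN+' rating format (the exact regex is a guess, not necessarily the utils code):

```python
import re

def parse_age_limit(s):
    # None means "we have no information", not 0 ("unrestricted").
    if s is None:
        return None
    m = re.match(r'^(?P<age>\d{1,2})\+?$', s)
    return int(m.group('age')) if m else None
```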
fe506288bd [planetaplay] Add new extractor (closes #3839) 2014-10-03 19:43:36 +03:00
d397c0b3dd [breakcom] Extract all formats 2014-10-03 19:37:47 +07:00
146c80e256 [utils] Add parse_age_limit 2014-10-03 19:37:25 +07:00
f78c01f68b [breakcom] Cover more URLs with _VALID_URL (Closes #3876) 2014-10-03 18:57:18 +07:00
8489578df4 [generic] Support embedded Dailymotion playlists (fixes #3822) 2014-10-02 21:42:45 +03:00
10606050bc release 2014.10.02 2014-10-02 15:30:19 +02:00
d9bf465200 [bandcamp] Fix id extraction and modernize (Closes #3866) 2014-10-02 20:22:46 +07:00
01384d6e4b [jpopsuki] More modernize 2014-10-02 19:58:28 +07:00
08d5230945 [jpopsukitv] Improve _VALID_URL and modernize 2014-10-02 19:43:25 +07:00
852f8641e8 Merge pull request #3865 from diffycat/jpopsuki
[jpopsuki] Support category links
2014-10-02 19:38:29 +07:00
18937a50a4 [dropbox] Fix the video url query (fixes #3868)
Previously it would convert urls without a query into 'something.mp4&dl=1'
2014-10-01 23:19:56 +02:00
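The bug class here is worth a sketch: naively concatenating '&dl=1' breaks on query-less URLs. A hedged illustration of the safer approach (helper name hypothetical, not the extractor's actual code):

```python
from urllib.parse import urlparse, parse_qsl, urlencode, urlunparse

def with_dl_param(url):
    # Appending '&dl=1' blindly yields 'something.mp4&dl=1' when the URL
    # has no query yet; rebuilding the query string avoids that.
    parts = urlparse(url)
    query = dict(parse_qsl(parts.query))
    query['dl'] = '1'
    return urlunparse(parts._replace(query=urlencode(query)))
```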
e4d6cca0c1 [walla] Add new extractor 2014-10-01 23:45:35 +03:00
d5feab9aaa [jpopsuki] Support category links 2014-10-01 23:24:23 +04:00
9e77c60c9a [tapely] Catch SoundCloud tracks 2014-10-01 21:53:45 +03:00
1414df5ce2 [izlesene] Prepend scheme to thumbnails 2014-10-01 21:11:38 +03:00
e80f40e5ca [tapely] Add new extractor (closes #3861) 2014-10-01 17:26:09 +03:00
d3c9af84fc [spankwire] Fix extraction 2014-10-01 20:53:58 +07:00
59d206ca2d [sunporno] Fix duration extraction and make more robust 2014-10-01 20:44:43 +07:00
e7b6d12254 [utils] Improve and test js_to_json 2014-10-01 00:08:34 +02:00
410f3e73ab [utils] Fix js_to_json 2014-10-01 00:08:28 +02:00
07e764439a [generic] Delete test case
The page is not available any more.
2014-10-01 00:08:19 +02:00
f8fb4a7ca8 [nfl] Use compatible urlparse 2014-09-30 20:01:37 +03:00
e497a7f2ca [tvigle] Extract format file sizes 2014-09-30 20:00:21 +07:00
a3b6be104d [tvigle] Replace 404 test 2014-09-30 19:55:30 +07:00
b7bb0df21e [vgtv] Fix tests' exts 2014-09-30 19:50:14 +07:00
4dc19c0982 [lrt] Add new extractor 2014-09-30 02:26:16 +03:00
58ea7ec81e [vimeo] Fix description extraction 2014-09-29 22:23:21 +02:00
c0f64ac689 [test/helper] Improve output of missing test definition dictionaries 2014-09-29 22:19:11 +02:00
7a08ad7d59 [test/helper] Modernize 2014-09-29 22:11:24 +02:00
2d29ac4f23 [vuclip] Fix regexp 2014-09-29 21:48:44 +02:00
a7a747d687 [vuclip] Remove test code 2014-09-29 21:47:57 +02:00
fdb4d278bf [spankwire] Fix extraction and modernize 2014-09-29 20:11:51 +07:00
59c03a9bfb [vuclip] Fix extraction 2014-09-29 13:07:58 +02:00
e7db973328 [yahoo] Remove test case
This video seems to have been removed entirely
2014-09-29 12:45:57 +02:00
99b67fecc5 [arte] Fix upload date extraction 2014-09-29 12:45:18 +02:00
89294b5f50 [README] Explain updating in detail (Fixes #3850) 2014-09-29 12:33:29 +02:00
72d53356f6 [internetvideoarchive] Fix test case 2014-09-29 12:24:48 +02:00
9e1e67fc15 [internetvideoarchive] Modernize 2014-09-29 12:23:52 +02:00
1e60e5546e [funnyordie] Fix test case md5 2014-09-29 12:20:25 +02:00
457749a703 [prosiebensat1] Fix test case 2014-09-29 12:18:49 +02:00
937f935db0 [jukebox] Remove md5 sum, it fluctuates 2014-09-29 12:15:49 +02:00
80bcefcd77 [cliphunter] Remove duration 2014-09-29 06:22:54 +02:00
8c23945c72 [eporner] Adapt to changed default format 2014-09-29 06:19:18 +02:00
989b4b2b86 [utils:YoutubeDLHandler] Work around brain-dead Python 2.6 httplib
In 2.6, the httplib sends fragments! Remove those (fixes generic_26 on 2.6).
2014-09-29 06:15:46 +02:00
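A minimal sketch of this kind of workaround, assuming the fragment is simply dropped before the request is made (not the actual YoutubeDLHandler code):

```python
from urllib.parse import urldefrag  # urlparse.urldefrag on Python 2

def sanitize_request_url(url):
    # Python 2.6's httplib put the '#fragment' on the request line;
    # dropping it before opening the request sidesteps the bug.
    return urldefrag(url)[0]
```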
2a7b4681c6 [godtube] Fix on Python 2.6 2014-09-29 05:51:41 +02:00
8157ae3904 [golem] Fix under 2.6
It's a sad story; 2.6 does not support any non-trivial xpaths.
2014-09-29 05:48:56 +02:00
e50e2fcd4d [br] fix test case 2014-09-29 05:40:20 +02:00
6be451f422 [youtube] Remove swf signature test cases
These files are now 0 Bytes
2014-09-29 05:24:00 +02:00
5e4f06197f [facebook] Fix test case 2014-09-29 05:19:56 +02:00
761e1645e0 [generic] Remove unstable test checksum 2014-09-29 05:18:45 +02:00
8ff14175e2 [sportdeutschland] Fix testcase 2014-09-29 05:17:16 +02:00
dbe3043cd6 [ynet] Fix test checksums 2014-09-29 05:15:42 +02:00
a8eb5a8e61 [generic] Fix testcases 2014-09-29 05:12:57 +02:00
6043f1df4e [ign] Return proper playlist object 2014-09-29 05:05:06 +02:00
12548cd933 [worldstarhiphop] Correct title extraction 2014-09-29 05:02:58 +02:00
2593039522 [vimeo] Use regexps to find description
This fixes descriptions on 2.6 and makes the code simpler.
2014-09-29 04:58:31 +02:00
35d3e63d24 release 2014.09.29.2 2014-09-29 04:49:11 +02:00
27aede9074 [pbs] Add support for series/jwplayer type video (Fixes #3849) 2014-09-29 04:48:50 +02:00
f5b7e6a842 release 2014.09.29.1 2014-09-29 02:04:28 +02:00
a1f934b171 [youtube] Correct language cookie handling 2014-09-29 02:04:16 +02:00
a43ee88c6f release 2014.09.29 2014-09-29 01:51:53 +02:00
e2dce53781 [youtube] Always request webpage in English (Fixes #3844) 2014-09-29 01:39:26 +02:00
1770ed9e86 [thvideo] Simplify (#3848) 2014-09-29 00:38:37 +02:00
457ac58cc7 Merge remote-tracking branch 'diffycat/thvideo-update' 2014-09-29 00:36:55 +02:00
9c44d2429b [vimeo:likes] Support large like lists (Fixes #3847) 2014-09-29 00:36:06 +02:00
d2e32f7df5 Do not use HTML characters in output
This messes up the format when people paste it outside of code tags.
2014-09-29 00:23:43 +02:00
67077b182b [thvideo] Add support for playlists 2014-09-28 23:36:55 +04:00
5f4c318844 [nfl] Support team micro-sites (fixes #3831) 2014-09-28 21:48:26 +03:00
dfee83234b [nfl] Prefer progressive downloads 2014-09-28 19:25:28 +03:00
7f5c0c4a19 [README] Clarify test's md5 filesize (#3846) 2014-09-28 22:10:20 +07:00
4bc77c8417 [README] Use _match_id helper function 2014-09-28 13:52:21 +02:00
22dd3fad86 release 2014.09.28.1 2014-09-28 12:14:25 +02:00
d6e6a42256 [vimeo:likes] Add new extractor (Fixes #3835) 2014-09-28 12:14:16 +02:00
76e7d1e74b [played] Remove unused import 2014-09-28 10:56:36 +02:00
38c4d41b74 [played] Simplify (#3798) 2014-09-28 10:55:27 +02:00
f0b8e3607d Merge remote-tracking branch 'r4mos/played' 2014-09-28 10:52:23 +02:00
51ee08c4bb Remove unused imports 2014-09-28 10:50:43 +02:00
c841789772 [muenchentv] Add thumbnail 2014-09-28 10:49:58 +02:00
c121a75b36 [heise] Add support for description 2014-09-28 10:49:12 +02:00
5a8b77551d [heise] Simplify (#3842) 2014-09-28 10:47:25 +02:00
0217aee154 Merge remote-tracking branch 'd912e3/heise' 2014-09-28 10:36:44 +02:00
b14f3a4c1d [golem] Simplify (#3828) 2014-09-28 10:35:19 +02:00
92f7963f6e Merge remote-tracking branch 'd912e3/golem' 2014-09-28 10:10:34 +02:00
88fbe4c2cc release 2014.09.28 2014-09-28 09:49:42 +02:00
394599f422 [oktoberfesttv] Add new extractor (Fixes #3845) 2014-09-28 09:49:21 +02:00
ed9266db90 [common] Add new helper function _match_id 2014-09-28 09:31:58 +02:00
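The helper's contract is visible in the README diff at the end of this comparison (`video_id = self._match_id(url)`). A minimal sketch, assuming the usual `(?P<id>...)` group in `_VALID_URL`:

```python
import re

class InfoExtractor(object):
    _VALID_URL = None

    def _match_id(self, url):
        # Pull the (?P<id>...) group out of _VALID_URL instead of
        # repeating re.match boilerplate in every extractor.
        mobj = re.match(self._VALID_URL, url)
        assert mobj is not None, 'Invalid URL: %s' % url
        return mobj.group('id')
```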
f4b1c7adb8 [muenchentv] Move live title generation to common 2014-09-28 08:53:52 +02:00
c95eeb7b80 [eitb] Modernize 2014-09-28 08:49:03 +02:00
5e43e3803c Credit @lenaten for ynet (#3840) and sport5 (#3841) 2014-09-28 03:45:15 +07:00
a89435a7a8 [ynet] Improve _VALID_URL 2014-09-28 03:30:41 +07:00
a0a90b3ba1 Merge branch 'lenaten-ynet' 2014-09-28 03:26:47 +07:00
c664182323 [ynet] Remove unused stuff, simplify and improve 2014-09-28 03:26:38 +07:00
6be1cd4ddb Merge branch 'ynet' of https://github.com/lenaten/youtube-dl into lenaten-ynet 2014-09-28 02:56:51 +07:00
ee0d90707a [YoutubeDL] Fix string check for python3 2014-09-28 02:48:41 +07:00
f776d8f608 [sport5] Keep alphanumeric order 2014-09-28 02:35:46 +07:00
b3ac3a51ac Merge branch 'lenaten-sport5' 2014-09-28 02:32:52 +07:00
0b75c2a88b [sport5] Capture error message and improve 2014-09-28 02:31:14 +07:00
7b7518124e [heise] Don't check string type
Before Python 3 this could be unicode, so don't check at all.
2014-09-27 21:12:23 +02:00
68b0973046 [YoutubeDL] Expect all kind of strings in urlopen
Now it doesn't fail if req is Python 2's str
2014-09-28 02:07:42 +07:00
3a203b8bfa Merge branch 'sport5' of https://github.com/lenaten/youtube-dl into lenaten-sport5 2014-09-28 00:55:16 +07:00
70752ccefd [golem] Don't omit positional argument specifiers
Required by Python 2.6.
2014-09-27 19:35:55 +02:00
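Concretely, Python 2.6's str.format requires explicit indices; auto-numbered `{}` fields only arrived in 2.7:

```python
host, path = 'video.golem.de', 'download'
print('{0}/{1}'.format(host, path))  # works on Python 2.6 and later
print('{}/{}'.format(host, path))    # ValueError on 2.6: auto-numbering arrived in 2.7
```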
0155549d6c [heise] Add new extractor 2014-09-27 19:28:01 +02:00
b66745288e [sport5] Add new extractor 2014-09-27 20:21:46 +03:00
2a1325fdde [ynet] Add new extractor 2014-09-27 20:11:22 +03:00
2f9e8776df [extremetube] Fix extraction 2014-09-27 22:36:53 +07:00
497339fa0e [anysex] Fix extraction 2014-09-27 22:29:27 +07:00
8e6f8051f0 [vbox7] Don't set the extension to 'flv' (fixes #3836) 2014-09-27 10:53:02 +02:00
11b3ce8509 [crunchyroll] Allow listing subtitles (fixes #3805) 2014-09-25 17:57:38 +03:00
6a5af6acb9 [golem] Add new extractor 2014-09-25 16:25:53 +02:00
9a0d98bb40 [vube] Update tests 2014-09-25 20:57:18 +07:00
fbd3162e49 [vube] Add DMCA notice 2014-09-25 20:48:54 +07:00
54e9a4af95 [wat] Skip test 2014-09-25 20:33:11 +07:00
8a32b82e46 [youku] Modernize somewhat 2014-09-25 09:58:09 +02:00
fec02bcc90 [hlsnative] Correct handling when remaining_bytes is None 2014-09-25 09:21:45 +02:00
c6e90caaa6 Merge remote-tracking branch 'naglis/wistia' 2014-09-25 02:03:49 +02:00
4bbf157794 release 2014.09.25 2014-09-25 01:59:45 +02:00
6b08cdf626 [youtube] Support for embedded /p players (Fixes #3821) 2014-09-25 01:59:02 +02:00
b686fc18da [hlsnative] Support test parameter 2014-09-24 14:38:40 +02:00
0b97f3a936 release 2014.09.24.1 2014-09-24 14:17:42 +02:00
eb73f2649f [vevo] Skip SMIL download 2014-09-24 14:17:33 +02:00
f0b5d6af74 [vevo] Support 1080p videos (Fixes #3656) 2014-09-24 14:16:56 +02:00
2f771f6c99 release 2014.09.24 2014-09-24 11:06:46 +02:00
3b2f933b01 [generic] Allow embedded YoutubePlaylists (Fixes #3821) 2014-09-24 11:05:14 +02:00
cc746841e7 [flickr] Modernize 2014-09-24 10:46:33 +02:00
ac7553d031 [youtube] Support embed/videoseries URLs (#3821) 2014-09-24 10:34:29 +02:00
cdc628a498 [youtube] Move more tests to extractors 2014-09-24 10:25:47 +02:00
69ea8ca42c [youtube] Remove superfluous unicode specifiers 2014-09-24 09:51:45 +02:00
4bc3a23ec5 [youtube] Modernize 2014-09-24 09:49:53 +02:00
bd5650ac64 [nfl] Fix test case - download, but don't check md5 2014-09-23 20:42:28 +03:00
86916dae4b [wat] Capture and output error message 2014-09-23 19:58:35 +07:00
f7d159cf95 [noco] Encode before passing to hashlib.md5 (Closes #3816) 2014-09-23 19:13:11 +07:00
632e5684ce [nfl] Add new extractor. (Closes #3815) 2014-09-23 00:28:19 +03:00
094d42fe44 release 2014.09.22.1 2014-09-22 18:15:07 +02:00
63cddb6477 [sbs] Recognize urls with format 'http://www.sbs.com.au/ondemand/video/<id>' (#3811) 2014-09-22 14:11:08 +02:00
273dea4248 [playfm] Fix view count and add comment count 2014-09-22 18:58:22 +07:00
f90d95edeb release 2014.09.22 2014-09-22 13:07:23 +02:00
45c85d7ba1 Merge remote-tracking branch 'origin/master' 2014-09-22 12:53:41 +02:00
d0df92928b [npo] Add extractor for tegenlicht.vpro.nl (closes #3778) 2014-09-21 16:53:00 +02:00
df8f53f752 [thvideo] Support mobile URLs as well 2014-09-21 16:32:38 +02:00
e35cb78c40 [theplatform] Correctly extract videos that don't use f4m or rtmp (reported in #3176) 2014-09-21 16:08:38 +02:00
3ef7d11acd Merge remote-tracking branch 'diffycat/thvideo' 2014-09-21 15:50:01 +02:00
224ce0d872 [nbc] Update test 2014-09-21 15:49:04 +02:00
dd41e8c82b [theplatform] Extract all formats for f4m videos 2014-09-21 15:47:58 +02:00
b509a4b176 [downloader/f4m] If <pv-2.0> is in the manifest, add it to the fragment URLs' query (fixes #3176)
It's used in some akamai videos (for example for theplatform.com).
2014-09-21 15:43:09 +02:00
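A sketch of the idea, on the assumption that the `<pv-2.0>` value is a ready-made query-string fragment that just needs the right separator (the downloader's real code may differ):

```python
def add_pv_token(frag_url, akamai_pv):
    # Echo the manifest's <pv-2.0> value back on every fragment request,
    # picking '?' or '&' depending on whether a query is already present.
    if not akamai_pv:
        return frag_url
    sep = '&' if '?' in frag_url else '?'
    return frag_url + sep + akamai_pv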
b28c8403b2 [yourupload] Add new extractor. Fixes #3085 2014-09-21 15:13:35 +03:00
7bd4b4229a [dropbox] Recognize 'https://www.dropbox.com/sh/*' urls (fixes #3795)
And extract the title from the URL's last path component.
2014-09-21 13:42:08 +02:00
72e450c555 [thvideo] Add support for THVideo 2014-09-21 13:21:29 +04:00
522c55b7f2 [mgoon] Add new extractor 2014-09-21 03:26:12 +03:00
58e7071a2c [tube8] Improve _VALID_URL and add display_id 2014-09-21 06:37:11 +07:00
516812df41 Merge pull request #3801 from megustamucho/master
[tube8] Improved regex
2014-09-21 06:29:13 +07:00
752297631f [noco] Adapt to API v1.1 (Closes #3797) 2014-09-21 06:20:42 +07:00
34e14a9beb Merge remote-tracking branch 'upstream/master' 2014-09-20 22:41:45 +10:00
ffb5b05db1 Merge pull request #3800 from msmuenchen/patch-1
[divxstage] added .to TLD
2014-09-20 16:59:14 +07:00
3e8fcd9fa1 [divxstage] added .to TLD
Example video "http://www.divxstage.eu/video/930c52709d2" which gets redirected to .to TLD
2014-09-20 02:32:41 +02:00
746c67d72f [wistia] Use API and make more generic 2014-09-20 03:02:11 +03:00
5aa38e75b2 [played] Add new extractor 2014-09-19 22:46:57 +02:00
532f5bff70 [franceinter] Fix extraction and modernize 2014-09-19 20:58:50 +07:00
f566d9f1d5 release 2014.09.19 2014-09-19 09:58:01 +02:00
7267bd536f [muenchentv] Add support (Fixes #3507) 2014-09-19 09:57:53 +02:00
589d3d7c7a [moniker] rename from hypestat (#3788) 2014-09-18 21:37:09 +02:00
46f74bcf5c [soundcloud] Fix non-secret playlists 2014-09-18 18:57:04 +02:00
37bfe8ace4 [hypestat] Match URLs with www. and https:// 2014-09-18 18:56:02 +02:00
0529eef5a4 [hypestat] Unify allmyvideos and vidspot (Closes #3788) 2014-09-18 18:54:03 +02:00
fd78a4d3e6 release 2014.09.18 2014-09-18 18:43:59 +02:00
1de33fafd9 [YoutubeDL] Allow downloading multiple formats with , 2014-09-18 18:43:49 +02:00
e2e5dae64d Add -f m4a 2014-09-18 18:40:19 +02:00
09b23c902b [1up.com] URLs now end with '.html' 2014-09-18 17:02:53 +02:00
109a540e7a [ign] Fix extraction 2014-09-18 16:57:34 +02:00
2914e5f00f [drtuber] Fix categories 2014-09-18 20:56:54 +07:00
2f834e9381 [soundcloud] Secret playlists and sets
Closes #3707 again.

No test cases, because I don't know which urls to use that won't
eventually be made public (as happened with the first one in that
ticket).
2014-09-18 06:35:11 -03:00
9296738f20 [soundcloud] Support api urls with secret_token, Closes #3707 2014-09-18 03:02:03 -03:00
0e59b9fffb [videomega] Simplify (#3786)
* Use raw strings (r'foo') for regular expressions (enables highlighting and avoids some errors).
* title is always true-ish
2014-09-18 00:19:08 +02:00
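For the raw-string point, a two-line illustration:

```python
import re

html = '<video id="42"></video>'
# Raw string: the backslash in \d reaches the regex engine untouched.
print(re.search(r'id="(\d+)"', html).group(1))
# Without r'', every backslash must be doubled to build the same pattern.
print(re.search('id="(\\d+)"', html).group(1))
```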
67abbe9527 [videomega] Add new extractor. Closes #3775 2014-09-17 22:57:01 +03:00
944a3de278 [npo] Improve formats extraction (Closes #3782) 2014-09-17 19:02:57 +07:00
5a13fe9ed2 Credit @m5moufl for behindkink (#3740) 2014-09-17 12:50:47 +02:00
6b6096d0b7 release 2014.09.16.1 2014-09-16 23:35:15 +02:00
d0246d07f1 Merge remote-tracking branch 'origin/master' 2014-09-16 23:35:08 +02:00
727a98c3ee Merge remote-tracking branch 'r4mos/master' 2014-09-16 23:33:26 +02:00
997987d568 Credit @r4mos for allmyvideos 2014-09-16 23:33:13 +02:00
c001f939e4 [Allmyvideos] Fixed md5. Only 10KiB of the video file 2014-09-16 23:23:54 +02:00
e825c38082 [allmyvideos] Support multiple formats 2014-09-16 22:58:31 +02:00
a04aa7a9e6 [generic] The url in the <meta redirect> doesn't need to be enclosed in single quotes (fixes #3770)
See the examples in https://en.wikipedia.org/wiki/Meta_refresh or the shortened urls from https://t.co/.
2014-09-16 22:53:48 +02:00
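A hedged sketch of a quote-optional match (not the generic IE's actual regex):

```python
import re

def meta_refresh_url(html):
    # The url= value may be bare, or wrapped in single quotes, as in the
    # Wikipedia examples cited above.
    m = re.search(
        r'(?i)<meta\s+http-equiv=["\']?refresh["\']?\s+'
        r'content=["\']\d+;\s*url=\'?([^\'"]+)',
        html)
    return m.group(1) if m else None
```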
7cdd5339b3 Change tabs to spaces 2014-09-16 21:05:50 +02:00
38349518f1 Added new host: allmyvideos.net 2014-09-16 20:48:53 +02:00
64892c0b79 [francetv] Adapt to new API (Closes #3751, closes #3769) 2014-09-16 23:47:59 +07:00
dc9f356846 [ard] Remove URL path escaping
This is now implemented generically in YoutubeDL.urlopen
2014-09-16 18:47:06 +07:00
ed86ee3b4a release 2014.09.16 2014-09-16 10:09:02 +02:00
7bb5df1cda [nhl] Match videos without catid (Fixes #3764) 2014-09-16 10:08:34 +02:00
37a81dff04 [behindkink] Remove call to report_extraction 2014-09-15 23:37:22 +02:00
fc96eb4e21 Merge remote-tracking branch '5moufl/behindkink' 2014-09-15 23:36:21 +02:00
ae369738b0 Credit @haricharan for einthusan (#3755) 2014-09-15 23:35:00 +02:00
e2037b3f7d [einthusan] Add description and beautify 2014-09-15 23:33:47 +02:00
5419033935 Fixed tests 2014-09-15 23:27:18 +02:00
2eebf060af Merge commit '98703c7fbfcf06348220aa63f9422cdd792cfe1a' 2014-09-15 23:26:54 +02:00
acd9db5902 Merge remote-tracking branch 'naglis/nosvideo' 2014-09-15 16:10:52 +02:00
d0e8b3d59b [nosvideo] Make more robust against missing metadata 2014-09-15 16:59:03 +03:00
c15dd15388 Merge remote-tracking branch 'naglis/turbo' 2014-09-15 15:48:48 +02:00
0003a5c416 Merge remote-tracking branch 'dstftw/escape-non-ascii-in-urls'
Conflicts:
	test/test_utils.py
2014-09-15 15:40:10 +02:00
21f2927f70 release 2014.09.15.1 2014-09-15 15:28:41 +02:00
e5a79071a5 [daum] Support non-numeric video IDs (Fixes #3749) 2014-09-15 15:25:35 +02:00
ca0e7a2b17 release 2014.09.15 2014-09-15 15:11:22 +02:00
b523bb71ab Merge remote-tracking branch 'origin/master' 2014-09-15 15:11:15 +02:00
a020a0dc20 [facebook] Fix support for untitled videos (Fixes #3757) 2014-09-15 15:10:24 +02:00
6d1f2431bd [BehindKink] Minor fixes
- fix _VALID_URL regex
 - remove unnecessary variable
 - remove second call of report_extraction
2014-09-15 15:09:17 +02:00
fdea3abdf8 [turbo] Add new extractor 2014-09-15 16:08:20 +03:00
59d284c316 [vporn] Make video URL regex more strict
There is garbage instead of a proper URL for some HD videos
2014-09-15 19:19:37 +07:00
98703c7fbf [einthusan] Add new extractor 2014-09-14 23:14:00 -05:00
b04c8f7358 release 2014.09.14.3 2014-09-14 16:48:15 +02:00
56d1912f1d Add a completion script generator for the fish shell 2014-09-14 14:07:33 +02:00
eb3bd7ba8d [cloudy] Retry extraction on 410 status code (#3743 #3744) 2014-09-14 19:04:16 +07:00
3b11e86eeb release 2014.09.14.2 2014-09-14 12:56:04 +02:00
5ba693d082 release 2014.09.14.1 2014-09-14 12:23:41 +02:00
11ab41658e [devscripts/release] Do not restore youtube-dl{,.exe} binaries 2014-09-14 12:23:37 +02:00
ae91f6f99b release 2014.09.14 2014-09-14 12:20:27 +02:00
3da0e1f8cd [tumblr] Modernize 2014-09-14 12:20:14 +02:00
98676c08a1 [cloudy] Add support for videoraj.ch 2014-09-14 05:17:21 +07:00
5dbf3b5c60 [cloudy] Minor changes 2014-09-14 05:01:25 +07:00
b8d61935a0 Merge pull request #3744 from naglis/cloudy
[cloudy] Add new extractor
2014-09-14 04:56:32 +07:00
20ff802c9b [cloudy] Add new extractor. Closes #3743 2014-09-14 00:12:36 +03:00
2bca84e345 [BehindKink] Add new extractor 2014-09-13 17:47:19 +02:00
e5a93354bc [drtv] Fix _VALID_URL (Closes #3735) 2014-09-13 22:07:09 +07:00
ceff3fd8ef [pornhd] Fix extraction (Closes #3739) 2014-09-13 21:45:53 +07:00
984e8e14ea [utils] Remove debug garbage 2014-09-13 21:08:04 +07:00
d05cfe0600 [YoutubeDL/utils] Clarify rationale for URL escaping in comment, move escape routines to utils and add some tests 2014-09-13 20:59:16 +07:00
7e6a715380 [adultswim] Avoid extremely long lines 2014-09-13 09:13:20 +02:00
d74bebd502 [utils] Apply 2.6 xpath craziness
This fixes ARD on 2.6
2014-09-13 09:11:14 +02:00
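The usual shape of such a workaround is to evaluate the predicate by hand; a sketch along the lines of the find_xpath_attr fix mentioned a few commits above (not necessarily the exact utils code):

```python
def find_xpath_attr(node, xpath, key, val):
    # 2.6's ElementTree cannot evaluate predicates like .//elem[@attr='v'],
    # so match on the plain path and filter attributes manually.
    for f in node.findall(xpath):
        if f.attrib.get(key) == val:
            return f
    return None
```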
bf0ff93277 [ard] Make more robust against missing thumbnails
I cannot reproduce this error; it's from Travis.
2014-09-13 09:09:57 +02:00
dc752ff442 [br] Remove deleted video test case 2014-09-13 09:04:53 +02:00
3a36d3782e [chilloutzone] Make test case more flexible 2014-09-13 09:04:03 +02:00
50317dbbad [facebook] Fix extraction and use the caption if the title is empty 2014-09-13 09:01:57 +02:00
6539c91a78 [deezer] Add support for geoblocking 2014-09-13 08:51:21 +02:00
4eefbfdbfd [utils] Fix find_xpath_attr on 2.6 2014-09-13 08:34:15 +02:00
652f283135 [youporn] Remove md5 from test
It has changed multiple times, and by default we now check that the file looks like a video.
2014-09-13 07:59:36 +02:00
2daabe4992 [options] Correct option usage 2014-09-13 07:58:44 +02:00
34a741a8ec [options] Move all options to their own file 2014-09-13 07:55:21 +02:00
78caa52aea [youtube] Modernize 2014-09-13 07:51:06 +02:00
310d75d141 [youtube] Correct description in test case 2014-09-13 07:49:26 +02:00
67b7e712d8 [xhamster] Remove md5sums from tests
We're now checking that the file is sufficiently large by default, and they seem to reencode the videos from time to time.
2014-09-13 07:47:08 +02:00
1ad6efd9b6 [spiegel] Remove deleted video from test cases 2014-09-13 07:46:19 +02:00
c3c3fe4768 [swrmediathek] Remove deleted video test case 2014-09-13 07:41:35 +02:00
6f6ed04172 [prosiebensat1] Make upload_date optional
The site has a crazy incorrect date in German formatting, but the correct date is nowhere to be found.
2014-09-13 07:39:29 +02:00
5309602106 [prosiebensat1] Fix bitrate calculation 2014-09-13 07:35:27 +02:00
9291475f7d [youtube] Move more tests to Playlist IE, and simplify suitable() 2014-09-13 07:35:14 +02:00
81127aa55d [youtube] Move playlist test to extractor 2014-09-13 07:19:20 +02:00
9b583dca4c Merge remote-tracking branch 'drags/yt-feed-loadmore' 2014-09-13 07:14:19 +02:00
f4a3490cbc [pornoxo] Fix test (view count) 2014-09-13 07:09:39 +02:00
3d9fae1ed2 Add support for PornoXO 2014-09-13 07:07:39 +02:00
5c47fc6809 Remove executable files.
These files are for a legacy updating system. This updating system would only apply to users who have not used youtube-dl for at least a year (because if they had used it, they would have noticed that basically everything about every service, except maybe a couple of YouTube videos, has changed in the meantime).
We often get commits including these files, and they are irritating for us core developers as well. Good riddance!
If anyone notes that their updates fail, they can just reinstall (see http://rg3.github.io/youtube-dl/download.html for instructions).
2014-09-13 07:04:54 +02:00
89fb6a9797 [spiegel] Add support for articles 2014-09-13 06:55:38 +02:00
b734a87112 Merge remote-tracking branch 'origin/master' 2014-09-13 06:00:50 +02:00
573585c234 [README] Add old Ubuntu versions to FAQ 2014-09-13 05:54:45 +02:00
da2e1f53e0 [deezer] Add simple support 2014-09-13 05:47:07 +02:00
f24e740bb8 [telemb] Add comment to test 2014-09-12 23:22:07 +07:00
37419b4f99 [YoutubeDL] Escape non-ASCII characters in URLs
urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991)
We work around this by replacing the request's original URL with an escaped one
2014-09-12 23:20:17 +07:00
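A sketch of the workaround's core, percent-encoding unsafe characters while leaving the URL's structure intact (the safe-set here is an assumption, not the exact one added to utils):

```python
from urllib.parse import quote  # compat_urllib_parse.quote in youtube-dl

def escape_url_chars(url):
    # Percent-encode non-ASCII characters, keeping the structural ones
    # (scheme separators, slashes, query delimiters) as-is.
    return quote(url, safe="%/:=&?~#+!$,;'@()*[]")

print(escape_url_chars('http://example.com/s\u00f8k?q=\u00f1'))
```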
64a450131b Merge branch 'Lovius-master' 2014-09-12 20:55:09 +07:00
adf2c0989d [telemb] Extract all formats and modernize 2014-09-12 20:51:48 +07:00
8fb7ff25c5 Merge branch 'master' of https://github.com/Lovius/youtube-dl into Lovius-master 2014-09-12 18:18:26 +07:00
473219a778 release 2014.09.12 2014-09-12 07:56:05 +02:00
aa37e3d486 [utils] Default SSL to TLS. (Fixes #3727)
On 2.x, we now try TLS first, and fall back to SSLv23 compatibility mode (basically anything) afterwards.
On 3.4+, we now use the proper function so that we get all the latest security configurations.
We allow SSLv3 though for the time being, since a lot of older pages use that.
On 3.3, we default to SSLv23 (basically "anything, including TLS") because that has the widest compatibility.
2014-09-12 07:50:31 +02:00
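A sketch of that version-dependent policy, simplified to the context-selection half (the real change also wires the context into the opener):

```python
import ssl
import sys

def make_https_context():
    # 3.4+: use the stdlib helper with current security defaults;
    # older versions: try TLSv1 first, callers can retry with SSLv23.
    if sys.version_info >= (3, 4):
        return ssl.create_default_context()
    return ssl.SSLContext(ssl.PROTOCOL_TLSv1)  # AttributeError before 2.7.9/3.2
```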
edb53e2dc3 [youtube] Don't make the url protocol optional
The generic extractor will add it.
2014-09-11 21:47:25 +02:00
f401a4173b [youjizz] Don't make the url protocol optional (fixes #3722)
If it's missing, downloading the webpage would fail;
the generic extractor will add it anyway.
2014-09-11 21:44:02 +02:00
de00ff6494 Merge pull request #3716 from naglis/nosvideo
[nosvideo] Check for deleted video
2014-09-11 19:20:13 +07:00
3616997929 Merge pull request #3718 from naglis/tv3play
[tvplay] Support similar services in different countries
2014-09-11 19:09:38 +07:00
eef93b0912 [tvplay] Support similar services in different countries 2014-09-11 00:10:18 +03:00
08d037309e [nosvideo] Update test 2014-09-10 18:19:59 +03:00
6024b0f25e [nosvideo] Check for deleted videos 2014-09-10 18:10:21 +03:00
561266641f release 2014.09.10.1 2014-09-10 16:39:23 +02:00
6899f2fe9e [hostingbulk] Simplify 2014-09-10 19:13:22 +07:00
136c8bd275 Merge pull request #3709 from naglis/hostingbulk
[hostingbulk] Add new extractor
2014-09-10 19:04:14 +07:00
1bf5423e82 [generic/youtube] Recognize youtube nocookie embeds (Closes #3713) 2014-09-10 18:29:20 +07:00
2c5c1f48e9 [izlesene] Simplify 2014-09-10 18:24:57 +07:00
d8e487fbd9 Merge pull request #3710 from naglis/izlesene_fix
Izlesene fix
2014-09-10 18:20:57 +07:00
bc7ff0a8dd release 2014.09.10 2014-09-10 12:26:39 +02:00
8e9da53140 [izlesene] Remove API URL 2014-09-10 00:24:48 +03:00
f1d15e6dbc [izlesene] Adapt to website changes and improve 2014-09-10 00:22:48 +03:00
91ebb17ede [hostingbulk] Add new extractor 2014-09-09 22:17:33 +03:00
c63b30901b [noco] Add authenticate support (Closes #3706) 2014-09-10 01:24:05 +07:00
f009f19ece [rutv] Fix rutv player regex 2014-09-10 01:09:04 +07:00
68477e8839 [khanacademy] Improve _VALID_URL (Closes #3695) 2014-09-09 22:09:32 +07:00
0dc5365564 [moevideo] Make filesize exact 2014-09-09 21:48:10 +07:00
9face18d08 Merge pull request #3690 from naglis/sharesix
[sharesix] Add new extractor
2014-09-09 21:14:59 +07:00
ff0ba8ce0f [moevideo] Add videochart.net to _VALID_URL 2014-09-09 21:04:57 +07:00
759c6293bd Merge pull request #3691 from naglis/moevideo
[moevideo] Add new extractor
2014-09-09 20:56:35 +07:00
a8aa99442f Merge branch 'master' of https://github.com/megustamucho/youtube-dl 2014-09-09 13:07:16 +10:00
94b539d155 [tube8] Improved regex 2014-09-09 13:05:14 +10:00
b8874d4d4e [tube8] Improved regex 2014-09-09 12:48:56 +10:00
3fbeb95e14 [vgtv] Add view count to test cases 2014-09-08 21:33:03 +07:00
6e25f51cdf Merge branch 'mrkolby-vgtv' 2014-09-08 21:26:59 +07:00
321c1e44f9 [vgtv] Extract all formats, add support for wasLive and live streamTypes 2014-09-08 21:26:42 +07:00
cc7fec5818 Merge branch 'vgtv' of https://github.com/mrkolby/youtube-dl into mrkolby-vgtv 2014-09-08 19:49:23 +07:00
5fb9077e8c [moevideo] Add new extractor 2014-09-07 01:21:58 +03:00
8e20f81c5b [sharesix] Add new extractor 2014-09-06 18:59:15 +03:00
e154762c74 release 2014.09.06 2014-09-06 15:26:38 +02:00
ba92ab3d05 Merge branch 'Rudloff-unistra_hd' 2014-09-06 15:22:01 +07:00
a2f0cdc074 [unistra] Modernize 2014-09-06 15:21:27 +07:00
0933440086 Added extractor and __init__ loader for the Belgian local TV site telemb.be 2014-09-06 09:36:14 +02:00
70a1ecd2c1 Merge branch 'unistra_hd' of https://github.com/Rudloff/youtube-dl into Rudloff-unistra_hd 2014-09-05 22:26:31 +07:00
88a23aef5a [http] Avoid closing stdout (Fixes #3686) 2014-09-05 22:05:36 +07:00
140d8d77b3 Credit @mrkolby for dbtv.no (#3685) 2014-09-05 20:00:12 +07:00
665cd96929 Merge branch 'mrkolby-dbtv' 2014-09-05 19:54:10 +07:00
4d067a58ca [dbtv] Simplify, modernize, extract all formats 2014-09-05 19:53:53 +07:00
1c1cff6a52 Merge branch 'dbtv' of https://github.com/mrkolby/youtube-dl into mrkolby-dbtv
Conflicts:
	youtube_dl/extractor/__init__.py
2014-09-05 19:01:11 +07:00
f063a04f07 [dbtv] Add new extractor 2014-09-05 11:24:30 +02:00
af8812bb9b Add HD detection to Unistra 2014-09-04 22:22:19 +02:00
78149a962b [vgtv] Add new extractor
Because of the #! in VGTV's URL structure, the URL must be quoted on the command line; otherwise bash complains:

-bash: !/video/100495/lars-og-lars-sesong-6-episode-6-lakselus: event not found
2014-09-04 21:02:47 +02:00
f2d9e3a370 [arte.tv:+7] Allow single quotes for json vp url regexes (Closes #3676) 2014-09-05 01:17:16 +07:00
16e6f396b4 release 2014.09.04.3 2014-09-04 16:20:17 +02:00
c6ec6b2e8b [arte.tv:+7] Add one more pattern for json vp url 2014-09-04 20:44:51 +07:00
7bbc6428b6 [nba] Modernize 2014-09-04 20:06:14 +07:00
c1a3c9ddb2 [techtalks] Modernize 2014-09-04 19:48:29 +07:00
feec0f56f5 [toypics:user] Update test playlist count 2014-09-04 19:37:40 +07:00
8029857d27 [washingtonpost] Add playlist title to test 2014-09-04 19:34:40 +07:00
aa61802c1e release 2014.09.04.2 2014-09-04 06:40:44 +02:00
f54aee0209 [cliphunter] Add support for more formats 2014-09-04 06:40:15 +02:00
5df921b0e3 [test_cache] Add a dot in the file name 2014-09-04 04:51:52 +02:00
35d5b67876 release 2014.09.04.1 2014-09-04 04:48:05 +02:00
674c869af4 [cache] Allow dots in keys (Fixes #3674) 2014-09-04 04:47:56 +02:00
10710ae386 release 2014.09.04 2014-09-04 01:30:43 +02:00
a0e07d3161 [youtube] Move cache into its own module 2014-09-03 17:29:19 +02:00
88fc294f7f Merge branch 'peugeot-tnaflix' 2014-09-03 21:08:50 +07:00
a232bb9551 [empflix] Rewrite in terms of tnaflix 2014-09-03 21:08:36 +07:00
eb833b7f5a [tnaflix] Improve and make generic 2014-09-03 21:07:18 +07:00
f164038b79 [utils] Make parse_duration case insensitive 2014-09-03 21:03:36 +07:00
f7a361c4f1 Merge branch 'tnaflix' of https://github.com/peugeot/youtube-dl into peugeot-tnaflix 2014-09-03 20:11:49 +07:00
884ae74785 [tvigle] Adapt to the new API 2014-09-03 19:59:36 +07:00
1dba4a2185 Add support for TNAFlix 2014-09-03 14:10:06 +02:00
7d4d5f25ed [facebook] Fix login (Fixes #3667) 2014-09-03 09:50:10 +02:00
33422c056d [drtuber] Add display_id to test 2014-09-02 21:40:03 +07:00
a7862a1bc8 [eporner] Extract all formats 2014-09-02 21:39:22 +07:00
3baa62e8d1 [beeg] Extract all formats 2014-09-02 20:54:00 +07:00
1bf8cf5c2c [drtuber] Extract display_id 2014-09-02 20:39:16 +07:00
eade1d7eab [drtuber] Extract counters 2014-09-02 20:36:26 +07:00
1a94ff6865 [mlb] Add support for embedded videos (Closes #3653) 2014-09-02 20:19:28 +07:00
b47ed50aaf [nosvideo] Remove determine_ext usage (#3655) 2014-09-02 00:17:04 +02:00
1b8477729a Merge remote-tracking branch 'naglis/nosvideo' 2014-09-02 00:14:58 +02:00
ff6ade294c release 2014.09.01.2 2014-09-01 23:58:23 +02:00
11fc065c57 [drtuber] Add age_limit and be conservative about categories (#3652) 2014-09-01 23:57:46 +02:00
94388f50b3 Merge remote-tracking branch 'peugeot/drtuber' 2014-09-01 23:17:27 +02:00
a444648202 Merge remote-tracking branch 'origin/master'
Conflicts:
	youtube_dl/extractor/eporner.py
	youtube_dl/extractor/hornbunny.py
2014-09-01 23:13:49 +02:00
7ca2e11f24 [beeg] Add age_limit 2014-09-01 23:13:04 +02:00
563f6dea59 [eporner] +age_limit 2014-09-01 23:07:48 +02:00
e4039057be [hornbunny] Simplify and add age_limit 2014-09-01 23:04:36 +02:00
11342b54d6 [anysex] Implement age_limit 2014-09-01 22:58:59 +02:00
49fa38adf2 [nosvideo] Add new extractor 2014-09-01 23:47:14 +03:00
9b330db7f0 Add age limit to anysex, beeg, eporner, hornbuny and sunporno 2014-09-01 22:38:40 +02:00
d740f7e16f Add support for drtuber.com 2014-09-01 20:12:51 +02:00
07e7dc4bdc [comedycentral] Prefer RTMP for now 2014-09-01 18:37:21 +02:00
4c59dc4c34 [comedycentral] Support news-team URLs (Fixes #3649) 2014-09-01 18:37:10 +02:00
7260ea0705 [comedycentral] Move tests to the extractor itself 2014-09-01 17:57:45 +02:00
35b1e44567 Fix --rm-cache-dir error message 2014-09-01 17:11:28 +02:00
c9ea760e31 [npo] Extract all formats 2014-09-01 20:13:32 +07:00
9ebf22b7d9 [common] Improve codecs extraction from m3u8 2014-09-01 20:13:04 +07:00
2582bebe06 [utils] Use flock instead of lockf (Fixes #3621) 2014-09-01 01:41:25 +02:00
c9cc0bf57b [youtube:playlist] PEP8 2014-09-01 01:00:40 +02:00
61edcfb0a2 release 2014.09.01.1 2014-09-01 00:20:18 +02:00
a8be56ce3d [tudou] Add test case for #3643 2014-09-01 00:20:12 +02:00
329818484c Merge remote-tracking branch 'naglis/tudou_fix' 2014-09-01 00:17:01 +02:00
8bdfddf641 [tudou] Modernize 2014-09-01 00:16:26 +02:00
36d65b61d4 release 2014.09.01 2014-09-01 00:02:47 +02:00
7d48c06f27 Merge remote-tracking branch 'origin/master' 2014-09-01 00:01:48 +02:00
d169e36f5c [beeg] PEP8 and additional tests 2014-09-01 00:00:01 +02:00
2d7af09487 Merge remote-tracking branch 'peugeot/beeg' 2014-08-31 23:57:51 +02:00
48d4681efc [eporner] Simplify and correct (#3629) 2014-08-31 23:56:14 +02:00
9ea9b61448 Merge branch 'master' of github.com:rg3/youtube-dl 2014-08-31 23:47:26 +02:00
04b4aa4a7b Merge remote-tracking branch 'peugeot/eporner' 2014-08-31 23:46:27 +02:00
5a3f0d9aee Merge remote-tracking branch 'peugeot/hornbunny' 2014-08-31 23:45:24 +02:00
1ed5b5c9c8 [youtube:subscriptions] Fix extraction (fixes #2975) 2014-08-31 23:44:43 +02:00
d10548b691 Merge branch 'master' of github.com:rg3/youtube-dl 2014-08-31 23:44:06 +02:00
e990510e6b Merge pull request #3616 from oneplusme/crunchyroll-login
Add login support to Crunchyroll extractor
2014-08-31 23:42:28 +02:00
55f7bd2dcc [youtube] Fix category extraction 2014-08-31 23:26:19 +02:00
f931e25959 [tudou] Sort qualities by numeric value 2014-08-31 23:14:35 +03:00
ca9cd290c7 [tudou] Fix #3643 - filter non-number qualities 2014-08-31 23:11:52 +03:00
49e23e8b6a Merge branch 'peugeot-sunporno' 2014-09-01 01:10:18 +07:00
ae7246e7d5 [sunporno] Extract all formats and metadata 2014-09-01 01:08:19 +07:00
43fd392413 Merge branch 'sunporno' of https://github.com/peugeot/youtube-dl into peugeot-sunporno 2014-09-01 00:45:51 +07:00
3e7c12240c [youtube] Test for like_count and dislike_count (#3633) 2014-08-31 18:10:05 +02:00
7eb21356f9 Add support for sunporno.com 2014-08-31 12:48:34 +02:00
f30a38be8b [youtube] Fix extraction of like and dislike count (fixes #3633) 2014-08-31 12:38:00 +02:00
2aebbccefc Add support for beeg.com 2014-08-31 11:57:10 +02:00
b170935a8f Merge branch 'peugeot-anysex' 2014-08-31 07:06:08 +07:00
35241d05d1 [anysex] Simplify and extract more metadata 2014-08-31 07:05:53 +07:00
be2dd0651e Merge branch 'anysex' of https://github.com/peugeot/youtube-dl into peugeot-anysex 2014-08-31 06:45:40 +07:00
6a400a6339 Merge branch 'peugeot-vporn' 2014-08-31 06:44:14 +07:00
7b53af7f70 [vporn] Fix issues, extract all formats and metadata 2014-08-31 06:43:36 +07:00
ca7b3246b6 [utils] Improve parse_duration 2014-08-31 06:41:30 +07:00
9c4c233b84 Fix exception with n_views<1000 2014-08-30 23:05:33 +02:00
8a6c59865d Fix exception with n_views<1000 2014-08-30 23:03:37 +02:00
1d57b2520c Add support for Eporner 2014-08-30 22:28:53 +02:00
17b0b8a166 testing view count is stupid 2014-08-30 20:55:42 +02:00
12c82cf9cb add support for view count 2014-08-30 20:54:38 +02:00
0bafcf6f46 forgot to test view count 2014-08-30 20:46:25 +02:00
bbc9dc56f6 Add support for HornBunny 2014-08-30 20:43:03 +02:00
72c65d39ff Add support for AnySex 2014-08-30 18:37:45 +02:00
676e3ecf24 Add support for Vporn 2014-08-30 17:17:47 +02:00
78272a076e Add SSA (.ass) subtitle output for Crunchyroll 2014-08-30 12:48:56 +01:00
723e04d0be Add login support to Crunchyroll extractor 2014-08-29 22:32:03 +01:00
08a36c3569 [test_download] Check expected file minsize and md5 only when download is not skipped 2014-08-29 18:42:42 +07:00
37709fae89 release 2014.08.29 2014-08-29 01:07:30 +02:00
a81e4eb69d [promptfile] Remove quality=1 and leave it to default 2014-08-29 01:07:18 +02:00
8e72edfb19 [promptfile] Add new extractor 2014-08-29 00:20:10 +03:00
863f08a92e release 2014.08.28.2 2014-08-28 18:03:29 +02:00
de2d9f5f1b [rtlnl] Add support for progressive videos (Closes #3603) 2014-08-28 22:54:06 +07:00
a520c11241 release 2014.08.28.1 2014-08-28 14:01:01 +02:00
b94744d157 [dropbox] Make sure ?dl=0 is ignored (Fixes #3605) 2014-08-28 14:00:55 +02:00
753727cded [test_download] Expect a minimum file size
This should detect when we're downloading a small text file by accident.
2014-08-28 13:59:45 +02:00
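A sketch of such a check; the threshold is borrowed from the 10241-byte figure in the README diff further down, and the test suite's actual field names may differ:

```python
import os

def expect_min_size(filename, min_size=10241):
    # Anything much smaller than ~10KiB is probably an HTML error page,
    # not a video.
    got = os.path.getsize(filename)
    assert got >= min_size, '%s is only %d bytes' % (filename, got)
```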
daebaab692 [extractor/common] Correct typo 2014-08-28 13:04:49 +02:00
3524cc25ca [sportdeutschland] Add support for more plain videos 2014-08-28 10:55:32 +02:00
29a7e1f261 release 2014.08.28 2014-08-28 01:38:15 +02:00
df53b6358d [veehd] Modernize 2014-08-28 01:37:57 +02:00
f1a9d64eea [extractor/common] Modernize 2014-08-28 01:04:43 +02:00
27f774862d Merge remote-tracking branch 'origin/master' 2014-08-28 01:01:04 +02:00
b759a0d4d2 fix up imports 2014-08-28 01:00:59 +02:00
22a6f15061 Move playlist tests to extractors.
From now on, test_download will run these tests. That means we benefit not only from the networking setup in there, but also from the other tests (for example test_all_urls to find problems with _VALID_URLs).
2014-08-28 00:58:24 +02:00
259454525f [nhl] Modernize 2014-08-27 19:52:55 +02:00
ce6815aad3 [dailymotion] Correct test case 2014-08-27 19:20:20 +02:00
4e408e479b [utils] Modernize tests 2014-08-27 19:11:45 +02:00
a9d3890fcb [ted] Remove md5sum from test
This failed multiple times, and the md5sum is not that important anyways.
2014-08-27 17:12:13 +02:00
2857084000 [test_download] Improve playlist handling 2014-08-27 17:11:45 +02:00
7798fad535 [downloader/rtmp] Use check_executable 2014-08-27 15:56:27 +02:00
baf2907501 [downloader/hls] Return False if ffmpeg or avconv couldn't be found 2014-08-27 15:50:47 +02:00
3c765c68c4 [downloader/hls] Use check_executable 2014-08-27 15:49:07 +02:00
29153f49b3 [downloader/hls] Use the correct program when reporting an error 2014-08-27 15:40:02 +02:00
071a236c5a [yahoo] Add one more pattern for content id 2014-08-27 18:59:28 +07:00
10437550f4 [cbs] Update test video id 2014-08-27 18:41:24 +07:00
2929b3e71d [vimeo] Move all testcases to extractors and clean up
Previously, these extractors all defined their own suitable methods.
Now test_all_urls tests that everything is in order, automatically :)
2014-08-27 11:36:02 +02:00
22449fa624 Improve test_all_urls output 2014-08-27 11:35:43 +02:00
d36d3f420c [vimeo] Move test case to where it belongs 2014-08-27 11:13:42 +02:00
60ac04e57c [facebook] Match video.php URLs 2014-08-27 11:08:47 +02:00
a7680bf330 release 2014.08.27.1 2014-08-27 02:37:23 +02:00
6d3d3fc083 [ard] Add support for plain ARD downloads (Fixes #3546) 2014-08-27 02:36:57 +02:00
aff216edf4 [generic] Prevent <video> search from skipping over empty sources (#3546) 2014-08-27 02:09:59 +02:00
1cb6dcdbbe [generic] Do not download images as videos by accident 2014-08-27 02:07:11 +02:00
3f514a353e release 2014.08.27 2014-08-27 01:44:54 +02:00
da9ec3b932 [musicvault] Add extractor (Fixes #3593) 2014-08-27 01:44:47 +02:00
191b7cbba9 [mfs] Modernize 2014-08-27 01:04:32 +02:00
e8c59b9642 release 2014.08.26 2014-08-26 21:30:52 +02:00
6abb066128 [sockshare] Fix title extraction (Fixes #3592) 2014-08-26 21:30:30 +02:00
8f1ea7cbb6 [empflix] Revert to XML parser
Don't rely on the XML being broken (if they fix it, our code wouldn't work anymore).
Instead, use the transform function we already have :)

This partially reverts commit c7bee2a725.
2014-08-26 15:51:42 +02:00
a204c85408 [ign] Fix extraction of video in articles 2014-08-26 15:38:29 +02:00
15a1f4b8fe [empflix] Extract thumbnail 2014-08-26 20:10:36 +07:00
c7bee2a725 [empflix] Adapt to malformed config XML 2014-08-26 20:07:28 +07:00
dbc1366b50 [mixcloud] Use a HEAD request when checking if the url is valid 2014-08-26 14:55:15 +02:00
704df56da7 [sportdeutschland] add new extractor 2014-08-26 12:51:13 +02:00
33ac271ba7 [utils] Let request headers override standard headers
What was I thinking when writing this?
2014-08-26 11:51:48 +02:00
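The fix amounts to an update-order change; a sketch:

```python
def build_headers(std_headers, req_headers):
    # Start from the defaults and let the request's own headers win;
    # the commit note suggests the old order was the other way around.
    headers = dict(std_headers)
    headers.update(req_headers)
    return headers
```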
0963f92f23 [eighttracks] modernize 2014-08-26 11:31:23 +02:00
9a66c1079c release 2014.08.25.3 2014-08-25 18:38:10 +02:00
f971dcbba0 Merge branch 'master' of github.com:rg3/youtube-dl 2014-08-25 18:36:42 +02:00
0990305d2a [generic] Fix rss under Python 2.x and move test to extractor 2014-08-25 18:03:01 +02:00
bcc069a937 [generic] Remove debug statement 2014-08-25 17:21:58 +02:00
34708e1bb6 [bliptv] Remove superfluous characters in _VALID_URL regex 2014-08-25 17:16:11 +02:00
829476b80a [googlesearch] Move test to extractor 2014-08-25 17:02:52 +02:00
1dd70fe330 release 2014.08.25.2 2014-08-25 16:52:28 +02:00
067e922295 release 2014.08.25.1 2014-08-25 16:41:05 +02:00
c28df2478f [wat] Use server time and pass country argument (Closes #3579) 2014-08-25 20:21:33 +07:00
241f7a8ade Merge remote-tracking branch 'JGjorgji/fix-leading-zeroes' 2014-08-25 13:59:19 +02:00
b252735910 [extractor/common] Generate better f4m format IDs 2014-08-25 13:03:08 +02:00
7adcbe7594 [rtlnl] Extract duration 2014-08-25 12:59:53 +02:00
8d31fa3cce [execafterdownload] Simplify (#3569) 2014-08-25 10:18:01 +02:00
1f06864e9a [wat] Remove unused import 2014-08-25 10:15:32 +02:00
348ae0a79e Merge remote-tracking branch 'mcd1992/exec_after_download' 2014-08-25 09:44:11 +02:00
528d455632 release 2014.08.25 2014-08-25 09:35:46 +02:00
ba5d51b340 [vimeo] Always pass in referer (Fixes #3582) 2014-08-25 09:35:37 +02:00
7833d941bb Rebased with upstream/master 2014-08-24 15:04:50 -05:00
a2360a4c80 Moved from os.system to subprocess.call 2014-08-24 14:38:43 -05:00
a7cacbca2b Implemented --exec option. 2014-08-24 14:38:43 -05:00
c6b4132a0a renamed for consistency 2014-08-24 18:49:04 +02:00
ad260c90ab Filenames are padded according to the playlist length 2014-08-24 18:23:32 +02:00
b8313f07bc release 2014.08.24.6 2014-08-24 15:19:33 +02:00
92a17d28ac [wat] Make geolock a warning (Fixes #3579) 2014-08-24 15:19:21 +02:00
5f90042bd6 [generic] remove unused imports 2014-08-24 14:28:58 +02:00
9480d1a566 Merge remote-tracking branch 'riking/twofactor' 2014-08-24 07:14:23 +02:00
36b0079f23 Credit @olebowle for GameOne:playlist (#3247) 2014-08-24 07:06:54 +02:00
28028629b9 [gameone:playlist] Move test to extractor 2014-08-24 07:05:49 +02:00
11f75cac3d Merge remote-tracking branch 'olebowle/gameone' 2014-08-24 07:02:29 +02:00
e673db0194 release 2014.08.24.5 2014-08-24 06:58:47 +02:00
ebab4520ff [generic] Use default opener for HEAD request (Fixes #3528) 2014-08-24 06:58:11 +02:00
a71d1414eb release 2014.08.24.4 2014-08-24 06:42:05 +02:00
423817c468 [expotv] Add new extractor (Fixes #3552) 2014-08-24 06:41:55 +02:00
51ed9fce09 [pornotube] Modernize 2014-08-24 06:16:24 +02:00
d43aeb1d00 release 2014.08.24.3 2014-08-24 05:32:31 +02:00
4d805e063c [generic] Automatic detection of flow player and age_limit (Fixes #3576) 2014-08-24 05:31:32 +02:00
24e5e24166 release 2014.08.24.2 2014-08-24 04:47:38 +02:00
4d54ef20a2 [ministrygrid] Add extractor (Fixes #2900) 2014-08-24 04:47:28 +02:00
54036b3991 [wayofthemaster] Remove unused import 2014-08-24 04:18:09 +02:00
e5402ac120 [wayofthemaster] Add extractor (Fixes #3575) 2014-08-24 04:14:02 +02:00
f56f8399c7 [ebaumsworld] Remove spurious determine_ext 2014-08-24 03:37:19 +02:00
cf0c5fa3a1 [ebaumsworld] Modernize 2014-08-24 03:31:38 +02:00
165250ff5e Remove debug prints 2014-08-16 14:49:30 -07:00
83317f6938 [youtube] Add two-factor account signin (TOTP only)
Additional work is required to prompt the user for the SMS or phone call codes, as there is no framework currently to prompt the user during an extraction operation.

Fixes #3533
2014-08-16 14:48:17 -07:00
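For reference, a generic RFC 6238 TOTP computation, i.e. what authenticator apps produce; this illustrates the mechanism only, not youtube-dl's login flow:

```python
import base64
import hashlib
import hmac
import struct
import time

def totp(secret_b32, digits=6, period=30):
    # Decode the base32 secret (re-padding it), hash the 30-second counter,
    # then dynamically truncate per RFC 4226/6238.
    key = base64.b32decode(secret_b32.upper() + '=' * (-len(secret_b32) % 8))
    counter = struct.pack('>Q', int(time.time()) // period)
    mac = hmac.new(key, counter, hashlib.sha1).digest()
    offset = mac[-1] & 0x0F
    code = struct.unpack('>I', mac[offset:offset + 4])[0] & 0x7FFFFFFF
    return str(code % 10 ** digits).zfill(digits)
```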
1a9b9649fb Check load_more_widget_html for feed paging 2014-07-27 17:19:26 -07:00
8c778adc39 [gameone] simplify playlist extractor 2014-07-23 10:00:50 +02:00
71b6065009 [gameone] add playlist test 2014-07-23 09:32:01 +02:00
c065fd35ae [gameone] add playlist capability 2014-07-13 12:16:25 +02:00
195 changed files with 9058 additions and 2948 deletions

.gitignore (vendored): 3 changes

@@ -11,6 +11,7 @@ MANIFEST
README.txt
youtube-dl.1
youtube-dl.bash-completion
youtube-dl.fish
youtube-dl
youtube-dl.exe
youtube-dl.tar.gz
@@ -26,5 +27,7 @@ updates_key.pem
*.m4a
*.m4v
*.part
*.swp
test/testdata
.tox
youtube-dl.zsh


@@ -1 +0,0 @@
2012.12.99

MANIFEST.in

@@ -2,5 +2,6 @@ include README.md
include test/*.py
include test/*.json
include youtube-dl.bash-completion
include youtube-dl.fish
include youtube-dl.1
recursive-include docs Makefile conf.py *.rst

Makefile

@@ -1,7 +1,7 @@
all: youtube-dl README.md README.txt youtube-dl.1 youtube-dl.bash-completion
all: youtube-dl README.md README.txt youtube-dl.1 youtube-dl.bash-completion youtube-dl.zsh youtube-dl.fish
clean:
rm -rf youtube-dl.1.temp.md youtube-dl.1 youtube-dl.bash-completion README.txt MANIFEST build/ dist/ .coverage cover/ youtube-dl.tar.gz
rm -rf youtube-dl.1.temp.md youtube-dl.1 youtube-dl.bash-completion README.txt MANIFEST build/ dist/ .coverage cover/ youtube-dl.tar.gz youtube-dl.zsh youtube-dl.fish
cleanall: clean
rm -f youtube-dl youtube-dl.exe
@@ -9,6 +9,7 @@ cleanall: clean
PREFIX ?= /usr/local
BINDIR ?= $(PREFIX)/bin
MANDIR ?= $(PREFIX)/man
SHAREDIR ?= $(PREFIX)/share
PYTHON ?= /usr/bin/env python
# set SYSCONFDIR to /etc if PREFIX=/usr or PREFIX=/usr/local
@@ -22,13 +23,17 @@ else
endif
endif
install: youtube-dl youtube-dl.1 youtube-dl.bash-completion
install: youtube-dl youtube-dl.1 youtube-dl.bash-completion youtube-dl.zsh youtube-dl.fish
install -d $(DESTDIR)$(BINDIR)
install -m 755 youtube-dl $(DESTDIR)$(BINDIR)
install -d $(DESTDIR)$(MANDIR)/man1
install -m 644 youtube-dl.1 $(DESTDIR)$(MANDIR)/man1
install -d $(DESTDIR)$(SYSCONFDIR)/bash_completion.d
install -m 644 youtube-dl.bash-completion $(DESTDIR)$(SYSCONFDIR)/bash_completion.d/youtube-dl
install -d $(DESTDIR)$(SHAREDIR)/zsh/site-functions
install -m 644 youtube-dl.zsh $(DESTDIR)$(SHAREDIR)/zsh/site-functions/_youtube-dl
install -d $(DESTDIR)$(SYSCONFDIR)/fish/completions
install -m 644 youtube-dl.fish $(DESTDIR)$(SYSCONFDIR)/fish/completions/youtube-dl.fish
test:
#nosetests --with-coverage --cover-package=youtube_dl --cover-html --verbose --processes 4 test
@@ -36,9 +41,9 @@ test:
tar: youtube-dl.tar.gz
.PHONY: all clean install test tar bash-completion pypi-files
.PHONY: all clean install test tar bash-completion pypi-files zsh-completion fish-completion
pypi-files: youtube-dl.bash-completion README.txt youtube-dl.1
pypi-files: youtube-dl.bash-completion README.txt youtube-dl.1 youtube-dl.fish
youtube-dl: youtube_dl/*.py youtube_dl/*/*.py
zip --quiet youtube-dl youtube_dl/*.py youtube_dl/*/*.py
@@ -64,7 +69,17 @@ youtube-dl.bash-completion: youtube_dl/*.py youtube_dl/*/*.py devscripts/bash-co
bash-completion: youtube-dl.bash-completion
youtube-dl.tar.gz: youtube-dl README.md README.txt youtube-dl.1 youtube-dl.bash-completion
youtube-dl.zsh: youtube_dl/*.py youtube_dl/*/*.py devscripts/zsh-completion.in
python devscripts/zsh-completion.py
zsh-completion: youtube-dl.zsh
youtube-dl.fish: youtube_dl/*.py youtube_dl/*/*.py devscripts/fish-completion.in
python devscripts/fish-completion.py
fish-completion: youtube-dl.fish
youtube-dl.tar.gz: youtube-dl README.md README.txt youtube-dl.1 youtube-dl.bash-completion youtube-dl.zsh youtube-dl.fish
@tar -czf youtube-dl.tar.gz --transform "s|^|youtube-dl/|" --owner 0 --group 0 \
--exclude '*.DS_Store' \
--exclude '*.kate-swp' \
@@ -78,5 +93,6 @@ youtube-dl.tar.gz: youtube-dl README.md README.txt youtube-dl.1 youtube-dl.bash-
-- \
bin devscripts test youtube_dl docs \
LICENSE README.md README.txt \
Makefile MANIFEST.in youtube-dl.1 youtube-dl.bash-completion setup.py \
Makefile MANIFEST.in youtube-dl.1 youtube-dl.bash-completion \
youtube-dl.zsh youtube-dl.fish setup.py \
youtube-dl

README.md

@@ -99,8 +99,6 @@ which means you can modify it, redistribute it or use it however you like.
downloaded videos in it.
--include-ads Download advertisements as well
(experimental)
--youtube-include-dash-manifest Try to download the DASH manifest on
YouTube videos (experimental)
## Download Options:
-r, --rate-limit LIMIT maximum download rate in bytes per second
@@ -158,7 +156,8 @@ which means you can modify it, redistribute it or use it however you like.
downloads if possible.
--no-continue do not resume partially downloaded files
(restart from beginning)
--no-part do not use .part files
--no-part do not use .part files - write directly
into output file
--no-mtime do not use the Last-modified header to set
the file modification time
--write-description write video description to a .description
@@ -216,7 +215,7 @@ which means you can modify it, redistribute it or use it however you like.
information about the video. (Currently
supported only for YouTube)
--user-agent UA specify a custom user agent
--referer REF specify a custom referer, use if the video
--referer URL specify a custom referer, use if the video
access is restricted to one domain
--add-header FIELD:VALUE specify a custom HTTP header and its value,
separated by a colon ':'. You can use this
@@ -227,17 +226,22 @@ which means you can modify it, redistribute it or use it however you like.
## Video Format Options:
-f, --format FORMAT video format code, specify the order of
preference using slashes: "-f 22/17/18".
"-f mp4" and "-f flv" are also supported.
You can also use the special names "best",
"bestvideo", "bestaudio", "worst",
"worstvideo" and "worstaudio". By default,
youtube-dl will pick the best quality.
preference using slashes: -f 22/17/18 . -f
mp4 , -f m4a and -f flv are also
supported. You can also use the special
names "best", "bestvideo", "bestaudio",
"worst", "worstvideo" and "worstaudio". By
default, youtube-dl will pick the best
quality. Use commas to download multiple
audio formats, such as -f
136/137/mp4/bestvideo,140/m4a/bestaudio
--all-formats download all available video formats
--prefer-free-formats prefer free video formats unless a specific
one is requested
--max-quality FORMAT highest quality format to download
-F, --list-formats list all available formats
--youtube-skip-dash-manifest Do not download the DASH manifest on
YouTube videos
## Subtitle Options:
--write-sub write subtitle file
@@ -253,8 +257,9 @@ which means you can modify it, redistribute it or use it however you like.
language tags like 'en,pt'
## Authentication Options:
-u, --username USERNAME account username
-u, --username USERNAME login with this account ID
-p, --password PASSWORD account password
-2, --twofactor TWOFACTOR two-factor auth code
-n, --netrc use .netrc authentication data
--video-password PASSWORD video password (vimeo, smotri)
@@ -263,7 +268,7 @@ which means you can modify it, redistribute it or use it however you like.
(requires ffmpeg or avconv and ffprobe or
avprobe)
--audio-format FORMAT "best", "aac", "vorbis", "mp3", "m4a",
"opus", or "wav"; best by default
"opus", or "wav"; "best" by default
--audio-quality QUALITY ffmpeg/avconv audio quality specification,
insert a value between 0 (better) and 9
(worse) for VBR or a specific bitrate like
@@ -287,6 +292,10 @@ which means you can modify it, redistribute it or use it however you like.
postprocessors (default)
--prefer-ffmpeg Prefer ffmpeg over avconv for running the
postprocessors
--exec CMD Execute a command on the file after
downloading, similar to find's -exec
syntax. Example: --exec 'adb push {}
/sdcard/Music/ && rm {}'
# CONFIGURATION
@@ -340,6 +349,38 @@ $ youtube-dl --dateafter 20000101 --datebefore 20091231
# FAQ
### How do I update youtube-dl?
If you've followed [our manual installation instructions](http://rg3.github.io/youtube-dl/download.html), you can simply run `youtube-dl -U` (or, on Linux, `sudo youtube-dl -U`).
If you have used pip, a simple `sudo pip install -U youtube-dl` is sufficient to update.
If you have installed youtube-dl using a package manager like *apt-get* or *yum*, use the standard system update mechanism to update. Note that distribution packages are often outdated. As a rule of thumb, youtube-dl releases at least once a month, and often weekly or even daily. Simply go to http://yt-dl.org/ to find out the current version. Unfortunately, there is nothing we youtube-dl developers can do if your distribution serves a really outdated version. You can (and should) complain to your distribution in their bugtracker or support forum.
As a last resort, you can also uninstall the version installed by your package manager and follow our manual installation instructions. For that, remove the distribution's package, with a line like
sudo apt-get remove -y youtube-dl
Afterwards, simply follow [our manual installation instructions](http://rg3.github.io/youtube-dl/download.html):
```
sudo wget https://yt-dl.org/latest/youtube-dl -O /usr/local/bin/youtube-dl
sudo chmod a+x /usr/local/bin/youtube-dl
hash -r
```
Again, from then on you'll be able to update with `sudo youtube-dl -U`.
### I'm getting an error `Unable to extract OpenGraph title` on YouTube playlists
YouTube changed their playlist format in March 2014 and later on, so you'll need at least youtube-dl 2014.07.25 to download all YouTube videos.
If you have installed youtube-dl with a package manager, pip, setup.py or a tarball, please use that to update. Note that Ubuntu packages do not seem to get updated anymore. Since we are not affiliated with Ubuntu, there is little we can do. Feel free to report bugs to the Ubuntu packaging guys - all they have to do is update the package to a somewhat recent version. See above for a way to update.
### Do I always have to pass in `--max-quality FORMAT`, or `-citw`?
By default, youtube-dl intends to have the best options (incidentally, if you have a convincing case that these should be different, [please file an issue where you explain that](https://yt-dl.org/bug)). Therefore, it is unnecessary and sometimes harmful to copy long option strings from webpages. In particular, `--max-quality` *limits* the video quality (so if you want the best quality, do NOT pass it in), and the only option out of `-citw` that is regularly useful is `-i`.
### Can you please put the -b option back?
Most people asking this question are not aware that youtube-dl now defaults to downloading the highest available quality as reported by YouTube, which will be 1080p or 720p in some cases, so you no longer need the `-b` option. For some specific videos, maybe YouTube does not report them to be available in a specific high quality format you're interested in. In that case, simply request it with the `-f` option and youtube-dl will try to download it.
@ -415,8 +456,6 @@ If you want to add support for a new site, you can follow this quick list (assum
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
@ -424,7 +463,7 @@ If you want to add support for a new site, you can follow this quick list (assum
_VALID_URL = r'https?://(?:www\.)?yourextractor\.com/watch/(?P<id>[0-9]+)'
_TEST = {
'url': 'http://yourextractor.com/watch/42',
'md5': 'TODO: md5 sum of the first 10KiB of the video file',
'md5': 'TODO: md5 sum of the first 10241 bytes of the video file (use --test)',
'info_dict': {
'id': '42',
'ext': 'mp4',
@ -439,8 +478,7 @@ If you want to add support for a new site, you can follow this quick list (assum
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
video_id = self._match_id(url)
# TODO more code goes here, for example ...
webpage = self._download_webpage(url, video_id)

View File

@ -0,0 +1,5 @@
{{commands}}
complete --command youtube-dl --arguments ":ytfavorites :ytrecommended :ytsubscriptions :ytwatchlater :ythistory"

48
devscripts/fish-completion.py Executable file
View File

@ -0,0 +1,48 @@
#!/usr/bin/env python
from __future__ import unicode_literals
import optparse
import os
from os.path import dirname as dirn
import sys
sys.path.append(dirn(dirn((os.path.abspath(__file__)))))
import youtube_dl
from youtube_dl.utils import shell_quote
FISH_COMPLETION_FILE = 'youtube-dl.fish'
FISH_COMPLETION_TEMPLATE = 'devscripts/fish-completion.in'
EXTRA_ARGS = {
'recode-video': ['--arguments', 'mp4 flv ogg webm mkv', '--exclusive'],
# Options that need a file parameter
'download-archive': ['--require-parameter'],
'cookies': ['--require-parameter'],
'load-info': ['--require-parameter'],
'batch-file': ['--require-parameter'],
}
def build_completion(opt_parser):
commands = []
for group in opt_parser.option_groups:
for option in group.option_list:
long_option = option.get_opt_string().strip('-')
help_msg = shell_quote([option.help])
complete_cmd = ['complete', '--command', 'youtube-dl', '--long-option', long_option]
if option._short_opts:
complete_cmd += ['--short-option', option._short_opts[0].strip('-')]
if option.help != optparse.SUPPRESS_HELP:
complete_cmd += ['--description', option.help]
complete_cmd.extend(EXTRA_ARGS.get(long_option, []))
commands.append(shell_quote(complete_cmd))
with open(FISH_COMPLETION_TEMPLATE) as f:
template = f.read()
filled_template = template.replace('{{commands}}', '\n'.join(commands))
with open(FISH_COMPLETION_FILE, 'w') as f:
f.write(filled_template)
parser = youtube_dl.parseOpts()[0]
build_completion(parser)

View File

@ -73,7 +73,6 @@ RELEASE_FILES="youtube-dl youtube-dl.exe youtube-dl-$version.tar.gz"
(cd build/$version/ && sha1sum $RELEASE_FILES > SHA1SUMS)
(cd build/$version/ && sha256sum $RELEASE_FILES > SHA2-256SUMS)
(cd build/$version/ && sha512sum $RELEASE_FILES > SHA2-512SUMS)
git checkout HEAD -- youtube-dl youtube-dl.exe
/bin/echo -e "\n### Signing and uploading the new binaries to yt-dl.org ..."
for f in $RELEASE_FILES; do gpg --passphrase-repeat 5 --detach-sig "build/$version/$f"; done

View File

@ -0,0 +1,28 @@
#compdef youtube-dl
__youtube_dl() {
local curcontext="$curcontext" fileopts diropts cur prev
typeset -A opt_args
fileopts="{{fileopts}}"
diropts="{{diropts}}"
cur=$words[CURRENT]
case $cur in
:)
_arguments '*: :(::ytfavorites ::ytrecommended ::ytsubscriptions ::ytwatchlater ::ythistory)'
;;
*)
prev=$words[CURRENT-1]
if [[ ${prev} =~ ${fileopts} ]]; then
_path_files
elif [[ ${prev} =~ ${diropts} ]]; then
_path_files -/
elif [[ ${prev} == "--recode-video" ]]; then
_arguments '*: :(mp4 flv ogg webm mkv)'
else
_arguments '*: :({{flags}})'
fi
;;
esac
}
__youtube_dl

46
devscripts/zsh-completion.py Executable file
View File

@ -0,0 +1,46 @@
#!/usr/bin/env python
import os
from os.path import dirname as dirn
import sys
sys.path.append(dirn(dirn((os.path.abspath(__file__)))))
import youtube_dl
ZSH_COMPLETION_FILE = "youtube-dl.zsh"
ZSH_COMPLETION_TEMPLATE = "devscripts/zsh-completion.in"
def build_completion(opt_parser):
opts = [opt for group in opt_parser.option_groups
for opt in group.option_list]
opts_file = [opt for opt in opts if opt.metavar == "FILE"]
opts_dir = [opt for opt in opts if opt.metavar == "DIR"]
fileopts = []
for opt in opts_file:
if opt._short_opts:
fileopts.extend(opt._short_opts)
if opt._long_opts:
fileopts.extend(opt._long_opts)
diropts = []
for opt in opts_dir:
if opt._short_opts:
diropts.extend(opt._short_opts)
if opt._long_opts:
diropts.extend(opt._long_opts)
flags = [opt.get_opt_string() for opt in opts]
with open(ZSH_COMPLETION_TEMPLATE) as f:
template = f.read()
template = template.replace("{{fileopts}}", "|".join(fileopts))
template = template.replace("{{diropts}}", "|".join(diropts))
template = template.replace("{{flags}}", " ".join(flags))
with open(ZSH_COMPLETION_FILE, "w") as f:
f.write(template)
parser = youtube_dl.parseOpts()[0]
build_completion(parser)

View File

@ -48,6 +48,7 @@ if len(sys.argv) >= 2 and sys.argv[1] == 'py2exe':
else:
files_spec = [
('etc/bash_completion.d', ['youtube-dl.bash-completion']),
('etc/fish/completions', ['youtube-dl.fish']),
('share/doc/youtube_dl', ['README.txt']),
('share/man/man1', ['youtube-dl.1'])
]

View File

@ -1,3 +1,5 @@
from __future__ import unicode_literals
import errno
import io
import hashlib
@ -12,6 +14,7 @@ from youtube_dl import YoutubeDL
from youtube_dl.utils import (
compat_str,
preferredencoding,
write_string,
)
@ -40,10 +43,10 @@ def report_warning(message):
If stderr is a tty file the 'WARNING:' will be colored
'''
if sys.stderr.isatty() and os.name != 'nt':
_msg_header = u'\033[0;33mWARNING:\033[0m'
_msg_header = '\033[0;33mWARNING:\033[0m'
else:
_msg_header = u'WARNING:'
output = u'%s %s\n' % (_msg_header, message)
_msg_header = 'WARNING:'
output = '%s %s\n' % (_msg_header, message)
if 'b' in getattr(sys.stderr, 'mode', '') or sys.version_info[0] < 3:
output = output.encode(preferredencoding())
sys.stderr.write(output)
@ -102,19 +105,23 @@ def expect_info_dict(self, expected_dict, got_dict):
match_rex = re.compile(match_str)
self.assertTrue(
isinstance(got, compat_str) and match_rex.match(got),
u'field %s (value: %r) should match %r' % (info_field, got, match_str))
isinstance(got, compat_str),
'Expected a %s object, but got %s for field %s' % (
compat_str.__name__, type(got).__name__, info_field))
self.assertTrue(
match_rex.match(got),
'field %s (value: %r) should match %r' % (info_field, got, match_str))
elif isinstance(expected, type):
got = got_dict.get(info_field)
self.assertTrue(isinstance(got, expected),
u'Expected type %r for field %s, but got value %r of type %r' % (expected, info_field, got, type(got)))
'Expected type %r for field %s, but got value %r of type %r' % (expected, info_field, got, type(got)))
else:
if isinstance(expected, compat_str) and expected.startswith('md5:'):
got = 'md5:' + md5(got_dict.get(info_field))
else:
got = got_dict.get(info_field)
self.assertEqual(expected, got,
u'invalid value for field %s, expected %r, got %r' % (info_field, expected, got))
'invalid value for field %s, expected %r, got %r' % (info_field, expected, got))
# Check for the presence of mandatory fields
if got_dict.get('_type') != 'playlist':
@ -122,7 +129,7 @@ def expect_info_dict(self, expected_dict, got_dict):
self.assertTrue(got_dict.get(key), 'Missing mandatory field %s' % key)
# Check for mandatory fields that are automatically set by YoutubeDL
for key in ['webpage_url', 'extractor', 'extractor_key']:
self.assertTrue(got_dict.get(key), u'Missing field: %s' % key)
self.assertTrue(got_dict.get(key), 'Missing field: %s' % key)
# Are checkable fields missing from the test case definition?
test_info_dict = dict((key, value if not isinstance(value, compat_str) or len(value) < 250 else 'md5:' + md5(value))
@ -130,7 +137,15 @@ def expect_info_dict(self, expected_dict, got_dict):
if value and key in ('title', 'description', 'uploader', 'upload_date', 'timestamp', 'uploader_id', 'location'))
missing_keys = set(test_info_dict.keys()) - set(expected_dict.keys())
if missing_keys:
sys.stderr.write(u'\n"info_dict": ' + json.dumps(test_info_dict, ensure_ascii=False, indent=4) + u'\n')
def _repr(v):
if isinstance(v, compat_str):
return "'%s'" % v.replace('\\', '\\\\').replace("'", "\\'")
else:
return repr(v)
info_dict_str = ''.join(
' %s: %s,\n' % (_repr(k), _repr(v))
for k, v in test_info_dict.items())
write_string('\n"info_dict": {' + info_dict_str + '}\n', out=sys.stderr)
self.assertFalse(
missing_keys,
'Missing keys in test definition: %s' % (

View File

@ -27,7 +27,6 @@
"rejecttitle": null,
"retries": 10,
"simulate": false,
"skip_download": false,
"subtitleslang": null,
"subtitlesformat": "srt",
"test": true,

View File

@ -109,7 +109,9 @@ class TestAllURLsMatching(unittest.TestCase):
if type(ie).__name__ in ('GenericIE', tc['name'] + 'IE'):
self.assertTrue(ie.suitable(url), '%s should match URL %r' % (type(ie).__name__, url))
else:
self.assertFalse(ie.suitable(url), '%s should not match URL %r' % (type(ie).__name__, url))
self.assertFalse(
ie.suitable(url),
'%s should not match URL %r . That URL belongs to %s.' % (type(ie).__name__, url, tc['name']))
def test_keywords(self):
self.assertMatch(':ytsubs', ['youtube:subscriptions'])
@ -141,32 +143,6 @@ class TestAllURLsMatching(unittest.TestCase):
self.assertMatch('http://video.pbs.org/viralplayer/2365173446/', ['PBS'])
self.assertMatch('http://video.pbs.org/widget/partnerplayer/980042464/', ['PBS'])
def test_ComedyCentralShows(self):
self.assertMatch(
'http://thedailyshow.cc.com/extended-interviews/xm3fnq/andrew-napolitano-extended-interview',
['ComedyCentralShows'])
self.assertMatch(
'http://thecolbertreport.cc.com/videos/29w6fx/-realhumanpraise-for-fox-news',
['ComedyCentralShows'])
self.assertMatch(
'http://thecolbertreport.cc.com/videos/gh6urb/neil-degrasse-tyson-pt--1?xrs=eml_col_031114',
['ComedyCentralShows'])
self.assertMatch(
'http://thedailyshow.cc.com/guests/michael-lewis/3efna8/exclusive---michael-lewis-extended-interview-pt--3',
['ComedyCentralShows'])
self.assertMatch(
'http://thedailyshow.cc.com/episodes/sy7yv0/april-8--2014---denis-leary',
['ComedyCentralShows'])
self.assertMatch(
'http://thecolbertreport.cc.com/episodes/8ase07/april-8--2014---jane-goodall',
['ComedyCentralShows'])
self.assertMatch(
'http://thedailyshow.cc.com/video-playlists/npde3s/the-daily-show-19088-highlights',
['ComedyCentralShows'])
self.assertMatch(
'http://thedailyshow.cc.com/special-editions/2l8fdb/special-edition---a-look-back-at-food',
['ComedyCentralShows'])
def test_yahoo_https(self):
# https://github.com/rg3/youtube-dl/issues/2701
self.assertMatch(

59
test/test_cache.py Normal file
View File

@ -0,0 +1,59 @@
#!/usr/bin/env python
# coding: utf-8
from __future__ import unicode_literals
import shutil
# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from test.helper import FakeYDL
from youtube_dl.cache import Cache
def _is_empty(d):
return not bool(os.listdir(d))
def _mkdir(d):
if not os.path.exists(d):
os.mkdir(d)
class TestCache(unittest.TestCase):
def setUp(self):
TEST_DIR = os.path.dirname(os.path.abspath(__file__))
TESTDATA_DIR = os.path.join(TEST_DIR, 'testdata')
_mkdir(TESTDATA_DIR)
self.test_dir = os.path.join(TESTDATA_DIR, 'cache_test')
self.tearDown()
def tearDown(self):
if os.path.exists(self.test_dir):
shutil.rmtree(self.test_dir)
def test_cache(self):
ydl = FakeYDL({
'cachedir': self.test_dir,
})
c = Cache(ydl)
obj = {'x': 1, 'y': ['ä', '\\a', True]}
self.assertEqual(c.load('test_cache', 'k.'), None)
c.store('test_cache', 'k.', obj)
self.assertEqual(c.load('test_cache', 'k2'), None)
self.assertFalse(_is_empty(self.test_dir))
self.assertEqual(c.load('test_cache', 'k.'), obj)
self.assertEqual(c.load('test_cache', 'y'), None)
self.assertEqual(c.load('test_cache2', 'k.'), None)
c.remove()
self.assertFalse(os.path.exists(self.test_dir))
self.assertEqual(c.load('test_cache', 'k.'), None)
if __name__ == '__main__':
unittest.main()

View File

@ -7,6 +7,7 @@ import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from test.helper import (
assertGreaterEqual,
get_params,
gettestcases,
expect_info_dict,
@ -27,6 +28,7 @@ from youtube_dl.utils import (
compat_HTTPError,
DownloadError,
ExtractorError,
format_bytes,
UnavailableVideoError,
)
from youtube_dl.extractor import get_info_extractor
@ -102,8 +104,11 @@ def generator(test_case):
def get_tc_filename(tc):
return tc.get('file') or ydl.prepare_filename(tc.get('info_dict', {}))
def try_rm_tcs_files():
for tc in test_cases:
res_dict = None
def try_rm_tcs_files(tcs=None):
if tcs is None:
tcs = test_cases
for tc in tcs:
tc_filename = get_tc_filename(tc)
try_rm(tc_filename)
try_rm(tc_filename + '.part')
@ -134,31 +139,62 @@ def generator(test_case):
if is_playlist:
self.assertEqual(res_dict['_type'], 'playlist')
self.assertTrue('entries' in res_dict)
expect_info_dict(self, test_case.get('info_dict', {}), res_dict)
if 'playlist_mincount' in test_case:
self.assertGreaterEqual(
assertGreaterEqual(
self,
len(res_dict['entries']),
test_case['playlist_mincount'],
'Expected at least %d in playlist %s, but got only %d' % (
test_case['playlist_mincount'], test_case['url'],
len(res_dict['entries'])))
if 'playlist_count' in test_case:
self.assertEqual(
len(res_dict['entries']),
test_case['playlist_count'],
'Expected %d entries in playlist %s, but got %d.' % (
test_case['playlist_count'],
test_case['url'],
len(res_dict['entries']),
))
if 'playlist_duration_sum' in test_case:
got_duration = sum(e['duration'] for e in res_dict['entries'])
self.assertEqual(
test_case['playlist_duration_sum'], got_duration)
for tc in test_cases:
tc_filename = get_tc_filename(tc)
if not test_case.get('params', {}).get('skip_download', False):
self.assertTrue(os.path.exists(tc_filename), msg='Missing file ' + tc_filename)
self.assertTrue(tc_filename in finished_hook_called)
expected_minsize = tc.get('file_minsize', 10000)
if expected_minsize is not None:
if params.get('test'):
expected_minsize = max(expected_minsize, 10000)
got_fsize = os.path.getsize(tc_filename)
assertGreaterEqual(
self, got_fsize, expected_minsize,
'Expected %s to be at least %s, but it\'s only %s ' %
(tc_filename, format_bytes(expected_minsize),
format_bytes(got_fsize)))
if 'md5' in tc:
md5_for_file = _file_md5(tc_filename)
self.assertEqual(md5_for_file, tc['md5'])
info_json_fn = os.path.splitext(tc_filename)[0] + '.info.json'
self.assertTrue(os.path.exists(info_json_fn))
if 'md5' in tc:
md5_for_file = _file_md5(tc_filename)
self.assertEqual(md5_for_file, tc['md5'])
with io.open(info_json_fn, encoding='utf-8') as infof:
info_dict = json.load(infof)
expect_info_dict(self, tc.get('info_dict', {}), info_dict)
finally:
try_rm_tcs_files()
if is_playlist and res_dict is not None and res_dict.get('entries'):
# Remove all other files that may have been extracted if the
# extractor returns full results even with extract_flat
res_tcs = [{'info_dict': e} for e in res_dict['entries']]
try_rm_tcs_files(res_tcs)
return test_template

View File

@ -1,411 +0,0 @@
#!/usr/bin/env python
# encoding: utf-8
## DEPRECATED FILE!
# Add new tests to the extractors themselves, like this:
# _TEST = {
# 'url': 'http://example.com/playlist/42',
# 'playlist_mincount': 99,
# 'info_dict': {
# 'id': '42',
# 'title': 'Playlist number forty-two',
# }
# }
from __future__ import unicode_literals
# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from test.helper import (
assertRegexpMatches,
assertGreaterEqual,
expect_info_dict,
FakeYDL,
)
from youtube_dl.extractor import (
AcademicEarthCourseIE,
DailymotionPlaylistIE,
DailymotionUserIE,
VimeoChannelIE,
VimeoUserIE,
VimeoAlbumIE,
VimeoGroupsIE,
VineUserIE,
UstreamChannelIE,
SoundcloudSetIE,
SoundcloudUserIE,
SoundcloudPlaylistIE,
TeacherTubeUserIE,
LivestreamIE,
LivestreamOriginalIE,
NHLVideocenterIE,
BambuserChannelIE,
BandcampAlbumIE,
SmotriCommunityIE,
SmotriUserIE,
IviCompilationIE,
ImdbListIE,
KhanAcademyIE,
EveryonesMixtapeIE,
RutubeChannelIE,
RutubePersonIE,
GoogleSearchIE,
GenericIE,
TEDIE,
ToypicsUserIE,
XTubeUserIE,
InstagramUserIE,
CSpanIE,
AolIE,
)
class TestPlaylists(unittest.TestCase):
def assertIsPlaylist(self, info):
"""Make sure the info has '_type' set to 'playlist'"""
self.assertEqual(info['_type'], 'playlist')
def test_dailymotion_playlist(self):
dl = FakeYDL()
ie = DailymotionPlaylistIE(dl)
result = ie.extract('http://www.dailymotion.com/playlist/xv4bw_nqtv_sport/1#video=xl8v3q')
self.assertIsPlaylist(result)
self.assertEqual(result['title'], 'SPORT')
self.assertTrue(len(result['entries']) > 20)
def test_dailymotion_user(self):
dl = FakeYDL()
ie = DailymotionUserIE(dl)
result = ie.extract('https://www.dailymotion.com/user/nqtv')
self.assertIsPlaylist(result)
assertGreaterEqual(self, len(result['entries']), 100)
self.assertEqual(result['title'], 'Rémi Gaillard')
def test_vimeo_channel(self):
dl = FakeYDL()
ie = VimeoChannelIE(dl)
result = ie.extract('http://vimeo.com/channels/tributes')
self.assertIsPlaylist(result)
self.assertEqual(result['title'], 'Vimeo Tributes')
self.assertTrue(len(result['entries']) > 24)
def test_vimeo_user(self):
dl = FakeYDL()
ie = VimeoUserIE(dl)
result = ie.extract('http://vimeo.com/nkistudio/videos')
self.assertIsPlaylist(result)
self.assertEqual(result['title'], 'Nki')
self.assertTrue(len(result['entries']) > 65)
def test_vimeo_album(self):
dl = FakeYDL()
ie = VimeoAlbumIE(dl)
result = ie.extract('http://vimeo.com/album/2632481')
self.assertIsPlaylist(result)
self.assertEqual(result['title'], 'Staff Favorites: November 2013')
self.assertTrue(len(result['entries']) > 12)
def test_vimeo_groups(self):
dl = FakeYDL()
ie = VimeoGroupsIE(dl)
result = ie.extract('http://vimeo.com/groups/rolexawards')
self.assertIsPlaylist(result)
self.assertEqual(result['title'], 'Rolex Awards for Enterprise')
self.assertTrue(len(result['entries']) > 72)
def test_vine_user(self):
dl = FakeYDL()
ie = VineUserIE(dl)
result = ie.extract('https://vine.co/Visa')
self.assertIsPlaylist(result)
assertGreaterEqual(self, len(result['entries']), 47)
def test_ustream_channel(self):
dl = FakeYDL()
ie = UstreamChannelIE(dl)
result = ie.extract('http://www.ustream.tv/channel/channeljapan')
self.assertIsPlaylist(result)
self.assertEqual(result['id'], '10874166')
assertGreaterEqual(self, len(result['entries']), 54)
def test_soundcloud_set(self):
dl = FakeYDL()
ie = SoundcloudSetIE(dl)
result = ie.extract('https://soundcloud.com/the-concept-band/sets/the-royal-concept-ep')
self.assertIsPlaylist(result)
self.assertEqual(result['title'], 'The Royal Concept EP')
assertGreaterEqual(self, len(result['entries']), 6)
def test_soundcloud_user(self):
dl = FakeYDL()
ie = SoundcloudUserIE(dl)
result = ie.extract('https://soundcloud.com/the-concept-band')
self.assertIsPlaylist(result)
self.assertEqual(result['id'], '9615865')
assertGreaterEqual(self, len(result['entries']), 12)
def test_soundcloud_likes(self):
dl = FakeYDL()
ie = SoundcloudUserIE(dl)
result = ie.extract('https://soundcloud.com/the-concept-band/likes')
self.assertIsPlaylist(result)
self.assertEqual(result['id'], '9615865')
assertGreaterEqual(self, len(result['entries']), 1)
def test_soundcloud_playlist(self):
dl = FakeYDL()
ie = SoundcloudPlaylistIE(dl)
result = ie.extract('http://api.soundcloud.com/playlists/4110309')
self.assertIsPlaylist(result)
self.assertEqual(result['id'], '4110309')
self.assertEqual(result['title'], 'TILT Brass - Bowery Poetry Club, August \'03 [Non-Site SCR 02]')
assertRegexpMatches(
self, result['description'], r'.*?TILT Brass - Bowery Poetry Club')
self.assertEqual(len(result['entries']), 6)
def test_livestream_event(self):
dl = FakeYDL()
ie = LivestreamIE(dl)
result = ie.extract('http://new.livestream.com/tedx/cityenglish')
self.assertIsPlaylist(result)
self.assertEqual(result['title'], 'TEDCity2.0 (English)')
assertGreaterEqual(self, len(result['entries']), 4)
def test_livestreamoriginal_folder(self):
dl = FakeYDL()
ie = LivestreamOriginalIE(dl)
result = ie.extract('https://www.livestream.com/newplay/folder?dirId=a07bf706-d0e4-4e75-a747-b021d84f2fd3')
self.assertIsPlaylist(result)
self.assertEqual(result['id'], 'a07bf706-d0e4-4e75-a747-b021d84f2fd3')
assertGreaterEqual(self, len(result['entries']), 28)
def test_nhl_videocenter(self):
dl = FakeYDL()
ie = NHLVideocenterIE(dl)
result = ie.extract('http://video.canucks.nhl.com/videocenter/console?catid=999')
self.assertIsPlaylist(result)
self.assertEqual(result['id'], '999')
self.assertEqual(result['title'], 'Highlights')
self.assertEqual(len(result['entries']), 12)
def test_bambuser_channel(self):
dl = FakeYDL()
ie = BambuserChannelIE(dl)
result = ie.extract('http://bambuser.com/channel/pixelversity')
self.assertIsPlaylist(result)
self.assertEqual(result['title'], 'pixelversity')
assertGreaterEqual(self, len(result['entries']), 60)
def test_bandcamp_album(self):
dl = FakeYDL()
ie = BandcampAlbumIE(dl)
result = ie.extract('http://nightbringer.bandcamp.com/album/hierophany-of-the-open-grave')
self.assertIsPlaylist(result)
self.assertEqual(result['title'], 'Hierophany of the Open Grave')
assertGreaterEqual(self, len(result['entries']), 9)
def test_smotri_community(self):
dl = FakeYDL()
ie = SmotriCommunityIE(dl)
result = ie.extract('http://smotri.com/community/video/kommuna')
self.assertIsPlaylist(result)
self.assertEqual(result['id'], 'kommuna')
self.assertEqual(result['title'], 'КПРФ')
assertGreaterEqual(self, len(result['entries']), 4)
def test_smotri_user(self):
dl = FakeYDL()
ie = SmotriUserIE(dl)
result = ie.extract('http://smotri.com/user/inspector')
self.assertIsPlaylist(result)
self.assertEqual(result['id'], 'inspector')
self.assertEqual(result['title'], 'Inspector')
assertGreaterEqual(self, len(result['entries']), 9)
def test_AcademicEarthCourse(self):
dl = FakeYDL()
ie = AcademicEarthCourseIE(dl)
result = ie.extract('http://academicearth.org/playlists/laws-of-nature/')
self.assertIsPlaylist(result)
self.assertEqual(result['id'], 'laws-of-nature')
self.assertEqual(result['title'], 'Laws of Nature')
self.assertEqual(result['description'],u'Introduce yourself to the laws of nature with these free online college lectures from Yale, Harvard, and MIT.')# u"Today's websites are increasingly dynamic. Pages are no longer static HTML files but instead generated by scripts and database calls. User interfaces are more seamless, with technologies like Ajax replacing traditional page reloads. This course teaches students how to build dynamic websites with Ajax and with Linux, Apache, MySQL, and PHP (LAMP), one of today's most popular frameworks. Students learn how to set up domain names with DNS, how to structure pages with XHTML and CSS, how to program in JavaScript and PHP, how to configure Apache and MySQL, how to design and query databases with SQL, how to use Ajax with both XML and JSON, and how to build mashups. The course explores issues of security, scalability, and cross-browser support and also discusses enterprise-level deployments of websites, including third-party hosting, virtualization, colocation in data centers, firewalling, and load-balancing.")
self.assertEqual(len(result['entries']), 4)
def test_ivi_compilation(self):
dl = FakeYDL()
ie = IviCompilationIE(dl)
result = ie.extract('http://www.ivi.ru/watch/dvoe_iz_lartsa')
self.assertIsPlaylist(result)
self.assertEqual(result['id'], 'dvoe_iz_lartsa')
self.assertEqual(result['title'], 'Двое из ларца (2006 - 2008)')
assertGreaterEqual(self, len(result['entries']), 24)
def test_ivi_compilation_season(self):
dl = FakeYDL()
ie = IviCompilationIE(dl)
result = ie.extract('http://www.ivi.ru/watch/dvoe_iz_lartsa/season1')
self.assertIsPlaylist(result)
self.assertEqual(result['id'], 'dvoe_iz_lartsa/season1')
self.assertEqual(result['title'], 'Двое из ларца (2006 - 2008) 1 сезон')
assertGreaterEqual(self, len(result['entries']), 12)
def test_imdb_list(self):
dl = FakeYDL()
ie = ImdbListIE(dl)
result = ie.extract('http://www.imdb.com/list/JFs9NWw6XI0')
self.assertIsPlaylist(result)
self.assertEqual(result['id'], 'JFs9NWw6XI0')
self.assertEqual(result['title'], 'March 23, 2012 Releases')
self.assertEqual(len(result['entries']), 7)
def test_khanacademy_topic(self):
dl = FakeYDL()
ie = KhanAcademyIE(dl)
result = ie.extract('https://www.khanacademy.org/math/applied-math/cryptography')
self.assertIsPlaylist(result)
self.assertEqual(result['id'], 'cryptography')
self.assertEqual(result['title'], 'Journey into cryptography')
self.assertEqual(result['description'], 'How have humans protected their secret messages through history? What has changed today?')
assertGreaterEqual(self, len(result['entries']), 3)
def test_EveryonesMixtape(self):
dl = FakeYDL()
ie = EveryonesMixtapeIE(dl)
result = ie.extract('http://everyonesmixtape.com/#/mix/m7m0jJAbMQi')
self.assertIsPlaylist(result)
self.assertEqual(result['id'], 'm7m0jJAbMQi')
self.assertEqual(result['title'], 'Driving')
self.assertEqual(len(result['entries']), 24)
def test_rutube_channel(self):
dl = FakeYDL()
ie = RutubeChannelIE(dl)
result = ie.extract('http://rutube.ru/tags/video/1800/')
self.assertIsPlaylist(result)
self.assertEqual(result['id'], '1800')
assertGreaterEqual(self, len(result['entries']), 68)
def test_rutube_person(self):
dl = FakeYDL()
ie = RutubePersonIE(dl)
result = ie.extract('http://rutube.ru/video/person/313878/')
self.assertIsPlaylist(result)
self.assertEqual(result['id'], '313878')
assertGreaterEqual(self, len(result['entries']), 37)
def test_multiple_brightcove_videos(self):
# https://github.com/rg3/youtube-dl/issues/2283
dl = FakeYDL()
ie = GenericIE(dl)
result = ie.extract('http://www.newyorker.com/online/blogs/newsdesk/2014/01/always-never-nuclear-command-and-control.html')
self.assertIsPlaylist(result)
self.assertEqual(result['id'], 'always-never-nuclear-command-and-control')
self.assertEqual(result['title'], 'Always/Never: A Little-Seen Movie About Nuclear Command and Control : The New Yorker')
self.assertEqual(len(result['entries']), 3)
def test_GoogleSearch(self):
dl = FakeYDL()
ie = GoogleSearchIE(dl)
result = ie.extract('gvsearch15:python language')
self.assertIsPlaylist(result)
self.assertEqual(result['id'], 'python language')
self.assertEqual(result['title'], 'python language')
self.assertEqual(len(result['entries']), 15)
def test_generic_rss_feed(self):
dl = FakeYDL()
ie = GenericIE(dl)
result = ie.extract('http://phihag.de/2014/youtube-dl/rss.xml')
self.assertIsPlaylist(result)
self.assertEqual(result['id'], 'http://phihag.de/2014/youtube-dl/rss.xml')
self.assertEqual(result['title'], 'Zero Punctuation')
self.assertTrue(len(result['entries']) > 10)
def test_ted_playlist(self):
dl = FakeYDL()
ie = TEDIE(dl)
result = ie.extract('http://www.ted.com/playlists/who_are_the_hackers')
self.assertIsPlaylist(result)
self.assertEqual(result['id'], '10')
self.assertEqual(result['title'], 'Who are the hackers?')
assertGreaterEqual(self, len(result['entries']), 6)
def test_toypics_user(self):
dl = FakeYDL()
ie = ToypicsUserIE(dl)
result = ie.extract('http://videos.toypics.net/Mikey')
self.assertIsPlaylist(result)
self.assertEqual(result['id'], 'Mikey')
assertGreaterEqual(self, len(result['entries']), 17)
def test_xtube_user(self):
dl = FakeYDL()
ie = XTubeUserIE(dl)
result = ie.extract('http://www.xtube.com/community/profile.php?user=greenshowers')
self.assertIsPlaylist(result)
self.assertEqual(result['id'], 'greenshowers')
assertGreaterEqual(self, len(result['entries']), 155)
def test_InstagramUser(self):
dl = FakeYDL()
ie = InstagramUserIE(dl)
result = ie.extract('http://instagram.com/porsche')
self.assertIsPlaylist(result)
self.assertEqual(result['id'], 'porsche')
assertGreaterEqual(self, len(result['entries']), 2)
test_video = next(
e for e in result['entries']
if e['id'] == '614605558512799803_462752227')
dl.add_default_extra_info(test_video, ie, '(irrelevant URL)')
dl.process_video_result(test_video, download=False)
EXPECTED = {
'id': '614605558512799803_462752227',
'ext': 'mp4',
'title': '#Porsche Intelligent Performance.',
'thumbnail': 're:^https?://.*\.jpg',
'uploader': 'Porsche',
'uploader_id': 'porsche',
'timestamp': 1387486713,
'upload_date': '20131219',
}
expect_info_dict(self, EXPECTED, test_video)
def test_CSpan_playlist(self):
dl = FakeYDL()
ie = CSpanIE(dl)
result = ie.extract(
'http://www.c-span.org/video/?318608-1/gm-ignition-switch-recall')
self.assertIsPlaylist(result)
self.assertEqual(result['id'], '342759')
self.assertEqual(
result['title'], 'General Motors Ignition Switch Recall')
whole_duration = sum(e['duration'] for e in result['entries'])
self.assertEqual(whole_duration, 14855)
def test_aol_playlist(self):
dl = FakeYDL()
ie = AolIE(dl)
result = ie.extract(
'http://on.aol.com/playlist/brace-yourself---todays-weirdest-news-152147?icid=OnHomepageC4_Omg_Img#_videoid=518184316')
self.assertIsPlaylist(result)
self.assertEqual(result['id'], '152147')
self.assertEqual(
result['title'], 'Brace Yourself - Today\'s Weirdest News')
assertGreaterEqual(self, len(result['entries']), 10)
def test_TeacherTubeUser(self):
dl = FakeYDL()
ie = TeacherTubeUserIE(dl)
result = ie.extract('http://www.teachertube.com/user/profile/rbhagwati2')
self.assertIsPlaylist(result)
self.assertEqual(result['id'], 'rbhagwati2')
assertGreaterEqual(self, len(result['entries']), 179)
if __name__ == '__main__':
unittest.main()

View File

@ -15,6 +15,7 @@ from youtube_dl.extractor import (
DailymotionIE,
TEDIE,
VimeoIE,
WallaIE,
)
@ -279,5 +280,32 @@ class TestVimeoSubtitles(BaseTestSubtitles):
self.assertTrue(subtitles.get(lang) is not None, u'Subtitles for \'%s\' not extracted' % lang)
class TestWallaSubtitles(BaseTestSubtitles):
url = 'http://vod.walla.co.il/movie/2705958/the-yes-men'
IE = WallaIE
def test_list_subtitles(self):
self.DL.expect_warning(u'Automatic Captions not supported by this server')
self.DL.params['listsubtitles'] = True
info_dict = self.getInfoDict()
self.assertEqual(info_dict, None)
def test_allsubtitles(self):
self.DL.expect_warning(u'Automatic Captions not supported by this server')
self.DL.params['writesubtitles'] = True
self.DL.params['allsubtitles'] = True
subtitles = self.getSubtitles()
self.assertEqual(set(subtitles.keys()), set(['heb']))
self.assertEqual(md5(subtitles['heb']), 'e758c5d7cb982f6bef14f377ec7a3920')
def test_nosubtitles(self):
self.DL.expect_warning(u'video doesn\'t have subtitles')
self.url = 'http://vod.walla.co.il/movie/2642630/one-direction-all-for-one'
self.DL.params['writesubtitles'] = True
self.DL.params['allsubtitles'] = True
subtitles = self.getSubtitles()
self.assertEqual(len(subtitles), 0)
if __name__ == '__main__':
unittest.main()

View File

@ -1,6 +1,8 @@
#!/usr/bin/env python
# coding: utf-8
from __future__ import unicode_literals
# Allow direct execution
import os
import sys
@ -13,7 +15,6 @@ import io
import json
import xml.etree.ElementTree
#from youtube_dl.utils import htmlentity_transform
from youtube_dl.utils import (
DateRange,
encodeFilename,
@ -21,7 +22,8 @@ from youtube_dl.utils import (
fix_xml_ampersands,
get_meta_content,
orderedSet,
PagedList,
OnDemandPagedList,
InAdvancePagedList,
parse_duration,
read_batch_urls,
sanitize_filename,
@ -39,13 +41,12 @@ from youtube_dl.utils import (
parse_iso8601,
strip_jsonp,
uppercase_escape,
limit_length,
escape_rfc3986,
escape_url,
js_to_json,
)
if sys.version_info < (3, 0):
_compat_str = lambda b: b.decode('unicode-escape')
else:
_compat_str = lambda s: s
class TestUtil(unittest.TestCase):
def test_timeconvert(self):
@ -67,9 +68,9 @@ class TestUtil(unittest.TestCase):
self.assertEqual('this - that', sanitize_filename('this: that'))
self.assertEqual(sanitize_filename('AT&T'), 'AT&T')
aumlaut = _compat_str('\xe4')
aumlaut = 'ä'
self.assertEqual(sanitize_filename(aumlaut), aumlaut)
tests = _compat_str('\u043a\u0438\u0440\u0438\u043b\u043b\u0438\u0446\u0430')
tests = '\u043a\u0438\u0440\u0438\u043b\u043b\u0438\u0446\u0430'
self.assertEqual(sanitize_filename(tests), tests)
forbidden = '"\0\\/'
@ -91,9 +92,9 @@ class TestUtil(unittest.TestCase):
self.assertEqual('yes_no', sanitize_filename('yes? no', restricted=True))
self.assertEqual('this_-_that', sanitize_filename('this: that', restricted=True))
tests = _compat_str('a\xe4b\u4e2d\u56fd\u7684c')
tests = 'a\xe4b\u4e2d\u56fd\u7684c'
self.assertEqual(sanitize_filename(tests, restricted=True), 'a_b_c')
self.assertTrue(sanitize_filename(_compat_str('\xf6'), restricted=True) != '') # No empty filename
self.assertTrue(sanitize_filename('\xf6', restricted=True) != '') # No empty filename
forbidden = '"\0\\/&!: \'\t\n()[]{}$;`^,#'
for fc in forbidden:
@ -101,8 +102,8 @@ class TestUtil(unittest.TestCase):
self.assertTrue(fbc not in sanitize_filename(fc, restricted=True))
# Handle a common case more neatly
self.assertEqual(sanitize_filename(_compat_str('\u5927\u58f0\u5e26 - Song'), restricted=True), 'Song')
self.assertEqual(sanitize_filename(_compat_str('\u603b\u7edf: Speech'), restricted=True), 'Speech')
self.assertEqual(sanitize_filename('\u5927\u58f0\u5e26 - Song', restricted=True), 'Song')
self.assertEqual(sanitize_filename('\u603b\u7edf: Speech', restricted=True), 'Speech')
# .. but make sure the file name is never empty
self.assertTrue(sanitize_filename('-', restricted=True) != '')
self.assertTrue(sanitize_filename(':', restricted=True) != '')
@ -120,7 +121,9 @@ class TestUtil(unittest.TestCase):
self.assertEqual(orderedSet([135, 1, 1, 1]), [135, 1])
def test_unescape_html(self):
self.assertEqual(unescapeHTML(_compat_str('%20;')), _compat_str('%20;'))
self.assertEqual(unescapeHTML('%20;'), '%20;')
self.assertEqual(
unescapeHTML('&eacute;'), 'é')
def test_daterange(self):
_20century = DateRange("19000101","20000101")
@ -136,9 +139,10 @@ class TestUtil(unittest.TestCase):
self.assertEqual(unified_strdate('Dec 14, 2012'), '20121214')
self.assertEqual(unified_strdate('2012/10/11 01:56:38 +0000'), '20121011')
self.assertEqual(unified_strdate('1968-12-10'), '19681210')
self.assertEqual(unified_strdate('28/01/2014 21:00:00 +0100'), '20140128')
def test_find_xpath_attr(self):
testxml = u'''<root>
testxml = '''<root>
<node/>
<node x="a"/>
<node x="a" y="c" />
@ -151,18 +155,18 @@ class TestUtil(unittest.TestCase):
self.assertEqual(find_xpath_attr(doc, './/node', 'y', 'c'), doc[2])
def test_meta_parser(self):
testhtml = u'''
testhtml = '''
<head>
<meta name="description" content="foo &amp; bar">
<meta content='Plato' name='author'/>
</head>
'''
get_meta = lambda name: get_meta_content(name, testhtml)
self.assertEqual(get_meta('description'), u'foo & bar')
self.assertEqual(get_meta('description'), 'foo & bar')
self.assertEqual(get_meta('author'), 'Plato')
def test_xpath_with_ns(self):
testxml = u'''<root xmlns:media="http://example.com/">
testxml = '''<root xmlns:media="http://example.com/">
<media:song>
<media:author>The Author</media:author>
<url>http://server.com/download.mp3</url>
@ -171,8 +175,8 @@ class TestUtil(unittest.TestCase):
doc = xml.etree.ElementTree.fromstring(testxml)
find = lambda p: doc.find(xpath_with_ns(p, {'media': 'http://example.com/'}))
self.assertTrue(find('media:song') is not None)
self.assertEqual(find('media:song/media:author').text, u'The Author')
self.assertEqual(find('media:song/url').text, u'http://server.com/download.mp3')
self.assertEqual(find('media:song/media:author').text, 'The Author')
self.assertEqual(find('media:song/url').text, 'http://server.com/download.mp3')
def test_smuggle_url(self):
data = {u"ö": u"ö", u"abc": [3]}
@ -187,22 +191,22 @@ class TestUtil(unittest.TestCase):
self.assertEqual(res_data, None)
def test_shell_quote(self):
args = ['ffmpeg', '-i', encodeFilename(u'ñ€ß\'.mp4')]
self.assertEqual(shell_quote(args), u"""ffmpeg -i 'ñ€ß'"'"'.mp4'""")
args = ['ffmpeg', '-i', encodeFilename('ñ€ß\'.mp4')]
self.assertEqual(shell_quote(args), """ffmpeg -i 'ñ€ß'"'"'.mp4'""")
def test_str_to_int(self):
self.assertEqual(str_to_int('123,456'), 123456)
self.assertEqual(str_to_int('123.456'), 123456)
def test_url_basename(self):
self.assertEqual(url_basename(u'http://foo.de/'), u'')
self.assertEqual(url_basename(u'http://foo.de/bar/baz'), u'baz')
self.assertEqual(url_basename(u'http://foo.de/bar/baz?x=y'), u'baz')
self.assertEqual(url_basename(u'http://foo.de/bar/baz#x=y'), u'baz')
self.assertEqual(url_basename(u'http://foo.de/bar/baz/'), u'baz')
self.assertEqual(url_basename('http://foo.de/'), '')
self.assertEqual(url_basename('http://foo.de/bar/baz'), 'baz')
self.assertEqual(url_basename('http://foo.de/bar/baz?x=y'), 'baz')
self.assertEqual(url_basename('http://foo.de/bar/baz#x=y'), 'baz')
self.assertEqual(url_basename('http://foo.de/bar/baz/'), 'baz')
self.assertEqual(
url_basename(u'http://media.w3.org/2010/05/sintel/trailer.mp4'),
u'trailer.mp4')
url_basename('http://media.w3.org/2010/05/sintel/trailer.mp4'),
'trailer.mp4')
def test_parse_duration(self):
self.assertEqual(parse_duration(None), None)
@ -213,12 +217,16 @@ class TestUtil(unittest.TestCase):
self.assertEqual(parse_duration('00:01:01'), 61)
self.assertEqual(parse_duration('x:y'), None)
self.assertEqual(parse_duration('3h11m53s'), 11513)
self.assertEqual(parse_duration('3h 11m 53s'), 11513)
self.assertEqual(parse_duration('3 hours 11 minutes 53 seconds'), 11513)
self.assertEqual(parse_duration('3 hours 11 mins 53 secs'), 11513)
self.assertEqual(parse_duration('62m45s'), 3765)
self.assertEqual(parse_duration('6m59s'), 419)
self.assertEqual(parse_duration('49s'), 49)
self.assertEqual(parse_duration('0h0m0s'), 0)
self.assertEqual(parse_duration('0m0s'), 0)
self.assertEqual(parse_duration('0s'), 0)
self.assertEqual(parse_duration('01:02:03.05'), 3723.05)
def test_fix_xml_ampersands(self):
self.assertEqual(
@ -241,10 +249,14 @@ class TestUtil(unittest.TestCase):
for i in range(firstid, upto):
yield i
pl = PagedList(get_page, pagesize)
pl = OnDemandPagedList(get_page, pagesize)
got = pl.getslice(*sliceargs)
self.assertEqual(got, expected)
iapl = InAdvancePagedList(get_page, size // pagesize + 1, pagesize)
got = iapl.getslice(*sliceargs)
self.assertEqual(got, expected)
testPL(5, 2, (), [0, 1, 2, 3, 4])
testPL(5, 2, (1,), [1, 2, 3, 4])
testPL(5, 2, (2,), [2, 3, 4])
@ -255,16 +267,16 @@ class TestUtil(unittest.TestCase):
testPL(5, 2, (20, 99), [])
def test_struct_unpack(self):
self.assertEqual(struct_unpack(u'!B', b'\x00'), (0,))
self.assertEqual(struct_unpack('!B', b'\x00'), (0,))
def test_read_batch_urls(self):
f = io.StringIO(u'''\xef\xbb\xbf foo
f = io.StringIO('''\xef\xbb\xbf foo
bar\r
baz
# More after this line\r
; or after this
bam''')
self.assertEqual(read_batch_urls(f), [u'foo', u'bar', u'baz', u'bam'])
self.assertEqual(read_batch_urls(f), ['foo', 'bar', 'baz', 'bam'])
def test_urlencode_postdata(self):
data = urlencode_postdata({'username': 'foo@bar.com', 'password': '1234'})
@ -281,8 +293,67 @@ class TestUtil(unittest.TestCase):
self.assertEqual(d, [{"id": "532cb", "x": 3}])
def test_uppercase_escape(self):
self.assertEqual(uppercase_escape(u''), u'')
self.assertEqual(uppercase_escape(u'\\U0001d550'), u'𝕐')
self.assertEqual(uppercase_escape(''), '')
self.assertEqual(uppercase_escape('\\U0001d550'), '𝕐')
def test_limit_length(self):
self.assertEqual(limit_length(None, 12), None)
self.assertEqual(limit_length('foo', 12), 'foo')
self.assertTrue(
limit_length('foo bar baz asd', 12).startswith('foo bar'))
self.assertTrue('...' in limit_length('foo bar baz asd', 12))
def test_escape_rfc3986(self):
reserved = "!*'();:@&=+$,/?#[]"
unreserved = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_.~'
self.assertEqual(escape_rfc3986(reserved), reserved)
self.assertEqual(escape_rfc3986(unreserved), unreserved)
self.assertEqual(escape_rfc3986('тест'), '%D1%82%D0%B5%D1%81%D1%82')
self.assertEqual(escape_rfc3986('%D1%82%D0%B5%D1%81%D1%82'), '%D1%82%D0%B5%D1%81%D1%82')
self.assertEqual(escape_rfc3986('foo bar'), 'foo%20bar')
self.assertEqual(escape_rfc3986('foo%20bar'), 'foo%20bar')
def test_escape_url(self):
self.assertEqual(
escape_url('http://wowza.imust.org/srv/vod/telemb/new/UPLOAD/UPLOAD/20224_IncendieHavré_FD.mp4'),
'http://wowza.imust.org/srv/vod/telemb/new/UPLOAD/UPLOAD/20224_IncendieHavre%CC%81_FD.mp4'
)
self.assertEqual(
escape_url('http://www.ardmediathek.de/tv/Sturm-der-Liebe/Folge-2036-Zu-Mann-und-Frau-erklärt/Das-Erste/Video?documentId=22673108&bcastId=5290'),
'http://www.ardmediathek.de/tv/Sturm-der-Liebe/Folge-2036-Zu-Mann-und-Frau-erkl%C3%A4rt/Das-Erste/Video?documentId=22673108&bcastId=5290'
)
self.assertEqual(
escape_url('http://тест.рф/фрагмент'),
'http://тест.рф/%D1%84%D1%80%D0%B0%D0%B3%D0%BC%D0%B5%D0%BD%D1%82'
)
self.assertEqual(
escape_url('http://тест.рф/абв?абв=абв#абв'),
'http://тест.рф/%D0%B0%D0%B1%D0%B2?%D0%B0%D0%B1%D0%B2=%D0%B0%D0%B1%D0%B2#%D0%B0%D0%B1%D0%B2'
)
self.assertEqual(escape_url('http://vimeo.com/56015672#at=0'), 'http://vimeo.com/56015672#at=0')
def test_js_to_json_realworld(self):
inp = '''{
'clip':{'provider':'pseudo'}
}'''
self.assertEqual(js_to_json(inp), '''{
"clip":{"provider":"pseudo"}
}''')
json.loads(js_to_json(inp))
inp = '''{
'playlist':[{'controls':{'all':null}}]
}'''
self.assertEqual(js_to_json(inp), '''{
"playlist":[{"controls":{"all":null}}]
}''')
def test_js_to_json_edgecases(self):
on = js_to_json("{abc_def:'1\\'\\\\2\\\\\\'3\"4'}")
self.assertEqual(json.loads(on), {"abc_def": "1'\\2\\'3\"4"})
on = js_to_json('{"abc": true}')
self.assertEqual(json.loads(on), {'abc': True})
if __name__ == '__main__':
unittest.main()

View File

@ -10,7 +10,6 @@ from test.helper import FakeYDL
from youtube_dl.extractor import (
YoutubeUserIE,
YoutubePlaylistIE,
YoutubeIE,
YoutubeChannelIE,
@ -25,15 +24,6 @@ class TestYoutubeLists(unittest.TestCase):
"""Make sure the info has '_type' set to 'playlist'"""
self.assertEqual(info['_type'], 'playlist')
def test_youtube_playlist(self):
dl = FakeYDL()
ie = YoutubePlaylistIE(dl)
result = ie.extract('https://www.youtube.com/playlist?list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re')
self.assertIsPlaylist(result)
self.assertEqual(result['title'], 'ytdl test PL')
ytie_results = [YoutubeIE().extract_id(url['url']) for url in result['entries']]
self.assertEqual(ytie_results, [ 'bV9L5Ht9LgY', 'FXxLjLQi3Fg', 'tU3Bgo5qJZE'])
def test_youtube_playlist_noplaylist(self):
dl = FakeYDL()
dl.params['noplaylist'] = True
@ -41,36 +31,7 @@ class TestYoutubeLists(unittest.TestCase):
result = ie.extract('https://www.youtube.com/watch?v=FXxLjLQi3Fg&list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re')
self.assertEqual(result['_type'], 'url')
self.assertEqual(YoutubeIE().extract_id(result['url']), 'FXxLjLQi3Fg')
def test_issue_673(self):
dl = FakeYDL()
ie = YoutubePlaylistIE(dl)
result = ie.extract('PLBB231211A4F62143')
self.assertTrue(len(result['entries']) > 25)
def test_youtube_playlist_long(self):
dl = FakeYDL()
ie = YoutubePlaylistIE(dl)
result = ie.extract('https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q')
self.assertIsPlaylist(result)
self.assertTrue(len(result['entries']) >= 799)
def test_youtube_playlist_with_deleted(self):
#651
dl = FakeYDL()
ie = YoutubePlaylistIE(dl)
result = ie.extract('https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC')
ytie_results = [YoutubeIE().extract_id(url['url']) for url in result['entries']]
self.assertFalse('pElCt5oNDuI' in ytie_results)
self.assertFalse('KdPEApIVdWM' in ytie_results)
def test_youtube_playlist_empty(self):
dl = FakeYDL()
ie = YoutubePlaylistIE(dl)
result = ie.extract('https://www.youtube.com/playlist?list=PLtPgu7CB4gbZDA7i_euNxn75ISqxwZPYx')
self.assertIsPlaylist(result)
self.assertEqual(len(result['entries']), 0)
def test_youtube_course(self):
dl = FakeYDL()
ie = YoutubePlaylistIE(dl)
@ -81,34 +42,6 @@ class TestYoutubeLists(unittest.TestCase):
self.assertEqual(len(entries), 25)
self.assertEqual(YoutubeIE().extract_id(entries[-1]['url']), 'rYefUsYuEp0')
def test_youtube_channel(self):
dl = FakeYDL()
ie = YoutubeChannelIE(dl)
#test paginated channel
result = ie.extract('https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w')
self.assertTrue(len(result['entries']) > 90)
#test autogenerated channel
result = ie.extract('https://www.youtube.com/channel/HCtnHdj3df7iM/videos')
self.assertTrue(len(result['entries']) >= 18)
def test_youtube_user(self):
dl = FakeYDL()
ie = YoutubeUserIE(dl)
result = ie.extract('https://www.youtube.com/user/TheLinuxFoundation')
self.assertTrue(len(result['entries']) >= 320)
def test_youtube_safe_search(self):
dl = FakeYDL()
ie = YoutubePlaylistIE(dl)
result = ie.extract('PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl')
self.assertEqual(len(result['entries']), 2)
def test_youtube_show(self):
dl = FakeYDL()
ie = YoutubeShowIE(dl)
result = ie.extract('http://www.youtube.com/show/airdisasters')
self.assertTrue(len(result) >= 3)
def test_youtube_mix(self):
dl = FakeYDL()
ie = YoutubePlaylistIE(dl)
@ -127,21 +60,5 @@ class TestYoutubeLists(unittest.TestCase):
entries = result['entries']
self.assertEqual(len(entries), 100)
def test_youtube_toplist(self):
dl = FakeYDL()
ie = YoutubeTopListIE(dl)
result = ie.extract('yttoplist:music:Trending')
entries = result['entries']
self.assertTrue(len(entries) >= 5)
def test_youtube_search_url(self):
dl = FakeYDL()
ie = YoutubeSearchURLIE(dl)
result = ie.extract('https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video')
entries = result['entries']
self.assertIsPlaylist(result)
self.assertEqual(result['title'], 'youtube-dl test video')
self.assertTrue(len(entries) >= 5)
if __name__ == '__main__':
unittest.main()

View File

@ -47,18 +47,6 @@ _TESTS = [
'2ACFC7A61CA478CD21425E5A57EBD73DDC78E22A.2094302436B2D377D14A3BBA23022D023B8BC25AA',
'A52CB8B320D22032ABB3A41D773D2B6342034902.A22E87CDD37DBE75A5E52412DC874AC16A7CFCA2',
),
(
'http://s.ytimg.com/yts/swfbin/player-vfl5vIhK2/watch_as3.swf',
'swf',
86,
'O1I3456789abcde0ghijklmnopqrstuvwxyzABCDEFGHfJKLMN2PQRSTUVWXY\\!"#$%&\'()*+,-./:;<=>?'
),
(
'http://s.ytimg.com/yts/swfbin/player-vflmDyk47/watch_as3.swf',
'swf',
'F375F75BF2AFDAAF2666E43868D46816F83F13E81C46.3725A8218E446A0DECD33F79DC282994D6AA92C92C9',
'9C29AA6D499282CD97F33DCED0A644E8128A5273.64C18E31F38361864D86834E6662FAADFA2FB57F'
),
(
'https://s.ytimg.com/yts/jsbin/html5player-en_US-vflBb0OQx.js',
'js',

View File

@ -1,89 +0,0 @@
#!/usr/bin/env python
import sys, os
import json, hashlib
try:
import urllib.request as compat_urllib_request
except ImportError: # Python 2
import urllib2 as compat_urllib_request
def rsa_verify(message, signature, key):
from struct import pack
from hashlib import sha256
from sys import version_info
def b(x):
if version_info[0] == 2: return x
else: return x.encode('latin1')
assert(type(message) == type(b('')))
block_size = 0
n = key[0]
while n:
block_size += 1
n >>= 8
signature = pow(int(signature, 16), key[1], key[0])
raw_bytes = []
while signature:
raw_bytes.insert(0, pack("B", signature & 0xFF))
signature >>= 8
signature = (block_size - len(raw_bytes)) * b('\x00') + b('').join(raw_bytes)
if signature[0:2] != b('\x00\x01'): return False
signature = signature[2:]
if not b('\x00') in signature: return False
signature = signature[signature.index(b('\x00'))+1:]
if not signature.startswith(b('\x30\x31\x30\x0D\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01\x05\x00\x04\x20')): return False
signature = signature[19:]
if signature != sha256(message).digest(): return False
return True
sys.stderr.write(u'Hi! We changed distribution method and now youtube-dl needs to update itself one more time.\n')
sys.stderr.write(u'This will only happen once. Simply press enter to go on. Sorry for the trouble!\n')
sys.stderr.write(u'From now on, get the binaries from http://rg3.github.io/youtube-dl/download.html, not from the git repository.\n\n')
try:
raw_input()
except NameError: # Python 3
input()
filename = sys.argv[0]
UPDATE_URL = "http://rg3.github.io/youtube-dl/update/"
VERSION_URL = UPDATE_URL + 'LATEST_VERSION'
JSON_URL = UPDATE_URL + 'versions.json'
UPDATES_RSA_KEY = (0x9d60ee4d8f805312fdb15a62f87b95bd66177b91df176765d13514a0f1754bcd2057295c5b6f1d35daa6742c3ffc9a82d3e118861c207995a8031e151d863c9927e304576bc80692bc8e094896fcf11b66f3e29e04e3a71e9a11558558acea1840aec37fc396fb6b65dc81a1c4144e03bd1c011de62e3f1357b327d08426fe93, 65537)
if not os.access(filename, os.W_OK):
sys.exit('ERROR: no write permissions on %s' % filename)
try:
versions_info = compat_urllib_request.urlopen(JSON_URL).read().decode('utf-8')
versions_info = json.loads(versions_info)
except:
sys.exit(u'ERROR: can\'t obtain versions info. Please try again later.')
if not 'signature' in versions_info:
sys.exit(u'ERROR: the versions file is not signed or corrupted. Aborting.')
signature = versions_info['signature']
del versions_info['signature']
if not rsa_verify(json.dumps(versions_info, sort_keys=True).encode('utf-8'), signature, UPDATES_RSA_KEY):
sys.exit(u'ERROR: the versions file signature is invalid. Aborting.')
version = versions_info['versions'][versions_info['latest']]
try:
urlh = compat_urllib_request.urlopen(version['bin'][0])
newcontent = urlh.read()
urlh.close()
except (IOError, OSError) as err:
sys.exit('ERROR: unable to download latest version')
newcontent_hash = hashlib.sha256(newcontent).hexdigest()
if newcontent_hash != version['bin'][1]:
sys.exit(u'ERROR: the downloaded file hash does not match. Aborting.')
try:
with open(filename, 'wb') as outf:
outf.write(newcontent)
except (IOError, OSError) as err:
sys.exit('ERROR: unable to overwrite current version')
sys.stderr.write(u'Done! Now you can run youtube-dl.\n')

Binary file not shown.

View File

@ -28,6 +28,7 @@ from .utils import (
compat_str,
compat_urllib_error,
compat_urllib_request,
escape_url,
ContentTooShortError,
date_from_str,
DateRange,
@ -57,6 +58,7 @@ from .utils import (
YoutubeDLHandler,
prepend_extension,
)
from .cache import Cache
from .extractor import get_info_extractor, gen_extractors
from .downloader import get_suitable_downloader
from .postprocessor import FFmpegMergerPP
@ -133,7 +135,7 @@ class YoutubeDL(object):
daterange: A DateRange object, download only if the upload_date is in the range.
skip_download: Skip the actual download of the video file
cachedir: Location of the cache files in the filesystem.
None to disable filesystem cache.
False to disable filesystem cache.
noplaylist: Download single video instead of a playlist if in doubt.
age_limit: An integer representing the user's age in years.
Unsuitable videos for the given age are skipped.
@ -172,6 +174,7 @@ class YoutubeDL(object):
The following options are used by the post processors:
prefer_ffmpeg: If True, use ffmpeg instead of avconv if both are available,
otherwise prefer avconv.
exec_cmd: Arbitrary command to run after downloading
"""
params = None
@ -194,6 +197,7 @@ class YoutubeDL(object):
self._screen_file = [sys.stdout, sys.stderr][params.get('logtostderr', False)]
self._err_file = sys.stderr
self.params = params
self.cache = Cache(self)
if params.get('bidi_workaround', False):
try:
@ -224,11 +228,11 @@ class YoutubeDL(object):
if (sys.version_info >= (3,) and sys.platform != 'win32' and
sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968']
and not params['restrictfilenames']):
and not params.get('restrictfilenames', False)):
# On Python 3, the Unicode filesystem API will throw errors (#1474)
self.report_warning(
'Assuming --restrict-filenames since file system encoding '
'cannot encode all charactes. '
'cannot encode all characters. '
'Set the LC_ALL environment variable to fix this.')
self.params['restrictfilenames'] = True
@ -424,7 +428,7 @@ class YoutubeDL(object):
autonumber_templ = '%0' + str(autonumber_size) + 'd'
template_dict['autonumber'] = autonumber_templ % self._num_downloads
if template_dict.get('playlist_index') is not None:
template_dict['playlist_index'] = '%05d' % template_dict['playlist_index']
template_dict['playlist_index'] = '%0*d' % (len(str(template_dict['n_entries'])), template_dict['playlist_index'])
if template_dict.get('resolution') is None:
if template_dict.get('width') and template_dict.get('height'):
template_dict['resolution'] = '%dx%d' % (template_dict['width'], template_dict['height'])
@ -636,6 +640,7 @@ class YoutubeDL(object):
for i, entry in enumerate(entries, 1):
self.to_screen('[download] Downloading video #%s of %s' % (i, n_entries))
extra = {
'n_entries': n_entries,
'playlist': playlist,
'playlist_index': i + playliststart,
'extractor': ie_result['extractor'],
@ -703,7 +708,7 @@ class YoutubeDL(object):
if video_formats:
return video_formats[0]
else:
extensions = ['mp4', 'flv', 'webm', '3gp']
extensions = ['mp4', 'flv', 'webm', '3gp', 'm4a']
if format_spec in extensions:
filter_f = lambda f: f['ext'] == format_spec
else:
@ -804,28 +809,29 @@ class YoutubeDL(object):
if req_format in ('-1', 'all'):
formats_to_download = formats
else:
# We can accept formats requested in the format: 34/5/best, we pick
# the first that is available, starting from left
req_formats = req_format.split('/')
for rf in req_formats:
if re.match(r'.+?\+.+?', rf) is not None:
# Two formats have been requested like '137+139'
format_1, format_2 = rf.split('+')
formats_info = (self.select_format(format_1, formats),
self.select_format(format_2, formats))
if all(formats_info):
selected_format = {
'requested_formats': formats_info,
'format': rf,
'ext': formats_info[0]['ext'],
}
for rfstr in req_format.split(','):
# We can accept formats requested in the format: 34/5/best, we pick
# the first that is available, starting from left
req_formats = rfstr.split('/')
for rf in req_formats:
if re.match(r'.+?\+.+?', rf) is not None:
# Two formats have been requested like '137+139'
format_1, format_2 = rf.split('+')
formats_info = (self.select_format(format_1, formats),
self.select_format(format_2, formats))
if all(formats_info):
selected_format = {
'requested_formats': formats_info,
'format': rf,
'ext': formats_info[0]['ext'],
}
else:
selected_format = None
else:
selected_format = None
else:
selected_format = self.select_format(rf, formats)
if selected_format is not None:
formats_to_download = [selected_format]
break
selected_format = self.select_format(rf, formats)
if selected_format is not None:
formats_to_download.append(selected_format)
break
if not formats_to_download:
raise ExtractorError('requested format not available',
expected=True)
@ -1237,6 +1243,26 @@ class YoutubeDL(object):
def urlopen(self, req):
""" Start an HTTP download """
# According to RFC 3986, URLs can not contain non-ASCII characters, however this is not
# always respected by websites, some tend to give out URLs with non percent-encoded
# non-ASCII characters (see telemb.py, ard.py [#3412])
# urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991)
# To work around aforementioned issue we will replace request's original URL with
# percent-encoded one
req_is_string = isinstance(req, basestring if sys.version_info < (3, 0) else compat_str)
url = req if req_is_string else req.get_full_url()
url_escaped = escape_url(url)
# Substitute URL if any change after escaping
if url != url_escaped:
if req_is_string:
req = url_escaped
else:
req = compat_urllib_request.Request(
url_escaped, data=req.data, headers=req.headers,
origin_req_host=req.origin_req_host, unverifiable=req.unverifiable)
return self._opener.open(req, timeout=self._socket_timeout)
def print_debug_header(self):

View File

@ -72,29 +72,34 @@ __authors__ = (
'Alexander Kirk',
'Erik Johnson',
'Keith Beckman',
'Ole Ernst',
'Aaron McDaniel (mcd1992)',
'Magnus Kolstad',
'Hari Padmanaban',
'Carlos Ramos',
'5moufl',
'lenaten',
)
__license__ = 'Public Domain'
import codecs
import io
import optparse
import os
import random
import shlex
import shutil
import sys
from .options import (
parseOpts,
)
from .utils import (
compat_getpass,
compat_print,
DateRange,
DEFAULT_OUTTMPL,
decodeOption,
get_term_width,
DownloadError,
get_cachedir,
MaxDownloadsReached,
preferredencoding,
read_batch_urls,
@ -108,7 +113,6 @@ from .downloader import (
FileDownloader,
)
from .extractor import gen_extractors
from .version import __version__
from .YoutubeDL import YoutubeDL
from .postprocessor import (
AtomicParsleyPP,
@ -118,474 +122,10 @@ from .postprocessor import (
FFmpegExtractAudioPP,
FFmpegEmbedSubtitlePP,
XAttrMetadataPP,
ExecAfterDownloadPP,
)
def parseOpts(overrideArguments=None):
def _readOptions(filename_bytes, default=[]):
try:
optionf = open(filename_bytes)
except IOError:
return default # silently skip if file is not present
try:
res = []
for l in optionf:
res += shlex.split(l, comments=True)
finally:
optionf.close()
return res
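A quick illustration of the shlex parsing above, with hypothetical file contents:

    # ~/.config/youtube-dl/config (hypothetical):
    #   # always prefer free formats
    #   --prefer-free-formats
    #   -o "~/videos/%(title)s.%(ext)s"
    # _readOptions() then returns:
    #   ['--prefer-free-formats', '-o', '~/videos/%(title)s.%(ext)s']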
def _readUserConf():
xdg_config_home = os.environ.get('XDG_CONFIG_HOME')
if xdg_config_home:
userConfFile = os.path.join(xdg_config_home, 'youtube-dl', 'config')
if not os.path.isfile(userConfFile):
userConfFile = os.path.join(xdg_config_home, 'youtube-dl.conf')
else:
userConfFile = os.path.join(os.path.expanduser('~'), '.config', 'youtube-dl', 'config')
if not os.path.isfile(userConfFile):
userConfFile = os.path.join(os.path.expanduser('~'), '.config', 'youtube-dl.conf')
userConf = _readOptions(userConfFile, None)
if userConf is None:
appdata_dir = os.environ.get('appdata')
if appdata_dir:
userConf = _readOptions(
os.path.join(appdata_dir, 'youtube-dl', 'config'),
default=None)
if userConf is None:
userConf = _readOptions(
os.path.join(appdata_dir, 'youtube-dl', 'config.txt'),
default=None)
if userConf is None:
userConf = _readOptions(
os.path.join(os.path.expanduser('~'), 'youtube-dl.conf'),
default=None)
if userConf is None:
userConf = _readOptions(
os.path.join(os.path.expanduser('~'), 'youtube-dl.conf.txt'),
default=None)
if userConf is None:
userConf = []
return userConf
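Summarizing the fall-through above, the user configuration is looked up in this order (the first file that exists and can be read wins):

    # 1. $XDG_CONFIG_HOME/youtube-dl/config   (or ~/.config/youtube-dl/config)
    # 2. $XDG_CONFIG_HOME/youtube-dl.conf     (or ~/.config/youtube-dl.conf)
    # 3. %APPDATA%/youtube-dl/config, then %APPDATA%/youtube-dl/config.txt
    # 4. ~/youtube-dl.conf, then ~/youtube-dl.conf.txt
    # 5. otherwise an empty option list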
def _format_option_string(option):
''' ('-o', '--option') -> -o, --option METAVAR'''
opts = []
if option._short_opts:
opts.append(option._short_opts[0])
if option._long_opts:
opts.append(option._long_opts[0])
if len(opts) > 1:
opts.insert(1, ', ')
if option.takes_value(): opts.append(' %s' % option.metavar)
return "".join(opts)
def _comma_separated_values_options_callback(option, opt_str, value, parser):
setattr(parser.values, option.dest, value.split(','))
def _hide_login_info(opts):
opts = list(opts)
for private_opt in ['-p', '--password', '-u', '--username', '--video-password']:
try:
i = opts.index(private_opt)
opts[i+1] = '<PRIVATE>'
except ValueError:
pass
return opts
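For example (values are illustrative):

    # _hide_login_info(['-u', 'alice@example.com', '-p', 'hunter2', '-f', 'best'])
    # -> ['-u', '<PRIVATE>', '-p', '<PRIVATE>', '-f', 'best']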
max_width = 80
max_help_position = 80
# No need to wrap help messages if we're on a wide console
columns = get_term_width()
if columns: max_width = columns
fmt = optparse.IndentedHelpFormatter(width=max_width, max_help_position=max_help_position)
fmt.format_option_strings = _format_option_string
kw = {
'version' : __version__,
'formatter' : fmt,
'usage' : '%prog [options] url [url...]',
'conflict_handler' : 'resolve',
}
parser = optparse.OptionParser(**kw)
# option groups
general = optparse.OptionGroup(parser, 'General Options')
selection = optparse.OptionGroup(parser, 'Video Selection')
authentication = optparse.OptionGroup(parser, 'Authentication Options')
video_format = optparse.OptionGroup(parser, 'Video Format Options')
subtitles = optparse.OptionGroup(parser, 'Subtitle Options')
downloader = optparse.OptionGroup(parser, 'Download Options')
postproc = optparse.OptionGroup(parser, 'Post-processing Options')
filesystem = optparse.OptionGroup(parser, 'Filesystem Options')
workarounds = optparse.OptionGroup(parser, 'Workarounds')
verbosity = optparse.OptionGroup(parser, 'Verbosity / Simulation Options')
general.add_option('-h', '--help',
action='help', help='print this help text and exit')
general.add_option('-v', '--version',
action='version', help='print program version and exit')
general.add_option('-U', '--update',
action='store_true', dest='update_self', help='update this program to latest version. Make sure that you have sufficient permissions (run with sudo if needed)')
general.add_option('-i', '--ignore-errors',
action='store_true', dest='ignoreerrors', help='continue on download errors, for example to skip unavailable videos in a playlist', default=False)
general.add_option('--abort-on-error',
action='store_false', dest='ignoreerrors',
help='Abort downloading of further videos (in the playlist or the command line) if an error occurs')
general.add_option('--dump-user-agent',
action='store_true', dest='dump_user_agent',
help='display the current browser identification', default=False)
general.add_option('--list-extractors',
action='store_true', dest='list_extractors',
help='List all supported extractors and the URLs they would handle', default=False)
general.add_option('--extractor-descriptions',
action='store_true', dest='list_extractor_descriptions',
help='Output descriptions of all supported extractors', default=False)
general.add_option(
'--proxy', dest='proxy', default=None, metavar='URL',
help='Use the specified HTTP/HTTPS proxy. Pass in an empty string (--proxy "") for direct connection')
general.add_option(
'--socket-timeout', dest='socket_timeout',
type=float, default=None, help=u'Time to wait before giving up, in seconds')
general.add_option(
'--default-search',
dest='default_search', metavar='PREFIX',
help='Use this prefix for unqualified URLs. For example "gvsearch2:" downloads two videos from google videos for youtube-dl "large apple". Use the value "auto" to let youtube-dl guess ("auto_warning" to emit a warning when guessing). "error" just throws an error. The default value "fixup_error" repairs broken URLs, but emits an error if this is not possible instead of searching.')
general.add_option(
'--ignore-config',
action='store_true',
help='Do not read configuration files. When given in the global configuration file /etc/youtube-dl.conf: do not read the user configuration in ~/.config/youtube-dl.conf (%APPDATA%/youtube-dl/config.txt on Windows)')
selection.add_option(
'--playlist-start',
dest='playliststart', metavar='NUMBER', default=1, type=int,
help='playlist video to start at (default is %default)')
selection.add_option(
'--playlist-end',
dest='playlistend', metavar='NUMBER', default=None, type=int,
help='playlist video to end at (default is last)')
selection.add_option('--match-title', dest='matchtitle', metavar='REGEX',help='download only matching titles (regex or caseless sub-string)')
selection.add_option('--reject-title', dest='rejecttitle', metavar='REGEX',help='skip download for matching titles (regex or caseless sub-string)')
selection.add_option('--max-downloads', metavar='NUMBER',
dest='max_downloads', type=int, default=None,
help='Abort after downloading NUMBER files')
selection.add_option('--min-filesize', metavar='SIZE', dest='min_filesize', help="Do not download any videos smaller than SIZE (e.g. 50k or 44.6m)", default=None)
selection.add_option('--max-filesize', metavar='SIZE', dest='max_filesize', help="Do not download any videos larger than SIZE (e.g. 50k or 44.6m)", default=None)
selection.add_option('--date', metavar='DATE', dest='date', help='download only videos uploaded in this date', default=None)
selection.add_option(
'--datebefore', metavar='DATE', dest='datebefore', default=None,
help='download only videos uploaded on or before this date (i.e. inclusive)')
selection.add_option(
'--dateafter', metavar='DATE', dest='dateafter', default=None,
help='download only videos uploaded on or after this date (i.e. inclusive)')
selection.add_option(
'--min-views', metavar='COUNT', dest='min_views',
default=None, type=int,
help="Do not download any videos with less than COUNT views",)
selection.add_option(
'--max-views', metavar='COUNT', dest='max_views',
default=None, type=int,
help="Do not download any videos with more than COUNT views",)
selection.add_option('--no-playlist', action='store_true', dest='noplaylist', help='download only the currently playing video', default=False)
selection.add_option('--age-limit', metavar='YEARS', dest='age_limit',
help='download only videos suitable for the given age',
default=None, type=int)
selection.add_option('--download-archive', metavar='FILE',
dest='download_archive',
help='Download only videos not listed in the archive file. Record the IDs of all downloaded videos in it.')
selection.add_option(
'--include-ads', dest='include_ads',
action='store_true',
help='Download advertisements as well (experimental)')
selection.add_option(
'--youtube-include-dash-manifest', action='store_true',
dest='youtube_include_dash_manifest', default=False,
help='Try to download the DASH manifest on YouTube videos (experimental)')
authentication.add_option('-u', '--username',
dest='username', metavar='USERNAME', help='account username')
authentication.add_option('-p', '--password',
dest='password', metavar='PASSWORD', help='account password')
authentication.add_option('-n', '--netrc',
action='store_true', dest='usenetrc', help='use .netrc authentication data', default=False)
authentication.add_option('--video-password',
dest='videopassword', metavar='PASSWORD', help='video password (vimeo, smotri)')
video_format.add_option('-f', '--format',
action='store', dest='format', metavar='FORMAT', default=None,
help='video format code, specify the order of preference using slashes: "-f 22/17/18". "-f mp4" and "-f flv" are also supported. You can also use the special names "best", "bestvideo", "bestaudio", "worst", "worstvideo" and "worstaudio". By default, youtube-dl will pick the best quality.')
video_format.add_option('--all-formats',
action='store_const', dest='format', help='download all available video formats', const='all')
video_format.add_option('--prefer-free-formats',
action='store_true', dest='prefer_free_formats', default=False, help='prefer free video formats unless a specific one is requested')
video_format.add_option('--max-quality',
action='store', dest='format_limit', metavar='FORMAT', help='highest quality format to download')
video_format.add_option('-F', '--list-formats',
action='store_true', dest='listformats', help='list all available formats')
subtitles.add_option('--write-sub', '--write-srt',
action='store_true', dest='writesubtitles',
help='write subtitle file', default=False)
subtitles.add_option('--write-auto-sub', '--write-automatic-sub',
action='store_true', dest='writeautomaticsub',
help='write automatic subtitle file (youtube only)', default=False)
subtitles.add_option('--all-subs',
action='store_true', dest='allsubtitles',
help='downloads all the available subtitles of the video', default=False)
subtitles.add_option('--list-subs',
action='store_true', dest='listsubtitles',
help='lists all available subtitles for the video', default=False)
subtitles.add_option('--sub-format',
action='store', dest='subtitlesformat', metavar='FORMAT',
help='subtitle format (default=srt) ([sbv/vtt] youtube only)', default='srt')
subtitles.add_option('--sub-lang', '--sub-langs', '--srt-lang',
action='callback', dest='subtitleslangs', metavar='LANGS', type='str',
default=[], callback=_comma_separated_values_options_callback,
help='languages of the subtitles to download (optional) separated by commas, use IETF language tags like \'en,pt\'')
downloader.add_option('-r', '--rate-limit',
dest='ratelimit', metavar='LIMIT', help='maximum download rate in bytes per second (e.g. 50K or 4.2M)')
downloader.add_option('-R', '--retries',
dest='retries', metavar='RETRIES', help='number of retries (default is %default)', default=10)
downloader.add_option('--buffer-size',
dest='buffersize', metavar='SIZE', help='size of download buffer (e.g. 1024 or 16K) (default is %default)', default="1024")
downloader.add_option('--no-resize-buffer',
action='store_true', dest='noresizebuffer',
help='do not automatically adjust the buffer size. By default, the buffer size is automatically resized from an initial value of SIZE.', default=False)
downloader.add_option('--test', action='store_true', dest='test', default=False, help=optparse.SUPPRESS_HELP)
workarounds.add_option(
'--encoding', dest='encoding', metavar='ENCODING',
help='Force the specified encoding (experimental)')
workarounds.add_option(
'--no-check-certificate', action='store_true',
dest='no_check_certificate', default=False,
help='Suppress HTTPS certificate validation.')
workarounds.add_option(
'--prefer-insecure', '--prefer-unsecure', action='store_true', dest='prefer_insecure',
help='Use an unencrypted connection to retrieve information about the video. (Currently supported only for YouTube)')
workarounds.add_option(
'--user-agent', metavar='UA',
dest='user_agent', help='specify a custom user agent')
workarounds.add_option(
'--referer', metavar='REF',
dest='referer', default=None,
help='specify a custom referer, use if the video access is restricted to one domain',
)
workarounds.add_option(
'--add-header', metavar='FIELD:VALUE',
dest='headers', action='append',
help='specify a custom HTTP header and its value, separated by a colon \':\'. You can use this option multiple times',
)
workarounds.add_option(
'--bidi-workaround', dest='bidi_workaround', action='store_true',
help=u'Work around terminals that lack bidirectional text support. Requires bidiv or fribidi executable in PATH')
verbosity.add_option('-q', '--quiet',
action='store_true', dest='quiet', help='activates quiet mode', default=False)
verbosity.add_option(
'--no-warnings',
dest='no_warnings', action='store_true', default=False,
help='Ignore warnings')
verbosity.add_option('-s', '--simulate',
action='store_true', dest='simulate', help='do not download the video and do not write anything to disk', default=False)
verbosity.add_option('--skip-download',
action='store_true', dest='skip_download', help='do not download the video', default=False)
verbosity.add_option('-g', '--get-url',
action='store_true', dest='geturl', help='simulate, quiet but print URL', default=False)
verbosity.add_option('-e', '--get-title',
action='store_true', dest='gettitle', help='simulate, quiet but print title', default=False)
verbosity.add_option('--get-id',
action='store_true', dest='getid', help='simulate, quiet but print id', default=False)
verbosity.add_option('--get-thumbnail',
action='store_true', dest='getthumbnail',
help='simulate, quiet but print thumbnail URL', default=False)
verbosity.add_option('--get-description',
action='store_true', dest='getdescription',
help='simulate, quiet but print video description', default=False)
verbosity.add_option('--get-duration',
action='store_true', dest='getduration',
help='simulate, quiet but print video length', default=False)
verbosity.add_option('--get-filename',
action='store_true', dest='getfilename',
help='simulate, quiet but print output filename', default=False)
verbosity.add_option('--get-format',
action='store_true', dest='getformat',
help='simulate, quiet but print output format', default=False)
verbosity.add_option('-j', '--dump-json',
action='store_true', dest='dumpjson',
help='simulate, quiet but print JSON information. See --output for a description of available keys.', default=False)
verbosity.add_option('--newline',
action='store_true', dest='progress_with_newline', help='output progress bar as new lines', default=False)
verbosity.add_option('--no-progress',
action='store_true', dest='noprogress', help='do not print progress bar', default=False)
verbosity.add_option('--console-title',
action='store_true', dest='consoletitle',
help='display progress in console titlebar', default=False)
verbosity.add_option('-v', '--verbose',
action='store_true', dest='verbose', help='print various debugging information', default=False)
verbosity.add_option('--dump-intermediate-pages',
action='store_true', dest='dump_intermediate_pages', default=False,
help='print downloaded pages to debug problems (very verbose)')
verbosity.add_option('--write-pages',
action='store_true', dest='write_pages', default=False,
help='Write downloaded intermediary pages to files in the current directory to debug problems')
verbosity.add_option('--youtube-print-sig-code',
action='store_true', dest='youtube_print_sig_code', default=False,
help=optparse.SUPPRESS_HELP)
verbosity.add_option('--print-traffic',
dest='debug_printtraffic', action='store_true', default=False,
help='Display sent and read HTTP traffic')
filesystem.add_option('-a', '--batch-file',
dest='batchfile', metavar='FILE', help='file containing URLs to download (\'-\' for stdin)')
filesystem.add_option('--id',
action='store_true', dest='useid', help='use only video ID in file name', default=False)
filesystem.add_option('-A', '--auto-number',
action='store_true', dest='autonumber',
help='number downloaded files starting from 00000', default=False)
filesystem.add_option('-o', '--output',
dest='outtmpl', metavar='TEMPLATE',
help=('output filename template. Use %(title)s to get the title, '
'%(uploader)s for the uploader name, %(uploader_id)s for the uploader nickname if different, '
'%(autonumber)s to get an automatically incremented number, '
'%(ext)s for the filename extension, '
'%(format)s for the format description (like "22 - 1280x720" or "HD"), '
'%(format_id)s for the unique id of the format (like Youtube\'s itags: "137"), '
'%(upload_date)s for the upload date (YYYYMMDD), '
'%(extractor)s for the provider (youtube, metacafe, etc), '
'%(id)s for the video id, %(playlist)s for the playlist the video is in, '
'%(playlist_index)s for the position in the playlist and %% for a literal percent. '
'%(height)s and %(width)s for the width and height of the video format. '
'%(resolution)s for a textual description of the resolution of the video format. '
'Use - to output to stdout. Can also be used to download to a different directory, '
'for example with -o \'/my/downloads/%(uploader)s/%(title)s-%(id)s.%(ext)s\' .'))
filesystem.add_option('--autonumber-size',
dest='autonumber_size', metavar='NUMBER',
help='Specifies the number of digits in %(autonumber)s when it is present in output filename template or --auto-number option is given')
filesystem.add_option('--restrict-filenames',
action='store_true', dest='restrictfilenames',
help='Restrict filenames to only ASCII characters, and avoid "&" and spaces in filenames', default=False)
filesystem.add_option('-t', '--title',
action='store_true', dest='usetitle', help='[deprecated] use title in file name (default)', default=False)
filesystem.add_option('-l', '--literal',
action='store_true', dest='usetitle', help='[deprecated] alias of --title', default=False)
filesystem.add_option('-w', '--no-overwrites',
action='store_true', dest='nooverwrites', help='do not overwrite files', default=False)
filesystem.add_option('-c', '--continue',
action='store_true', dest='continue_dl', help='force resume of partially downloaded files. By default, youtube-dl will resume downloads if possible.', default=True)
filesystem.add_option('--no-continue',
action='store_false', dest='continue_dl',
help='do not resume partially downloaded files (restart from beginning)')
filesystem.add_option('--no-part',
action='store_true', dest='nopart', help='do not use .part files', default=False)
filesystem.add_option('--no-mtime',
action='store_false', dest='updatetime',
help='do not use the Last-modified header to set the file modification time', default=True)
filesystem.add_option('--write-description',
action='store_true', dest='writedescription',
help='write video description to a .description file', default=False)
filesystem.add_option('--write-info-json',
action='store_true', dest='writeinfojson',
help='write video metadata to a .info.json file', default=False)
filesystem.add_option('--write-annotations',
action='store_true', dest='writeannotations',
help='write video annotations to a .annotation file', default=False)
filesystem.add_option('--write-thumbnail',
action='store_true', dest='writethumbnail',
help='write thumbnail image to disk', default=False)
filesystem.add_option('--load-info',
dest='load_info_filename', metavar='FILE',
help='json file containing the video information (created with the "--write-json" option)')
filesystem.add_option('--cookies',
dest='cookiefile', metavar='FILE', help='file to read cookies from and dump cookie jar in')
filesystem.add_option(
'--cache-dir', dest='cachedir', default=get_cachedir(), metavar='DIR',
help='Location in the filesystem where youtube-dl can store some downloaded information permanently. By default $XDG_CACHE_HOME/youtube-dl or ~/.cache/youtube-dl . At the moment, only YouTube player files (for videos with obfuscated signatures) are cached, but that may change.')
filesystem.add_option(
'--no-cache-dir', action='store_const', const=None, dest='cachedir',
help='Disable filesystem caching')
filesystem.add_option(
'--rm-cache-dir', action='store_true', dest='rm_cachedir',
help='Delete all filesystem cache files')
postproc.add_option('-x', '--extract-audio', action='store_true', dest='extractaudio', default=False,
help='convert video files to audio-only files (requires ffmpeg or avconv and ffprobe or avprobe)')
postproc.add_option('--audio-format', metavar='FORMAT', dest='audioformat', default='best',
help='"best", "aac", "vorbis", "mp3", "m4a", "opus", or "wav"; best by default')
postproc.add_option('--audio-quality', metavar='QUALITY', dest='audioquality', default='5',
help='ffmpeg/avconv audio quality specification, insert a value between 0 (better) and 9 (worse) for VBR or a specific bitrate like 128K (default 5)')
postproc.add_option('--recode-video', metavar='FORMAT', dest='recodevideo', default=None,
help='Encode the video to another format if necessary (currently supported: mp4|flv|ogg|webm|mkv)')
postproc.add_option('-k', '--keep-video', action='store_true', dest='keepvideo', default=False,
help='keeps the video file on disk after the post-processing; the video is erased by default')
postproc.add_option('--no-post-overwrites', action='store_true', dest='nopostoverwrites', default=False,
help='do not overwrite post-processed files; the post-processed files are overwritten by default')
postproc.add_option('--embed-subs', action='store_true', dest='embedsubtitles', default=False,
help='embed subtitles in the video (only for mp4 videos)')
postproc.add_option('--embed-thumbnail', action='store_true', dest='embedthumbnail', default=False,
help='embed thumbnail in the audio as cover art')
postproc.add_option('--add-metadata', action='store_true', dest='addmetadata', default=False,
help='write metadata to the video file')
postproc.add_option('--xattrs', action='store_true', dest='xattrs', default=False,
help='write metadata to the video file\'s xattrs (using dublin core and xdg standards)')
postproc.add_option('--prefer-avconv', action='store_false', dest='prefer_ffmpeg',
help='Prefer avconv over ffmpeg for running the postprocessors (default)')
postproc.add_option('--prefer-ffmpeg', action='store_true', dest='prefer_ffmpeg',
help='Prefer ffmpeg over avconv for running the postprocessors')
parser.add_option_group(general)
parser.add_option_group(selection)
parser.add_option_group(downloader)
parser.add_option_group(filesystem)
parser.add_option_group(verbosity)
parser.add_option_group(workarounds)
parser.add_option_group(video_format)
parser.add_option_group(subtitles)
parser.add_option_group(authentication)
parser.add_option_group(postproc)
if overrideArguments is not None:
opts, args = parser.parse_args(overrideArguments)
if opts.verbose:
write_string(u'[debug] Override config: ' + repr(overrideArguments) + '\n')
else:
commandLineConf = sys.argv[1:]
if '--ignore-config' in commandLineConf:
systemConf = []
userConf = []
else:
systemConf = _readOptions('/etc/youtube-dl.conf')
if '--ignore-config' in systemConf:
userConf = []
else:
userConf = _readUserConf()
argv = systemConf + userConf + commandLineConf
opts, args = parser.parse_args(argv)
if opts.verbose:
write_string(u'[debug] System config: ' + repr(_hide_login_info(systemConf)) + '\n')
write_string(u'[debug] User config: ' + repr(_hide_login_info(userConf)) + '\n')
write_string(u'[debug] Command-line args: ' + repr(_hide_login_info(commandLineConf)) + '\n')
return parser, opts, args
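Because argv is assembled as systemConf + userConf + commandLineConf and optparse lets a later occurrence of a 'store' option overwrite an earlier one, command-line flags take precedence over both configuration files. A sketch with hypothetical values:

    # /etc/youtube-dl.conf contains '-f worst'; the user runs 'youtube-dl -f best URL'
    # -> parser.parse_args(['-f', 'worst', '-f', 'best', 'URL']) leaves opts.format == 'best'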
def _real_main(argv=None):
# Compatibility fixes for Windows
if sys.platform == 'win32':
@ -751,6 +291,7 @@ def _real_main(argv=None):
'usenetrc': opts.usenetrc,
'username': opts.username,
'password': opts.password,
'twofactor': opts.twofactor,
'videopassword': opts.videopassword,
'quiet': (opts.quiet or any_printing),
'no_warnings': opts.no_warnings,
@ -827,6 +368,7 @@ def _real_main(argv=None):
'default_search': opts.default_search,
'youtube_include_dash_manifest': opts.youtube_include_dash_manifest,
'encoding': opts.encoding,
'exec_cmd': opts.exec_cmd,
}
with YoutubeDL(ydl_opts) as ydl:
@ -850,26 +392,20 @@ def _real_main(argv=None):
ydl.add_post_processor(FFmpegAudioFixPP())
ydl.add_post_processor(AtomicParsleyPP())
# Please keep ExecAfterDownload towards the bottom as it allows the user to modify the final file in any way.
# If the user's command removed the file before a later postprocessor ran, that postprocessor would break.
if opts.exec_cmd:
ydl.add_post_processor(ExecAfterDownloadPP(
verboseOutput=opts.verbose, exec_cmd=opts.exec_cmd))
# Update version
if opts.update_self:
update_self(ydl.to_screen, opts.verbose)
# Remove cache dir
if opts.rm_cachedir:
if opts.cachedir is None:
ydl.to_screen(u'No cache dir specified (Did you combine --no-cache-dir and --rm-cache-dir?)')
else:
if ('.cache' not in opts.cachedir) or ('youtube-dl' not in opts.cachedir):
ydl.to_screen(u'Not removing directory %s - this does not look like a cache dir' % opts.cachedir)
retcode = 141
else:
ydl.to_screen(
u'Removing cache dir %s .' % opts.cachedir,
skip_eol=True)
if os.path.exists(opts.cachedir):
ydl.to_screen(u'.', skip_eol=True)
shutil.rmtree(opts.cachedir)
ydl.to_screen(u'.')
ydl.cache.remove()
# Maybe do nothing
if (len(all_urls) < 1) and (opts.load_info_filename is None):

youtube_dl/cache.py Normal file
View File

@ -0,0 +1,94 @@
from __future__ import unicode_literals
import errno
import io
import json
import os
import re
import shutil
import traceback
from .utils import (
write_json_file,
)
class Cache(object):
def __init__(self, ydl):
self._ydl = ydl
def _get_root_dir(self):
res = self._ydl.params.get('cachedir')
if res is None:
cache_root = os.environ.get('XDG_CACHE_HOME', '~/.cache')
res = os.path.join(cache_root, 'youtube-dl')
return os.path.expanduser(res)
def _get_cache_fn(self, section, key, dtype):
assert re.match(r'^[a-zA-Z0-9_.-]+$', section), \
'invalid section %r' % section
assert re.match(r'^[a-zA-Z0-9_.-]+$', key), 'invalid key %r' % key
return os.path.join(
self._get_root_dir(), section, '%s.%s' % (key, dtype))
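# e.g. _get_cache_fn('youtube-sigfuncs', 'js_vfl123', 'json') (illustrative key)
# -> ~/.cache/youtube-dl/youtube-sigfuncs/js_vfl123.json under the default root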
@property
def enabled(self):
return self._ydl.params.get('cachedir') is not False
def store(self, section, key, data, dtype='json'):
assert dtype in ('json',)
if not self.enabled:
return
fn = self._get_cache_fn(section, key, dtype)
try:
try:
os.makedirs(os.path.dirname(fn))
except OSError as ose:
if ose.errno != errno.EEXIST:
raise
write_json_file(data, fn)
except Exception:
tb = traceback.format_exc()
self._ydl.report_warning(
'Writing cache to %r failed: %s' % (fn, tb))
def load(self, section, key, dtype='json', default=None):
assert dtype in ('json',)
if not self.enabled:
return default
cache_fn = self._get_cache_fn(section, key, dtype)
try:
try:
with io.open(cache_fn, 'r', encoding='utf-8') as cachef:
return json.load(cachef)
except ValueError:
try:
file_size = os.path.getsize(cache_fn)
except (OSError, IOError) as oe:
file_size = str(oe)
self._ydl.report_warning(
'Cache retrieval from %s failed (%s)' % (cache_fn, file_size))
except IOError:
pass # No cache available
return default
def remove(self):
if not self.enabled:
self._ydl.to_screen('Cache is disabled (Did you combine --no-cache-dir and --rm-cache-dir?)')
return
cachedir = self._get_root_dir()
if not any((term in cachedir) for term in ('cache', 'tmp')):
raise Exception('Not removing directory %s - this does not look like a cache dir' % cachedir)
self._ydl.to_screen(
'Removing cache dir %s .' % cachedir, skip_eol=True)
if os.path.exists(cachedir):
self._ydl.to_screen('.', skip_eol=True)
shutil.rmtree(cachedir)
self._ydl.to_screen('.')
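A minimal usage sketch (assuming ydl is a YoutubeDL instance; the section and key names are illustrative):

    cache = Cache(ydl)
    cache.store('youtube-sigfuncs', 'js_vfl123', {'sig_order': [1, 3, 2]})
    data = cache.load('youtube-sigfuncs', 'js_vfl123', default={})
    # With {'cachedir': False} in ydl.params both calls are no-ops and
    # load() simply returns the default.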

View File

@ -2,6 +2,7 @@ from __future__ import unicode_literals
from .common import FileDownloader
from .hls import HlsFD
from .hls import NativeHlsFD
from .http import HttpFD
from .mplayer import MplayerFD
from .rtmp import RtmpFD
@ -19,6 +20,8 @@ def get_suitable_downloader(info_dict):
if url.startswith('rtmp'):
return RtmpFD
if protocol == 'm3u8_native':
return NativeHlsFD
if (protocol == 'm3u8') or (protocol is None and determine_ext(url) == 'm3u8'):
return HlsFD
if url.startswith('mms') or url.startswith('rtsp'):
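In effect the selector above dispatches like this (inputs are illustrative; plain HTTP(S) URLs fall through to HttpFD):

    # get_suitable_downloader({'url': 'rtmp://host/app/stream'})   -> RtmpFD
    # get_suitable_downloader({'url': 'http://h/v.m3u8',
    #                          'protocol': 'm3u8_native'})         -> NativeHlsFD
    # get_suitable_downloader({'url': 'http://h/v.m3u8'})          -> HlsFD
    # get_suitable_downloader({'url': 'mms://host/stream'})        -> MplayerFD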

View File

@ -42,6 +42,7 @@ class FileDownloader(object):
Subclasses of this one must re-define the real_download method.
"""
_TEST_FILE_SIZE = 10241
params = None
def __init__(self, ydl, params):

View File

@ -16,6 +16,7 @@ from ..utils import (
format_bytes,
encodeFilename,
sanitize_open,
xpath_text,
)
@ -251,6 +252,8 @@ class F4mFD(FileDownloader):
# We only download the first fragment
fragments_list = fragments_list[:1]
total_frags = len(fragments_list)
# For some akamai manifests we'll need to add a query to the fragment url
akamai_pv = xpath_text(doc, _add_ns('pv-2.0'))
tmpfilename = self.temp_name(filename)
(dest_stream, tmpfilename) = sanitize_open(tmpfilename, 'wb')
@ -290,6 +293,8 @@ class F4mFD(FileDownloader):
for (seg_i, frag_i) in fragments_list:
name = 'Seg%d-Frag%d' % (seg_i, frag_i)
url = base_url + name
if akamai_pv:
url += '?' + akamai_pv.strip(';')
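# Illustrative: with name 'Seg1-Frag1' and akamai_pv 'hdntl=exp=...~hmac=...;'
# the fragment URL becomes base_url + 'Seg1-Frag1' + '?hdntl=exp=...~hmac=...'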
frag_filename = '%s-%s' % (tmpfilename, name)
success = http_dl.download(frag_filename, {'url': url})
if not success:

View File

@ -1,8 +1,14 @@
from __future__ import unicode_literals
import os
import re
import subprocess
from .common import FileDownloader
from ..utils import (
compat_urlparse,
compat_urllib_request,
check_executable,
encodeFilename,
)
@ -19,13 +25,11 @@ class HlsFD(FileDownloader):
encodeFilename(tmpfilename, for_subprocess=True)]
for program in ['avconv', 'ffmpeg']:
try:
subprocess.call([program, '-version'], stdout=(open(os.path.devnull, 'w')), stderr=subprocess.STDOUT)
if check_executable(program, ['-version']):
break
except (OSError, IOError):
pass
else:
self.report_error(u'm3u8 download detected but ffmpeg or avconv could not be found. Please install one.')
return False
cmd = [program] + args
retval = subprocess.call(cmd)
@ -42,5 +46,59 @@ class HlsFD(FileDownloader):
return True
else:
self.to_stderr(u"\n")
self.report_error(u'ffmpeg exited with code %d' % retval)
self.report_error(u'%s exited with code %d' % (program, retval))
return False
class NativeHlsFD(FileDownloader):
""" A more limited implementation that does not require ffmpeg """
def real_download(self, filename, info_dict):
url = info_dict['url']
self.report_destination(filename)
tmpfilename = self.temp_name(filename)
self.to_screen(
'[hlsnative] %s: Downloading m3u8 manifest' % info_dict['id'])
data = self.ydl.urlopen(url).read()
s = data.decode('utf-8', 'ignore')
segment_urls = []
for line in s.splitlines():
line = line.strip()
if line and not line.startswith('#'):
segment_url = (
line
if re.match(r'^https?://', line)
else compat_urlparse.urljoin(url, line))
segment_urls.append(segment_url)
is_test = self.params.get('test', False)
remaining_bytes = self._TEST_FILE_SIZE if is_test else None
byte_counter = 0
with open(tmpfilename, 'wb') as outf:
for i, segurl in enumerate(segment_urls):
self.to_screen(
'[hlsnative] %s: Downloading segment %d / %d' %
(info_dict['id'], i + 1, len(segment_urls)))
seg_req = compat_urllib_request.Request(segurl)
if remaining_bytes is not None:
seg_req.add_header('Range', 'bytes=0-%d' % (remaining_bytes - 1))
segment = self.ydl.urlopen(seg_req).read()
if remaining_bytes is not None:
segment = segment[:remaining_bytes]
remaining_bytes -= len(segment)
outf.write(segment)
byte_counter += len(segment)
if remaining_bytes is not None and remaining_bytes <= 0:
break
self._hook_progress({
'downloaded_bytes': byte_counter,
'total_bytes': byte_counter,
'filename': filename,
'status': 'finished',
})
self.try_rename(tmpfilename, filename)
return True
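To make the parsing loop concrete: an (illustrative) manifest fetched from http://host/path/master.m3u8 such as

    # #EXTM3U
    # #EXTINF:10.0,
    # seg-000.ts
    # #EXTINF:10.0,
    # https://cdn.example.com/seg-001.ts
    # #EXT-X-ENDLIST

yields segment_urls == ['http://host/path/seg-000.ts', 'https://cdn.example.com/seg-001.ts']: comment lines are skipped and relative entries are resolved against the manifest URL.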

View File

@ -14,8 +14,6 @@ from ..utils import (
class HttpFD(FileDownloader):
_TEST_FILE_SIZE = 10241
def real_download(self, filename, info_dict):
url = info_dict['url']
tmpfilename = self.temp_name(filename)
@ -193,7 +191,8 @@ class HttpFD(FileDownloader):
self.to_stderr(u"\n")
self.report_error(u'Did not get any data blocks')
return False
stream.close()
if tmpfilename != u'-':
stream.close()
self.report_finish(data_len_str, (time.time() - start))
if data_len is not None and byte_counter != data_len:
raise ContentTooShortError(byte_counter, int(data_len))

View File

@ -8,9 +8,10 @@ import time
from .common import FileDownloader
from ..utils import (
check_executable,
compat_str,
encodeFilename,
format_bytes,
compat_str,
)
@ -103,9 +104,7 @@ class RtmpFD(FileDownloader):
test = self.params.get('test', False)
# Check for rtmpdump first
try:
subprocess.call(['rtmpdump', '-h'], stdout=(open(os.path.devnull, 'w')), stderr=subprocess.STDOUT)
except (OSError, IOError):
if not check_executable('rtmpdump', ['-h']):
self.report_error('RTMP download detected but "rtmpdump" could not be run. Please install it.')
return False

View File

@ -4,12 +4,13 @@ from .addanime import AddAnimeIE
from .adultswim import AdultSwimIE
from .aftonbladet import AftonbladetIE
from .anitube import AnitubeIE
from .anysex import AnySexIE
from .aol import AolIE
from .allocine import AllocineIE
from .aparat import AparatIE
from .appletrailers import AppleTrailersIE
from .archiveorg import ArchiveOrgIE
from .ard import ARDIE
from .ard import ARDIE, ARDMediathekIE
from .arte import (
ArteTvIE,
ArteTVPlus7IE,
@ -23,6 +24,8 @@ from .auengine import AUEngineIE
from .bambuser import BambuserIE, BambuserChannelIE
from .bandcamp import BandcampIE, BandcampAlbumIE
from .bbccouk import BBCCoUkIE
from .beeg import BeegIE
from .behindkink import BehindKinkIE
from .bilibili import BiliBiliIE
from .blinkx import BlinkxIE
from .bliptv import BlipTVIE, BlipTVUserIE
@ -44,6 +47,7 @@ from .cinemassacre import CinemassacreIE
from .clipfish import ClipfishIE
from .cliphunter import CliphunterIE
from .clipsyndicate import ClipsyndicateIE
from .cloudy import CloudyIE
from .clubic import ClubicIE
from .cmt import CMTIE
from .cnet import CNETIE
@ -65,9 +69,12 @@ from .dailymotion import (
DailymotionUserIE,
)
from .daum import DaumIE
from .dbtv import DBTVIE
from .deezer import DeezerPlaylistIE
from .dfb import DFBIE
from .dotsub import DotsubIE
from .dreisat import DreiSatIE
from .drtuber import DrTuberIE
from .drtv import DRTVIE
from .dump import DumpIE
from .defense import DefenseGouvFrIE
@ -77,17 +84,20 @@ from .dropbox import DropboxIE
from .ebaumsworld import EbaumsWorldIE
from .ehow import EHowIE
from .eighttracks import EightTracksIE
from .einthusan import EinthusanIE
from .eitb import EitbIE
from .ellentv import (
EllenTVIE,
EllenTVClipsIE,
)
from .elpais import ElPaisIE
from .empflix import EmpflixIE
from .empflix import EMPFlixIE
from .engadget import EngadgetIE
from .eporner import EpornerIE
from .escapist import EscapistIE
from .everyonesmixtape import EveryonesMixtapeIE
from .exfm import ExfmIE
from .expotv import ExpoTVIE
from .extremetube import ExtremeTubeIE
from .facebook import FacebookIE
from .faz import FazIE
@ -115,21 +125,29 @@ from .freesound import FreesoundIE
from .freespeech import FreespeechIE
from .funnyordie import FunnyOrDieIE
from .gamekings import GamekingsIE
from .gameone import GameOneIE
from .gameone import (
GameOneIE,
GameOnePlaylistIE,
)
from .gamespot import GameSpotIE
from .gamestar import GameStarIE
from .gametrailers import GametrailersIE
from .gdcvault import GDCVaultIE
from .generic import GenericIE
from .globo import GloboIE
from .godtube import GodTubeIE
from .golem import GolemIE
from .googleplus import GooglePlusIE
from .googlesearch import GoogleSearchIE
from .gorillavid import GorillaVidIE
from .goshgay import GoshgayIE
from .grooveshark import GroovesharkIE
from .hark import HarkIE
from .heise import HeiseIE
from .helsinki import HelsinkiIE
from .hentaistigma import HentaiStigmaIE
from .hornbunny import HornBunnyIE
from .hostingbulk import HostingBulkIE
from .hotnewhiphop import HotNewHipHopIE
from .howcast import HowcastIE
from .howstuffworks import HowStuffWorksIE
@ -173,6 +191,7 @@ from .livestream import (
LivestreamOriginalIE,
LivestreamShortenerIE,
)
from .lrt import LRTIE
from .lynda import (
LyndaIE,
LyndaCourseIE
@ -184,13 +203,17 @@ from .malemotion import MalemotionIE
from .mdr import MDRIE
from .metacafe import MetacafeIE
from .metacritic import MetacriticIE
from .mgoon import MgoonIE
from .ministrygrid import MinistryGridIE
from .mit import TechTVMITIE, MITIE, OCWMITIE
from .mitele import MiTeleIE
from .mixcloud import MixcloudIE
from .mlb import MLBIE
from .mpora import MporaIE
from .moevideo import MoeVideoIE
from .mofosex import MofosexIE
from .mojvideo import MojvideoIE
from .moniker import MonikerIE
from .mooshare import MooshareIE
from .morningstar import MorningstarIE
from .motherless import MotherlessIE
@ -203,7 +226,9 @@ from .mtv import (
MTVServicesEmbeddedIE,
MTVIggyIE,
)
from .muenchentv import MuenchenTVIE
from .musicplayon import MusicPlayOnIE
from .musicvault import MusicVaultIE
from .muzu import MuzuTVIE
from .myspace import MySpaceIE
from .myspass import MySpassIE
@ -219,15 +244,20 @@ from .ndtv import NDTVIE
from .newgrounds import NewgroundsIE
from .newstube import NewstubeIE
from .nfb import NFBIE
from .nfl import NFLIE
from .nhl import NHLIE, NHLVideocenterIE
from .niconico import NiconicoIE
from .ninegag import NineGagIE
from .noco import NocoIE
from .normalboots import NormalbootsIE
from .nosvideo import NosVideoIE
from .novamov import NovaMovIE
from .nowness import NownessIE
from .nowvideo import NowVideoIE
from .npo import NPOIE
from .npo import (
NPOIE,
TegenlichtVproIE,
)
from .nrk import (
NRKIE,
NRKTVIE,
@ -235,6 +265,7 @@ from .nrk import (
from .ntv import NTVIE
from .nytimes import NYTimesIE
from .nuvid import NuvidIE
from .oktoberfesttv import OktoberfestTVIE
from .ooyala import OoyalaIE
from .orf import (
ORFTVthekIE,
@ -245,12 +276,16 @@ from .parliamentliveuk import ParliamentLiveUKIE
from .patreon import PatreonIE
from .pbs import PBSIE
from .photobucket import PhotobucketIE
from .planetaplay import PlanetaPlayIE
from .played import PlayedIE
from .playfm import PlayFMIE
from .playvid import PlayvidIE
from .podomatic import PodomaticIE
from .pornhd import PornHdIE
from .pornhub import PornHubIE
from .pornotube import PornotubeIE
from .pornoxo import PornoXOIE
from .promptfile import PromptFileIE
from .prosiebensat1 import ProSiebenSat1IE
from .pyvideo import PyvideoIE
from .radiofrance import RadioFranceIE
@ -282,6 +317,7 @@ from .scivee import SciVeeIE
from .screencast import ScreencastIE
from .servingsys import ServingSysIE
from .shared import SharedIE
from .sharesix import ShareSixIE
from .sina import SinaIE
from .slideshare import SlideshareIE
from .slutload import SlutloadIE
@ -307,17 +343,22 @@ from .southpark import (
)
from .space import SpaceIE
from .spankwire import SpankwireIE
from .spiegel import SpiegelIE
from .spiegel import SpiegelIE, SpiegelArticleIE
from .spiegeltv import SpiegeltvIE
from .spike import SpikeIE
from .sport5 import Sport5IE
from .sportbox import SportBoxIE
from .sportdeutschland import SportDeutschlandIE
from .stanfordoc import StanfordOpenClassroomIE
from .steam import SteamIE
from .streamcloud import StreamcloudIE
from .streamcz import StreamCZIE
from .sunporno import SunPornoIE
from .swrmediathek import SWRMediathekIE
from .syfy import SyfyIE
from .sztvhu import SztvHuIE
from .tagesschau import TagesschauIE
from .tapely import TapelyIE
from .teachertube import (
TeacherTubeIE,
TeacherTubeUserIE,
@ -326,13 +367,21 @@ from .teachingchannel import TeachingChannelIE
from .teamcoco import TeamcocoIE
from .techtalks import TechTalksIE
from .ted import TEDIE
from .telemb import TeleMBIE
from .tenplay import TenPlayIE
from .testurl import TestURLIE
from .tf1 import TF1IE
from .theonion import TheOnionIE
from .theplatform import ThePlatformIE
from .thesixtyone import TheSixtyOneIE
from .thisav import ThisAVIE
from .tinypic import TinyPicIE
from .tlc import TlcIE, TlcDeIE
from .tnaflix import TNAFlixIE
from .thvideo import (
THVideoIE,
THVideoPlaylistIE
)
from .toutv import TouTvIE
from .toypics import ToypicsUserIE, ToypicsIE
from .traileraddict import TrailerAddictIE
@ -341,6 +390,7 @@ from .trutube import TruTubeIE
from .tube8 import Tube8IE
from .tudou import TudouIE
from .tumblr import TumblrIE
from .turbo import TurboIE
from .tutv import TutvIE
from .tvigle import TvigleIE
from .tvp import TvpIE
@ -358,23 +408,26 @@ from .veehd import VeeHDIE
from .veoh import VeohIE
from .vesti import VestiIE
from .vevo import VevoIE
from .vgtv import VGTVIE
from .vh1 import VH1IE
from .viddler import ViddlerIE
from .videobam import VideoBamIE
from .videodetective import VideoDetectiveIE
from .videolecturesnet import VideoLecturesNetIE
from .videofyme import VideofyMeIE
from .videomega import VideoMegaIE
from .videopremium import VideoPremiumIE
from .videott import VideoTtIE
from .videoweed import VideoWeedIE
from .vidme import VidmeIE
from .vimeo import (
VimeoIE,
VimeoChannelIE,
VimeoUserIE,
VimeoAlbumIE,
VimeoChannelIE,
VimeoGroupsIE,
VimeoLikesIE,
VimeoReviewIE,
VimeoUserIE,
VimeoWatchLaterIE,
)
from .vimple import VimpleIE
@ -385,11 +438,14 @@ from .vine import (
from .viki import VikiIE
from .vk import VKIE
from .vodlocker import VodlockerIE
from .vporn import VpornIE
from .vube import VubeIE
from .vuclip import VuClipIE
from .vulture import VultureIE
from .walla import WallaIE
from .washingtonpost import WashingtonPostIE
from .wat import WatIE
from .wayofthemaster import WayOfTheMasterIE
from .wdr import (
WDRIE,
WDRMobileIE,
@ -408,12 +464,13 @@ from .xvideos import XVideosIE
from .xtube import XTubeUserIE, XTubeIE
from .yahoo import (
YahooIE,
YahooNewsIE,
YahooSearchIE,
)
from .ynet import YnetIE
from .youjizz import YouJizzIE
from .youku import YoukuIE
from .youporn import YouPornIE
from .yourupload import YourUploadIE
from .youtube import (
YoutubeIE,
YoutubeChannelIE,

View File

@ -22,8 +22,7 @@ class ABCIE(InfoExtractor):
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
video_id = self._match_id(url)
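# _match_id is an InfoExtractor helper equivalent to
# re.match(self._VALID_URL, url).group('id')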
webpage = self._download_webpage(url, video_id)
urls_info_json = self._search_regex(

View File

@ -7,6 +7,15 @@ from .common import InfoExtractor
class AcademicEarthCourseIE(InfoExtractor):
_VALID_URL = r'^https?://(?:www\.)?academicearth\.org/playlists/(?P<id>[^?#/]+)'
IE_NAME = 'AcademicEarth:Course'
_TEST = {
'url': 'http://academicearth.org/playlists/laws-of-nature/',
'info_dict': {
'id': 'laws-of-nature',
'title': 'Laws of Nature',
'description': 'Introduce yourself to the laws of nature with these free online college lectures from Yale, Harvard, and MIT.',
},
'playlist_count': 4,
}
def _real_extract(self, url):
m = re.match(self._VALID_URL, url)

View File

@ -75,7 +75,9 @@ class AdultSwimIE(InfoExtractor):
video_path = mobj.group('path')
webpage = self._download_webpage(url, video_path)
episode_id = self._html_search_regex(r'<link rel="video_src" href="http://i\.adultswim\.com/adultswim/adultswimtv/tools/swf/viralplayer.swf\?id=([0-9a-f]+?)"\s*/?\s*>', webpage, 'episode_id')
episode_id = self._html_search_regex(
r'<link rel="video_src" href="http://i\.adultswim\.com/adultswim/adultswimtv/tools/swf/viralplayer.swf\?id=([0-9a-f]+?)"\s*/?\s*>',
webpage, 'episode_id')
title = self._og_search_title(webpage)
index_url = 'http://asfix.adultswim.com/asfix-svc/episodeSearch/getEpisodesByIDs?networkName=AS&ids=%s' % episode_id
@ -97,7 +99,9 @@ class AdultSwimIE(InfoExtractor):
duration = segment_el.attrib.get('duration')
segment_url = 'http://asfix.adultswim.com/asfix-svc/episodeservices/getCvpPlaylist?networkName=AS&id=%s' % segment_id
idoc = self._download_xml(segment_url, segment_title, 'Downloading segment information', 'Unable to download segment information')
idoc = self._download_xml(
segment_url, segment_title,
'Downloading segment information', 'Unable to download segment information')
formats = []
file_els = idoc.findall('.//files/file')

View File

@ -0,0 +1,61 @@
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
parse_duration,
int_or_none,
)
class AnySexIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?anysex\.com/(?P<id>\d+)'
_TEST = {
'url': 'http://anysex.com/156592/',
'md5': '023e9fbb7f7987f5529a394c34ad3d3d',
'info_dict': {
'id': '156592',
'ext': 'mp4',
'title': 'Busty and sexy blondie in her bikini strips for you',
'description': 'md5:de9e418178e2931c10b62966474e1383',
'categories': ['Erotic'],
'duration': 270,
'age_limit': 18,
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
webpage = self._download_webpage(url, video_id)
video_url = self._html_search_regex(r"video_url\s*:\s*'([^']+)'", webpage, 'video URL')
title = self._html_search_regex(r'<title>(.*?)</title>', webpage, 'title')
description = self._html_search_regex(
r'<div class="description"[^>]*>([^<]+)</div>', webpage, 'description', fatal=False)
thumbnail = self._html_search_regex(
r'preview_url\s*:\s*\'(.*?)\'', webpage, 'thumbnail', fatal=False)
categories = re.findall(
r'<a href="http://anysex\.com/categories/[^"]+" title="[^"]*">([^<]+)</a>', webpage)
duration = parse_duration(self._search_regex(
r'<b>Duration:</b> (?:<q itemprop="duration">)?(\d+:\d+)', webpage, 'duration', fatal=False))
view_count = int_or_none(self._html_search_regex(
r'<b>Views:</b> (\d+)', webpage, 'view count', fatal=False))
return {
'id': video_id,
'url': video_url,
'ext': 'mp4',
'title': title,
'description': description,
'thumbnail': thumbnail,
'categories': categories,
'duration': duration,
'view_count': view_count,
'age_limit': 18,
}

View File

@ -21,7 +21,7 @@ class AolIE(InfoExtractor):
(?:$|\?)
'''
_TEST = {
_TESTS = [{
'url': 'http://on.aol.com/video/u-s--official-warns-of-largest-ever-irs-phone-scam-518167793?icid=OnHomepageC2Wide_MustSee_Img',
'md5': '18ef68f48740e86ae94b98da815eec42',
'info_dict': {
@ -30,7 +30,14 @@ class AolIE(InfoExtractor):
'title': 'U.S. Official Warns Of \'Largest Ever\' IRS Phone Scam',
},
'add_ie': ['FiveMin'],
}
}, {
'url': 'http://on.aol.com/playlist/brace-yourself---todays-weirdest-news-152147?icid=OnHomepageC4_Omg_Img#_videoid=518184316',
'info_dict': {
'id': '152147',
'title': 'Brace Yourself - Today\'s Weirdest News',
},
'playlist_mincount': 10,
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)

View File

@ -8,12 +8,15 @@ from ..utils import (
determine_ext,
ExtractorError,
qualities,
compat_urllib_parse_urlparse,
compat_urllib_parse,
int_or_none,
parse_duration,
unified_strdate,
xpath_text,
)
class ARDIE(InfoExtractor):
class ARDMediathekIE(InfoExtractor):
IE_NAME = 'ARD:mediathek'
_VALID_URL = r'^https?://(?:(?:www\.)?ardmediathek\.de|mediathek\.daserste\.de)/(?:.*/)(?P<video_id>[0-9]+|[^0-9][^/\?]+)[^/\?]*(?:\?.*)?'
_TESTS = [{
@ -46,9 +49,6 @@ class ARDIE(InfoExtractor):
else:
video_id = m.group('video_id')
urlp = compat_urllib_parse_urlparse(url)
url = urlp._replace(path=compat_urllib_parse.quote(urlp.path.encode('utf-8'))).geturl()
webpage = self._download_webpage(url, video_id)
if '>Der gewünschte Beitrag ist nicht mehr verfügbar.<' in webpage:
@ -128,3 +128,61 @@ class ARDIE(InfoExtractor):
'formats': formats,
'thumbnail': thumbnail,
}
class ARDIE(InfoExtractor):
_VALID_URL = '(?P<mainurl>https?://(www\.)?daserste\.de/[^?#]+/videos/(?P<display_id>[^/?#]+)-(?P<id>[0-9]+))\.html'
_TEST = {
'url': 'http://www.daserste.de/information/reportage-dokumentation/dokus/videos/die-story-im-ersten-mission-unter-falscher-flagge-100.html',
'md5': 'd216c3a86493f9322545e045ddc3eb35',
'info_dict': {
'display_id': 'die-story-im-ersten-mission-unter-falscher-flagge',
'id': '100',
'ext': 'mp4',
'duration': 2600,
'title': 'Die Story im Ersten: Mission unter falscher Flagge',
'upload_date': '20140804',
'thumbnail': 're:^https?://.*\.jpg$',
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
display_id = mobj.group('display_id')
player_url = mobj.group('mainurl') + '~playerXml.xml'
doc = self._download_xml(player_url, display_id)
video_node = doc.find('./video')
upload_date = unified_strdate(xpath_text(
video_node, './broadcastDate'))
thumbnail = xpath_text(video_node, './/teaserImage//variant/url')
formats = []
for a in video_node.findall('.//asset'):
f = {
'format_id': a.attrib['type'],
'width': int_or_none(a.find('./frameWidth').text),
'height': int_or_none(a.find('./frameHeight').text),
'vbr': int_or_none(a.find('./bitrateVideo').text),
'abr': int_or_none(a.find('./bitrateAudio').text),
'vcodec': a.find('./codecVideo').text,
'tbr': int_or_none(a.find('./totalBitrate').text),
}
if a.find('./serverPrefix').text:
f['url'] = a.find('./serverPrefix').text
f['playpath'] = a.find('./fileName').text
else:
f['url'] = a.find('./fileName').text
formats.append(f)
self._sort_formats(formats)
return {
'id': mobj.group('id'),
'formats': formats,
'display_id': display_id,
'title': video_node.find('./title').text,
'duration': parse_duration(video_node.find('./duration').text),
'upload_date': upload_date,
'thumbnail': thumbnail,
}

View File

@ -78,18 +78,23 @@ class ArteTVPlus7IE(InfoExtractor):
def _extract_from_webpage(self, webpage, video_id, lang):
json_url = self._html_search_regex(
r'arte_vp_url="(.*?)"', webpage, 'json vp url')
[r'arte_vp_url=["\'](.*?)["\']', r'data-url=["\']([^"]+)["\']'],
webpage, 'json vp url')
return self._extract_from_json_url(json_url, video_id, lang)
def _extract_from_json_url(self, json_url, video_id, lang):
info = self._download_json(json_url, video_id)
player_info = info['videoJsonPlayer']
upload_date_str = player_info.get('shootingDate')
if not upload_date_str:
upload_date_str = player_info.get('VDA', '').split(' ')[0]
info_dict = {
'id': player_info['VID'],
'title': player_info['VTI'],
'description': player_info.get('VDE'),
'upload_date': unified_strdate(player_info.get('VDA', '').split(' ')[0]),
'upload_date': unified_strdate(upload_date_str),
'thumbnail': player_info.get('programImage') or player_info.get('VTU', {}).get('IUR'),
}

View File

@ -59,6 +59,13 @@ class BambuserChannelIE(InfoExtractor):
_VALID_URL = r'https?://bambuser\.com/channel/(?P<user>.*?)(?:/|#|\?|$)'
# The maximum number we can get with each request
_STEP = 50
_TEST = {
'url': 'http://bambuser.com/channel/pixelversity',
'info_dict': {
'title': 'pixelversity',
},
'playlist_mincount': 60,
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
@ -73,10 +80,10 @@ class BambuserChannelIE(InfoExtractor):
req = compat_urllib_request.Request(req_url)
# Without setting this header, we wouldn't get any result
req.add_header('Referer', 'http://bambuser.com/channel/%s' % user)
info_json = self._download_webpage(req, user,
'Downloading page %d' % i)
results = json.loads(info_json)['result']
if len(results) == 0:
data = self._download_json(
req, user, 'Downloading page %d' % i)
results = data['result']
if not results:
break
last_id = results[-1]['vid']
urls.extend(self.url_result(v['page'], 'Bambuser') for v in results)

View File

@ -15,13 +15,23 @@ class BandcampIE(InfoExtractor):
_VALID_URL = r'https?://.*?\.bandcamp\.com/track/(?P<title>.*)'
_TESTS = [{
'url': 'http://youtube-dl.bandcamp.com/track/youtube-dl-test-song',
'file': '1812978515.mp3',
'md5': 'c557841d5e50261777a6585648adf439',
'info_dict': {
"title": "youtube-dl \"'/\\\u00e4\u21ad - youtube-dl test song \"'/\\\u00e4\u21ad",
"duration": 9.8485,
'id': '1812978515',
'ext': 'mp3',
'title': "youtube-dl \"'/\\\u00e4\u21ad - youtube-dl test song \"'/\\\u00e4\u21ad",
'duration': 9.8485,
},
'_skip': 'There is a limit of 200 free downloads / month for the test song'
}, {
'url': 'http://benprunty.bandcamp.com/track/lanius-battle',
'md5': '2b68e5851514c20efdff2afc5603b8b4',
'info_dict': {
'id': '2650410135',
'ext': 'mp3',
'title': 'Lanius (Battle)',
'uploader': 'Ben Prunty Music',
},
}]
def _real_extract(self, url):
@ -59,9 +69,9 @@ class BandcampIE(InfoExtractor):
raise ExtractorError('No free songs found')
download_link = m_download.group(1)
video_id = re.search(
r'var TralbumData = {(.*?)id: (?P<id>\d*?)$',
webpage, re.MULTILINE | re.DOTALL).group('id')
video_id = self._search_regex(
r'var TralbumData = {.*?id: (?P<id>\d+),?$',
webpage, 'video id', flags=re.MULTILINE | re.DOTALL)
download_webpage = self._download_webpage(download_link, video_id, 'Downloading free downloads page')
# We get the dictionary of the track from some javascript code
@ -96,7 +106,7 @@ class BandcampAlbumIE(InfoExtractor):
IE_NAME = 'Bandcamp:album'
_VALID_URL = r'https?://(?:(?P<subdomain>[^.]+)\.)?bandcamp\.com(?:/album/(?P<title>[^?#]+))'
_TEST = {
_TESTS = [{
'url': 'http://blazo.bandcamp.com/album/jazz-format-mixtape-vol-1',
'playlist': [
{
@ -118,7 +128,13 @@ class BandcampAlbumIE(InfoExtractor):
'playlistend': 2
},
'skip': 'Bandcamp imposes download limits. See test_playlists:test_bandcamp_album for the playlist test'
}
}, {
'url': 'http://nightbringer.bandcamp.com/album/hierophany-of-the-open-grave',
'info_dict': {
'title': 'Hierophany of the Open Grave',
},
'playlist_mincount': 9,
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)

View File

@ -0,0 +1,65 @@
from __future__ import unicode_literals
import re
from .common import InfoExtractor
class BeegIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?beeg\.com/(?P<id>\d+)'
_TEST = {
'url': 'http://beeg.com/5416503',
'md5': '634526ae978711f6b748fe0dd6c11f57',
'info_dict': {
'id': '5416503',
'ext': 'mp4',
'title': 'Sultry Striptease',
'description': 'md5:6db3c6177972822aaba18652ff59c773',
'categories': list, # NSFW
'thumbnail': 're:https?://.*\.jpg$',
'age_limit': 18,
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
webpage = self._download_webpage(url, video_id)
quality_arr = self._search_regex(
r'(?s)var\s+qualityArr\s*=\s*{\s*(.+?)\s*}', webpage, 'quality formats')
formats = [{
'url': fmt[1],
'format_id': fmt[0],
'height': int(fmt[0][:-1]),
} for fmt in re.findall(r"'([^']+)'\s*:\s*'([^']+)'", quality_arr)]
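# Illustrative qualityArr entry "'720p': 'http://video.example.com/720p.mp4'" maps to
# {'url': 'http://video.example.com/720p.mp4', 'format_id': '720p', 'height': 720}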
self._sort_formats(formats)
title = self._html_search_regex(
r'<title>([^<]+)\s*-\s*beeg\.?</title>', webpage, 'title')
description = self._html_search_regex(
r'<meta name="description" content="([^"]*)"',
webpage, 'description', fatal=False)
thumbnail = self._html_search_regex(
r'\'previewer.url\'\s*:\s*"([^"]*)"',
webpage, 'thumbnail', fatal=False)
categories_str = self._html_search_regex(
r'<meta name="keywords" content="([^"]+)"', webpage, 'categories', fatal=False)
categories = (
None if categories_str is None
else categories_str.split(','))
return {
'id': video_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'categories': categories,
'formats': formats,
'age_limit': 18,
}

View File

@ -0,0 +1,53 @@
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import url_basename
class BehindKinkIE(InfoExtractor):
_VALID_URL = r'http://(?:www\.)?behindkink\.com/(?P<year>[0-9]{4})/(?P<month>[0-9]{2})/(?P<day>[0-9]{2})/(?P<id>[^/#?_]+)'
_TEST = {
'url': 'http://www.behindkink.com/2014/08/14/ab1576-performers-voice-finally-heard-the-bill-is-killed/',
'md5': '41ad01222b8442089a55528fec43ec01',
'info_dict': {
'id': '36370',
'ext': 'mp4',
'title': 'AB1576 - PERFORMERS VOICE FINALLY HEARD - THE BILL IS KILLED!',
'description': 'The adult industry voice was finally heard as Assembly Bill 1576 remained\xa0 in suspense today at the Senate Appropriations Hearing. AB1576 was, among other industry damaging issues, a condom mandate...',
'upload_date': '20140814',
'thumbnail': 'http://www.behindkink.com/wp-content/uploads/2014/08/36370_AB1576_Win.jpg',
'age_limit': 18,
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
display_id = mobj.group('id')
year = mobj.group('year')
month = mobj.group('month')
day = mobj.group('day')
upload_date = year + month + day
webpage = self._download_webpage(url, display_id)
video_url = self._search_regex(
r"'file':\s*'([^']+)'",
webpage, 'URL base')
video_id = url_basename(video_url)
video_id = video_id.split('_')[0]
return {
'id': video_id,
'url': video_url,
'ext': 'mp4',
'title': self._og_search_title(webpage),
'display_id': display_id,
'thumbnail': self._og_search_thumbnail(webpage),
'description': self._og_search_description(webpage),
'upload_date': upload_date,
'age_limit': 18,
}

View File

@ -15,7 +15,7 @@ from ..utils import (
class BlipTVIE(SubtitlesInfoExtractor):
_VALID_URL = r'https?://(?:\w+\.)?blip\.tv/(?:(?:.+-|rss/flash/)(?P<id>\d+)|((?:play/|api\.swf#)(?P<lookup_id>[\da-zA-Z+_TESTS]+)))'
_VALID_URL = r'https?://(?:\w+\.)?blip\.tv/(?:(?:.+-|rss/flash/)(?P<id>\d+)|((?:play/|api\.swf#)(?P<lookup_id>[\da-zA-Z+_]+)))'
_TESTS = [
{

View File

@ -26,17 +26,8 @@ class BRIE(InfoExtractor):
'title': 'Wenn das Traditions-Theater wackelt',
'description': 'Heimatsound-Festival 2014: Wenn das Traditions-Theater wackelt',
'duration': 34,
}
},
- {
- 'url': 'http://www.br.de/mediathek/video/sendungen/unter-unserem-himmel/unter-unserem-himmel-alpen-ueber-den-pass-100.html',
- 'md5': 'ab451b09d861dbed7d7cc9ab0be19ebe',
- 'info_dict': {
- 'id': '2c060e69-3a27-4e13-b0f0-668fac17d812',
- 'ext': 'mp4',
- 'title': 'Über den Pass',
- 'description': 'Die Eroberung der Alpen: Über den Pass',
- 'duration': 2588,
- 'uploader': 'BR',
- 'upload_date': '20140802',
- }
- },
{
@ -77,8 +68,7 @@ class BRIE(InfoExtractor):
]
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- display_id = mobj.group('id')
+ display_id = self._match_id(url)
page = self._download_webpage(url, display_id)
xml_url = self._search_regex(
r"return BRavFramework\.register\(BRavFramework\('avPlayer_(?:[a-f0-9-]{36})'\)\.setup\({dataURL:'(/(?:[a-z0-9\-]+/)+[a-z0-9/~_.-]+)'}\)\);", page, 'XMLURL')

View File

@ -4,37 +4,61 @@ import re
import json
from .common import InfoExtractor
from ..utils import (
int_or_none,
parse_age_limit,
)
class BreakIE(InfoExtractor):
- _VALID_URL = r'http://(?:www\.)?break\.com/video/([^/]+)'
- _TEST = {
+ _VALID_URL = r'http://(?:www\.)?break\.com/video/(?:[^/]+/)*.+-(?P<id>\d+)'
+ _TESTS = [{
'url': 'http://www.break.com/video/when-girls-act-like-guys-2468056',
- 'md5': 'a3513fb1547fba4fb6cfac1bffc6c46b',
+ 'md5': '33aa4ff477ecd124d18d7b5d23b87ce5',
'info_dict': {
'id': '2468056',
'ext': 'mp4',
'title': 'When Girls Act Like D-Bags',
}
- }
+ }, {
'url': 'http://www.break.com/video/ugc/baby-flex-2773063',
'only_matching': True,
}]
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group(1).split("-")[-1]
- embed_url = 'http://www.break.com/embed/%s' % video_id
- webpage = self._download_webpage(embed_url, video_id)
- info_json = self._search_regex(r'var embedVars = ({.*})\s*?</script>',
- webpage, 'info json', flags=re.DOTALL)
- info = json.loads(info_json)
- video_url = info['videoUri']
+ video_id = self._match_id(url)
+ webpage = self._download_webpage(
+ 'http://www.break.com/embed/%s' % video_id, video_id)
+ info = json.loads(self._search_regex(
+ r'var embedVars = ({.*})\s*?</script>',
+ webpage, 'info json', flags=re.DOTALL))
youtube_id = info.get('youtubeId')
if youtube_id:
return self.url_result(youtube_id, 'Youtube')
- final_url = video_url + '?' + info['AuthToken']
formats = [{
'url': media['uri'] + '?' + info['AuthToken'],
'tbr': media['bitRate'],
'width': media['width'],
'height': media['height'],
} for media in info['media']]
if not formats:
formats.append({
'url': info['videoUri']
})
self._sort_formats(formats)
duration = int_or_none(info.get('videoLengthInSeconds'))
age_limit = parse_age_limit(info.get('audienceRating'))
return {
'id': video_id,
- 'url': final_url,
'title': info['contentName'],
'thumbnail': info['thumbUri'],
'duration': duration,
'age_limit': age_limit,
'formats': formats,
}
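
Sketch of the new format-building logic above, assuming the decoded embedVars JSON looks roughly like this (field names taken from the diff, values made up):

info = {  # assumed shape of the decoded embedVars JSON
    'AuthToken': 'token=abc123',
    'media': [
        {'uri': 'http://video.example/2468056_480.mp4',
         'bitRate': 850, 'width': 854, 'height': 480},
        {'uri': 'http://video.example/2468056_720.mp4',
         'bitRate': 2000, 'width': 1280, 'height': 720},
    ],
}

formats = [{
    'url': media['uri'] + '?' + info['AuthToken'],  # auth token rides the query string
    'tbr': media['bitRate'],
    'width': media['width'],
    'height': media['height'],
} for media in info['media']]
print(formats)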

View File

@ -25,7 +25,7 @@ class CBSIE(InfoExtractor):
}, {
'url': 'http://www.cbs.com/shows/liveonletterman/artist/221752/st-vincent/',
'info_dict': {
- 'id': 'P9gjWjelt6iP',
+ 'id': 'WWF_5KqY3PK1',
'ext': 'flv',
'title': 'Live on Letterman - St. Vincent',
'description': 'Live On Letterman: St. Vincent in concert from New York\'s Ed Sullivan Theater on Tuesday, July 16, 2014.',

View File

@ -42,7 +42,7 @@ class ChilloutzoneIE(InfoExtractor):
'id': '85523671',
'ext': 'mp4',
'title': 'The Sunday Times - Icons',
- 'description': 'md5:a5f7ff82e2f7a9ed77473fe666954e84',
+ 'description': 're:(?s)^Watch the making of - makingoficons.com.{300,}',
'uploader': 'Us',
'uploader_id': 'usfilms',
'upload_date': '20140131'

View File

@ -1,11 +1,13 @@
from __future__ import unicode_literals
import json
import re
from .common import InfoExtractor
from ..utils import int_or_none
- translation_table = {
+ _translation_table = {
'a': 'h', 'd': 'e', 'e': 'v', 'f': 'o', 'g': 'f', 'i': 'd', 'l': 'n',
'm': 'a', 'n': 'm', 'p': 'u', 'q': 't', 'r': 's', 'v': 'p', 'x': 'r',
'y': 'l', 'z': 'i',
@ -13,6 +15,10 @@ translation_table = {
}
def _decode(s):
return ''.join(_translation_table.get(c, c) for c in s)
class CliphunterIE(InfoExtractor):
IE_NAME = 'cliphunter'
@ -22,10 +28,13 @@ class CliphunterIE(InfoExtractor):
'''
_TEST = {
'url': 'http://www.cliphunter.com/w/1012420/Fun_Jynx_Maze_solo',
- 'file': '1012420.flv',
- 'md5': '15e7740f30428abf70f4223478dc1225',
+ 'md5': 'a2ba71eebf523859fe527a61018f723e',
'info_dict': {
+ 'id': '1012420',
+ 'ext': 'mp4',
'title': 'Fun Jynx Maze solo',
+ 'thumbnail': 're:^https?://.*\.jpg$',
'age_limit': 18,
}
}
@ -35,22 +44,52 @@ class CliphunterIE(InfoExtractor):
webpage = self._download_webpage(url, video_id)
+ video_title = self._search_regex(
+ r'mediaTitle = "([^"]+)"', webpage, 'title')
pl_fiji = self._search_regex(
r'pl_fiji = \'([^\']+)\'', webpage, 'video data')
pl_c_qual = self._search_regex(
r'pl_c_qual = "(.)"', webpage, 'video quality')
- video_title = self._search_regex(
- r'mediaTitle = "([^"]+)"', webpage, 'title')
- video_url = ''.join(translation_table.get(c, c) for c in pl_fiji)
+ video_url = _decode(pl_fiji)
formats = [{
'url': video_url,
- 'format_id': pl_c_qual,
+ 'format_id': 'default-%s' % pl_c_qual,
}]
qualities_json = self._search_regex(
r'var pl_qualities\s*=\s*(.*?);\n', webpage, 'quality info')
qualities_data = json.loads(qualities_json)
for i, t in enumerate(
re.findall(r"pl_fiji_([a-z0-9]+)\s*=\s*'([^']+')", webpage)):
quality_id, crypted_url = t
video_url = _decode(crypted_url)
f = {
'format_id': quality_id,
'url': video_url,
'quality': i,
}
if quality_id in qualities_data:
qd = qualities_data[quality_id]
m = re.match(
r'''(?x)<b>(?P<width>[0-9]+)x(?P<height>[0-9]+)<\\/b>
\s*\(\s*(?P<tbr>[0-9]+)\s*kb\\/s''', qd)
if m:
f['width'] = int(m.group('width'))
f['height'] = int(m.group('height'))
f['tbr'] = int(m.group('tbr'))
formats.append(f)
self._sort_formats(formats)
thumbnail = self._search_regex(
r"var\s+mov_thumb\s*=\s*'([^']+)';",
webpage, 'thumbnail', fatal=False)
return {
'id': video_id,
'title': video_title,
'formats': formats,
'age_limit': self._rta_search(webpage),
'thumbnail': thumbnail,
}
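
The 'fiji' payloads above are just a letter-substitution cipher; _decode maps each character through the table and passes unknown characters (digits, ':', '/', '.') through untouched. A standalone sketch with an invented ciphertext:

_translation_table = {
    'a': 'h', 'd': 'e', 'e': 'v', 'f': 'o', 'g': 'f', 'i': 'd', 'l': 'n',
    'm': 'a', 'n': 'm', 'p': 'u', 'q': 't', 'r': 's', 'v': 'p', 'x': 'r',
    'y': 'l', 'z': 'i',
}

def _decode(s):
    # Map each letter through the table; anything else passes through as-is.
    return ''.join(_translation_table.get(c, c) for c in s)

print(_decode('aqqv://ndzim.gpl/1012420.nv4'))
# -> http://media.fun/1012420.mp4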

View File

@ -0,0 +1,108 @@
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
compat_parse_qs,
compat_urllib_parse,
remove_end,
HEADRequest,
compat_HTTPError,
)
class CloudyIE(InfoExtractor):
_IE_DESC = 'cloudy.ec and videoraj.ch'
_VALID_URL = r'''(?x)
https?://(?:www\.)?(?P<host>cloudy\.ec|videoraj\.ch)/
(?:v/|embed\.php\?id=)
(?P<id>[A-Za-z0-9]+)
'''
_EMBED_URL = 'http://www.%s/embed.php?id=%s'
_API_URL = 'http://www.%s/api/player.api.php?%s'
_MAX_TRIES = 2
_TESTS = [
{
'url': 'https://www.cloudy.ec/v/af511e2527aac',
'md5': '5cb253ace826a42f35b4740539bedf07',
'info_dict': {
'id': 'af511e2527aac',
'ext': 'flv',
'title': 'Funny Cats and Animals Compilation june 2013',
}
},
{
'url': 'http://www.videoraj.ch/v/47f399fd8bb60',
'md5': '7d0f8799d91efd4eda26587421c3c3b0',
'info_dict': {
'id': '47f399fd8bb60',
'ext': 'flv',
'title': 'Burning a New iPhone 5 with Gasoline - Will it Survive?',
}
}
]
def _extract_video(self, video_host, video_id, file_key, error_url=None, try_num=0):
if try_num > self._MAX_TRIES - 1:
raise ExtractorError('Unable to extract video URL', expected=True)
form = {
'file': video_id,
'key': file_key,
}
if error_url:
form.update({
'numOfErrors': try_num,
'errorCode': '404',
'errorUrl': error_url,
})
data_url = self._API_URL % (video_host, compat_urllib_parse.urlencode(form))
player_data = self._download_webpage(
data_url, video_id, 'Downloading player data')
data = compat_parse_qs(player_data)
try_num += 1
if 'error' in data:
raise ExtractorError(
'%s error: %s' % (self.IE_NAME, ' '.join(data['error_msg'])),
expected=True)
title = data.get('title', [None])[0]
if title:
title = remove_end(title, '&asdasdas').strip()
video_url = data.get('url', [None])[0]
if video_url:
try:
self._request_webpage(HEADRequest(video_url), video_id, 'Checking video URL')
except ExtractorError as e:
if isinstance(e.cause, compat_HTTPError) and e.cause.code in [404, 410]:
self.report_warning('Invalid video URL, requesting another', video_id)
return self._extract_video(video_host, video_id, file_key, video_url, try_num)
return {
'id': video_id,
'url': video_url,
'title': title,
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_host = mobj.group('host')
video_id = mobj.group('id')
url = self._EMBED_URL % (video_host, video_id)
webpage = self._download_webpage(url, video_id)
file_key = self._search_regex(
r'filekey\s*=\s*"([^"]+)"', webpage, 'file_key')
return self._extract_video(video_host, video_id, file_key)
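
Sketch of the player.api.php round trip above, assuming the API answers with a urlencoded string such as 'title=...&url=...' (or 'error=1&error_msg=...' on failure); stdlib only, all values made up:

try:
    from urllib.parse import urlencode, parse_qs  # Python 3
except ImportError:
    from urllib import urlencode  # Python 2
    from urlparse import parse_qs

form = {'file': 'af511e2527aac', 'key': 'somefilekey'}  # assumed values
data_url = 'http://www.cloudy.ec/api/player.api.php?%s' % urlencode(form)

# Assumed API response body; real responses may carry error fields instead.
player_data = 'title=Funny%20Cats&url=http%3A%2F%2Fcdn.example%2Faf511e2527aac.flv'
data = parse_qs(player_data)
print('%s -> %s' % (data['title'][0], data['url'][0]))  # parse_qs wraps values in lists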

View File

@ -43,14 +43,14 @@ class ComedyCentralShowsIE(InfoExtractor):
(?P<showname>thedailyshow|thecolbertreport)\.(?:cc\.)?com/
((?:full-)?episodes/(?:[0-9a-z]{6}/)?(?P<episode>.*)|
(?P<clip>
- (?:(?:guests/[^/]+|videos|video-playlists|special-editions)/[^/]+/(?P<videotitle>[^/?#]+))
+ (?:(?:guests/[^/]+|videos|video-playlists|special-editions|news-team/[^/]+)/[^/]+/(?P<videotitle>[^/?#]+))
|(the-colbert-report-(videos|collections)/(?P<clipID>[0-9]+)/[^/]*/(?P<cntitle>.*?))
|(watch/(?P<date>[^/]*)/(?P<tdstitle>.*))
)|
(?P<interview>
extended-interviews/(?P<interID>[0-9a-z]+)/(?:playlist_tds_extended_)?(?P<interview_title>.*?)(/.*?)?)))
(?:[?#].*|$)'''
- _TEST = {
+ _TESTS = [{
'url': 'http://thedailyshow.cc.com/watch/thu-december-13-2012/kristen-stewart',
'md5': '4e2f5cb088a83cd8cdb7756132f9739d',
'info_dict': {
@ -61,7 +61,34 @@ class ComedyCentralShowsIE(InfoExtractor):
'uploader': 'thedailyshow',
'title': 'thedailyshow kristen-stewart part 1',
}
- }
+ }, {
'url': 'http://thedailyshow.cc.com/extended-interviews/xm3fnq/andrew-napolitano-extended-interview',
'only_matching': True,
}, {
'url': 'http://thecolbertreport.cc.com/videos/29w6fx/-realhumanpraise-for-fox-news',
'only_matching': True,
}, {
'url': 'http://thecolbertreport.cc.com/videos/gh6urb/neil-degrasse-tyson-pt--1?xrs=eml_col_031114',
'only_matching': True,
}, {
'url': 'http://thedailyshow.cc.com/guests/michael-lewis/3efna8/exclusive---michael-lewis-extended-interview-pt--3',
'only_matching': True,
}, {
'url': 'http://thedailyshow.cc.com/episodes/sy7yv0/april-8--2014---denis-leary',
'only_matching': True,
}, {
'url': 'http://thecolbertreport.cc.com/episodes/8ase07/april-8--2014---jane-goodall',
'only_matching': True,
}, {
'url': 'http://thedailyshow.cc.com/video-playlists/npde3s/the-daily-show-19088-highlights',
'only_matching': True,
}, {
'url': 'http://thedailyshow.cc.com/special-editions/2l8fdb/special-edition---a-look-back-at-food',
'only_matching': True,
}, {
'url': 'http://thedailyshow.cc.com/news-team/michael-che/7wnfel/we-need-to-talk-about-israel',
'only_matching': True,
}]
_available_formats = ['3500', '2200', '1700', '1200', '750', '400']
@ -185,6 +212,9 @@ class ComedyCentralShowsIE(InfoExtractor):
'ext': self._video_extensions.get(format, 'mp4'),
'height': h,
'width': w,
'format_note': 'HTTP 400 at the moment (patches welcome!)',
'preference': -100,
})
formats.append({
'format_id': 'rtmp-%s' % format,

View File

@ -1,4 +1,7 @@
from __future__ import unicode_literals
import base64
import datetime
import hashlib
import json
import netrc
@ -13,11 +16,13 @@ from ..utils import (
compat_http_client,
compat_urllib_error,
compat_urllib_parse_urlparse,
compat_urlparse,
compat_str,
clean_html,
compiled_regex_type,
ExtractorError,
float_or_none,
int_or_none,
RegexNotFoundError,
sanitize_filename,
@ -114,7 +119,7 @@ class InfoExtractor(object):
upload_date: Video upload date (YYYYMMDD).
If not explicitly set, calculated from timestamp.
uploader_id: Nickname or id of the video uploader.
- location: Physical location of the video.
+ location: Physical location where the video was filmed.
subtitles: The subtitle file contents as a dictionary in the format
{language: subtitles}.
duration: Length of the video in seconds, as an integer.
@ -128,9 +133,13 @@ class InfoExtractor(object):
by YoutubeDL if it's missing)
categories: A list of categories that the video falls in, for example
["Sports", "Berlin"]
is_live: True, False, or None (=unknown). Whether this video is a
live stream that goes on instead of a fixed-length video.
Unless mentioned otherwise, the fields should be Unicode strings.
Unless mentioned otherwise, None is equivalent to absence of information.
Subclasses of this one should re-define the _real_initialize() and
_real_extract() methods and define a _VALID_URL regexp.
Probably, they should also be added to the list of extractors.
@ -159,6 +168,14 @@ class InfoExtractor(object):
cls._VALID_URL_RE = re.compile(cls._VALID_URL)
return cls._VALID_URL_RE.match(url) is not None
@classmethod
def _match_id(cls, url):
if '_VALID_URL_RE' not in cls.__dict__:
cls._VALID_URL_RE = re.compile(cls._VALID_URL)
m = cls._VALID_URL_RE.match(url)
assert m
return m.group('id')
@classmethod
def working(cls):
"""Getter method for _WORKING."""
@ -202,17 +219,17 @@ class InfoExtractor(object):
self.report_download_webpage(video_id)
elif note is not False:
if video_id is None:
- self.to_screen(u'%s' % (note,))
+ self.to_screen('%s' % (note,))
else:
- self.to_screen(u'%s: %s' % (video_id, note))
+ self.to_screen('%s: %s' % (video_id, note))
try:
return self._downloader.urlopen(url_or_request)
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
if errnote is False:
return False
if errnote is None:
- errnote = u'Unable to download webpage'
- errmsg = u'%s: %s' % (errnote, compat_str(err))
+ errnote = 'Unable to download webpage'
+ errmsg = '%s: %s' % (errnote, compat_str(err))
if fatal:
raise ExtractorError(errmsg, sys.exc_info()[2], cause=err)
else:
@ -249,7 +266,7 @@ class InfoExtractor(object):
url = url_or_request.get_full_url()
except AttributeError:
url = url_or_request
- self.to_screen(u'Dumping request to ' + url)
+ self.to_screen('Dumping request to ' + url)
dump = base64.b64encode(webpage_bytes).decode('ascii')
self._downloader.to_screen(dump)
if self._downloader.params.get('write_pages', False):
@ -259,11 +276,11 @@ class InfoExtractor(object):
url = url_or_request
basen = '%s_%s' % (video_id, url)
if len(basen) > 240:
- h = u'___' + hashlib.md5(basen.encode('utf-8')).hexdigest()
+ h = '___' + hashlib.md5(basen.encode('utf-8')).hexdigest()
basen = basen[:240 - len(h)] + h
raw_filename = basen + '.dump'
filename = sanitize_filename(raw_filename, restricted=True)
- self.to_screen(u'Saving request to ' + filename)
+ self.to_screen('Saving request to ' + filename)
with open(filename, 'wb') as outf:
outf.write(webpage_bytes)
@ -272,14 +289,14 @@ class InfoExtractor(object):
except LookupError:
content = webpage_bytes.decode('utf-8', 'replace')
- if (u'<title>Access to this site is blocked</title>' in content and
- u'Websense' in content[:512]):
- msg = u'Access to this webpage has been blocked by Websense filtering software in your network.'
+ if ('<title>Access to this site is blocked</title>' in content and
+ 'Websense' in content[:512]):
+ msg = 'Access to this webpage has been blocked by Websense filtering software in your network.'
blocked_iframe = self._html_search_regex(
r'<iframe src="([^"]+)"', content,
- u'Websense information URL', default=None)
+ 'Websense information URL', default=None)
if blocked_iframe:
- msg += u' Visit %s for more details' % blocked_iframe
+ msg += ' Visit %s for more details' % blocked_iframe
raise ExtractorError(msg, expected=True)
return (content, urlh)
@ -294,7 +311,7 @@ class InfoExtractor(object):
return content
def _download_xml(self, url_or_request, video_id,
- note=u'Downloading XML', errnote=u'Unable to download XML',
+ note='Downloading XML', errnote='Unable to download XML',
transform_source=None, fatal=True):
"""Return the xml as an xml.etree.ElementTree.Element"""
xml_string = self._download_webpage(
@ -306,8 +323,8 @@ class InfoExtractor(object):
return xml.etree.ElementTree.fromstring(xml_string.encode('utf-8'))
def _download_json(self, url_or_request, video_id,
- note=u'Downloading JSON metadata',
- errnote=u'Unable to download JSON metadata',
+ note='Downloading JSON metadata',
+ errnote='Unable to download JSON metadata',
transform_source=None,
fatal=True):
json_string = self._download_webpage(
@ -319,32 +336,36 @@ class InfoExtractor(object):
try:
return json.loads(json_string)
except ValueError as ve:
- raise ExtractorError('Failed to download JSON', cause=ve)
+ errmsg = '%s: Failed to parse JSON ' % video_id
+ if fatal:
+ raise ExtractorError(errmsg, cause=ve)
+ else:
+ self.report_warning(errmsg + str(ve))
def report_warning(self, msg, video_id=None):
- idstr = u'' if video_id is None else u'%s: ' % video_id
+ idstr = '' if video_id is None else '%s: ' % video_id
self._downloader.report_warning(
- u'[%s] %s%s' % (self.IE_NAME, idstr, msg))
+ '[%s] %s%s' % (self.IE_NAME, idstr, msg))
def to_screen(self, msg):
"""Print msg to screen, prefixing it with '[ie_name]'"""
- self._downloader.to_screen(u'[%s] %s' % (self.IE_NAME, msg))
+ self._downloader.to_screen('[%s] %s' % (self.IE_NAME, msg))
def report_extraction(self, id_or_name):
"""Report information extraction."""
- self.to_screen(u'%s: Extracting information' % id_or_name)
+ self.to_screen('%s: Extracting information' % id_or_name)
def report_download_webpage(self, video_id):
"""Report webpage download."""
- self.to_screen(u'%s: Downloading webpage' % video_id)
+ self.to_screen('%s: Downloading webpage' % video_id)
def report_age_confirmation(self):
"""Report attempt to confirm age."""
- self.to_screen(u'Confirming age')
+ self.to_screen('Confirming age')
def report_login(self):
"""Report attempt to log in."""
- self.to_screen(u'Logging in')
+ self.to_screen('Logging in')
#Methods for following #608
@staticmethod
@ -384,7 +405,7 @@ class InfoExtractor(object):
break
if os.name != 'nt' and sys.stderr.isatty():
- _name = u'\033[0;34m%s\033[0m' % name
+ _name = '\033[0;34m%s\033[0m' % name
else:
_name = name
@ -394,10 +415,10 @@ class InfoExtractor(object):
elif default is not _NO_DEFAULT:
return default
elif fatal:
- raise RegexNotFoundError(u'Unable to extract %s' % _name)
+ raise RegexNotFoundError('Unable to extract %s' % _name)
else:
- self._downloader.report_warning(u'unable to extract %s; '
- u'please report this issue on http://yt-dl.org/bug' % _name)
+ self._downloader.report_warning('unable to extract %s; '
+ 'please report this issue on http://yt-dl.org/bug' % _name)
return None
def _html_search_regex(self, pattern, string, name, default=_NO_DEFAULT, fatal=True, flags=0):
@ -436,10 +457,26 @@ class InfoExtractor(object):
else:
raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
except (IOError, netrc.NetrcParseError) as err:
- self._downloader.report_warning(u'parsing .netrc: %s' % compat_str(err))
+ self._downloader.report_warning('parsing .netrc: %s' % compat_str(err))
return (username, password)
def _get_tfa_info(self):
"""
Get the two-factor authentication info
TODO - asking the user will be required for sms/phone verify
currently just uses the command line option
If there's no info available, return None
"""
if self._downloader is None:
return None
downloader_params = self._downloader.params
if downloader_params.get('twofactor', None) is not None:
return downloader_params['twofactor']
return None
# Helper functions for extracting OpenGraph info
@staticmethod
def _og_regexes(prop):
@ -460,7 +497,7 @@ class InfoExtractor(object):
return unescapeHTML(escaped)
def _og_search_thumbnail(self, html, **kargs):
- return self._og_search_property('image', html, u'thumbnail url', fatal=False, **kargs)
+ return self._og_search_property('image', html, 'thumbnail url', fatal=False, **kargs)
def _og_search_description(self, html, **kargs):
return self._og_search_property('description', html, fatal=False, **kargs)
@ -519,7 +556,7 @@ class InfoExtractor(object):
def _sort_formats(self, formats):
if not formats:
- raise ExtractorError(u'No video formats found')
+ raise ExtractorError('No video formats found')
def _formats_key(f):
# TODO remove the following workaround
@ -539,9 +576,9 @@ class InfoExtractor(object):
if f.get('vcodec') == 'none': # audio only
if self._downloader.params.get('prefer_free_formats'):
- ORDER = [u'aac', u'mp3', u'm4a', u'webm', u'ogg', u'opus']
+ ORDER = ['aac', 'mp3', 'm4a', 'webm', 'ogg', 'opus']
else:
- ORDER = [u'webm', u'opus', u'ogg', u'mp3', u'aac', u'm4a']
+ ORDER = ['webm', 'opus', 'ogg', 'mp3', 'aac', 'm4a']
ext_preference = 0
try:
audio_ext_preference = ORDER.index(f['ext'])
@ -549,9 +586,9 @@ class InfoExtractor(object):
audio_ext_preference = -1
else:
if self._downloader.params.get('prefer_free_formats'):
- ORDER = [u'flv', u'mp4', u'webm']
+ ORDER = ['flv', 'mp4', 'webm']
else:
- ORDER = [u'webm', u'flv', u'mp4']
+ ORDER = ['webm', 'flv', 'mp4']
try:
ext_preference = ORDER.index(f['ext'])
except ValueError:
@ -593,7 +630,7 @@ class InfoExtractor(object):
def _sleep(self, timeout, video_id, msg_template=None):
if msg_template is None:
- msg_template = u'%(video_id)s: Waiting for %(timeout)s seconds'
+ msg_template = '%(video_id)s: Waiting for %(timeout)s seconds'
msg = msg_template % {'video_id': video_id, 'timeout': timeout}
self.to_screen(msg)
time.sleep(timeout)
@ -604,11 +641,15 @@ class InfoExtractor(object):
'Unable to download f4m manifest')
formats = []
- for media_el in manifest.findall('{http://ns.adobe.com/f4m/1.0}media'):
+ media_nodes = manifest.findall('{http://ns.adobe.com/f4m/1.0}media')
+ for i, media_el in enumerate(media_nodes):
tbr = int_or_none(media_el.attrib.get('bitrate'))
format_id = 'f4m-%d' % (i if tbr is None else tbr)
formats.append({
'format_id': format_id,
'url': manifest_url,
'ext': 'flv',
- 'tbr': int_or_none(media_el.attrib.get('bitrate')),
+ 'tbr': tbr,
'width': int_or_none(media_el.attrib.get('width')),
'height': int_or_none(media_el.attrib.get('height')),
})
@ -616,6 +657,98 @@ class InfoExtractor(object):
return formats
def _extract_m3u8_formats(self, m3u8_url, video_id, ext=None,
entry_protocol='m3u8', preference=None):
formats = [{
'format_id': 'm3u8-meta',
'url': m3u8_url,
'ext': ext,
'protocol': 'm3u8',
'preference': -1,
'resolution': 'multiple',
'format_note': 'Quality selection URL',
}]
format_url = lambda u: (
u
if re.match(r'^https?://', u)
else compat_urlparse.urljoin(m3u8_url, u))
m3u8_doc = self._download_webpage(m3u8_url, video_id)
last_info = None
kv_rex = re.compile(
r'(?P<key>[a-zA-Z_-]+)=(?P<val>"[^"]+"|[^",]+)(?:,|$)')
for line in m3u8_doc.splitlines():
if line.startswith('#EXT-X-STREAM-INF:'):
last_info = {}
for m in kv_rex.finditer(line):
v = m.group('val')
if v.startswith('"'):
v = v[1:-1]
last_info[m.group('key')] = v
elif line.startswith('#') or not line.strip():
continue
else:
if last_info is None:
formats.append({'url': format_url(line)})
continue
tbr = int_or_none(last_info.get('BANDWIDTH'), scale=1000)
f = {
'format_id': 'm3u8-%d' % (tbr if tbr else len(formats)),
'url': format_url(line.strip()),
'tbr': tbr,
'ext': ext,
'protocol': entry_protocol,
'preference': preference,
}
codecs = last_info.get('CODECS')
if codecs:
# TODO: looks like video codec is not always necessarily goes first
va_codecs = codecs.split(',')
if va_codecs[0]:
f['vcodec'] = va_codecs[0].partition('.')[0]
if len(va_codecs) > 1 and va_codecs[1]:
f['acodec'] = va_codecs[1].partition('.')[0]
resolution = last_info.get('RESOLUTION')
if resolution:
width_str, height_str = resolution.split('x')
f['width'] = int(width_str)
f['height'] = int(height_str)
formats.append(f)
last_info = {}
self._sort_formats(formats)
return formats
def _live_title(self, name):
""" Generate the title for a live video """
now = datetime.datetime.now()
now_str = now.strftime("%Y-%m-%d %H:%M")
return name + ' ' + now_str
def _int(self, v, name, fatal=False, **kwargs):
res = int_or_none(v, **kwargs)
if 'get_attr' in kwargs:
print(getattr(v, kwargs['get_attr']))
if res is None:
msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
if fatal:
raise ExtractorError(msg)
else:
self._downloader.report_warning(msg)
return res
def _float(self, v, name, fatal=False, **kwargs):
res = float_or_none(v, **kwargs)
if res is None:
msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
if fatal:
raise ExtractorError(msg)
else:
self._downloader.report_warning(msg)
return res
class SearchInfoExtractor(InfoExtractor):
"""
@ -635,7 +768,7 @@ class SearchInfoExtractor(InfoExtractor):
def _real_extract(self, query):
mobj = re.match(self._make_valid_url(), query)
if mobj is None:
raise ExtractorError(u'Invalid search query "%s"' % query)
raise ExtractorError('Invalid search query "%s"' % query)
prefix = mobj.group('prefix')
query = mobj.group('query')
@ -646,9 +779,9 @@ class SearchInfoExtractor(InfoExtractor):
else:
n = int(prefix)
if n <= 0:
- raise ExtractorError(u'invalid download number %s for query "%s"' % (n, query))
+ raise ExtractorError('invalid download number %s for query "%s"' % (n, query))
elif n > self._MAX_RESULTS:
- self._downloader.report_warning(u'%s returns max %i results (you requested %i)' % (self._SEARCH_KEY, self._MAX_RESULTS, n))
+ self._downloader.report_warning('%s returns max %i results (you requested %i)' % (self._SEARCH_KEY, self._MAX_RESULTS, n))
n = self._MAX_RESULTS
return self._get_n_results(query, n)
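
A standalone sketch of the #EXT-X-STREAM-INF parsing that the new _extract_m3u8_formats above performs, run against a toy master playlist (assumed shape):

import re

m3u8_doc = '''#EXTM3U
#EXT-X-STREAM-INF:BANDWIDTH=1280000,RESOLUTION=640x360,CODECS="avc1.42001e,mp4a.40.2"
http://example.com/low.m3u8
#EXT-X-STREAM-INF:BANDWIDTH=2560000,RESOLUTION=1280x720
http://example.com/hi.m3u8
'''

kv_rex = re.compile(r'(?P<key>[a-zA-Z_-]+)=(?P<val>"[^"]+"|[^",]+)(?:,|$)')
last_info = None
for line in m3u8_doc.splitlines():
    if line.startswith('#EXT-X-STREAM-INF:'):
        # Collect the attribute list into a dict, stripping quoted values.
        last_info = dict((m.group('key'), m.group('val').strip('"'))
                         for m in kv_rex.finditer(line))
    elif line.startswith('#') or not line.strip():
        continue
    else:
        tbr = int(last_info['BANDWIDTH']) // 1000  # kbit/s, like the tbr field
        print('%s  %d kbps  %s' % (line, tbr, last_info.get('RESOLUTION')))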

View File

@ -5,10 +5,11 @@ import re
import json
import base64
import zlib
import xml.etree.ElementTree
from hashlib import sha1
from math import pow, sqrt, floor
- from .common import InfoExtractor
+ from .subtitles import SubtitlesInfoExtractor
from ..utils import (
ExtractorError,
compat_urllib_parse,
@ -17,6 +18,7 @@ from ..utils import (
intlist_to_bytes,
unified_strdate,
clean_html,
urlencode_postdata,
)
from ..aes import (
aes_cbc_decrypt,
@ -24,7 +26,7 @@ from ..aes import (
)
- class CrunchyrollIE(InfoExtractor):
+ class CrunchyrollIE(SubtitlesInfoExtractor):
_VALID_URL = r'https?://(?:(?P<prefix>www|m)\.)?(?P<url>crunchyroll\.com/(?:[^/]*/[^/?&]*?|media/\?id=)(?P<video_id>[0-9]+))(?:[/?&]|$)'
_TEST = {
'url': 'http://www.crunchyroll.com/wanna-be-the-strongest-in-the-world/episode-1-an-idol-wrestler-is-born-645513',
@ -51,6 +53,26 @@ class CrunchyrollIE(InfoExtractor):
'1080': ('80', '108'),
}
def _login(self):
(username, password) = self._get_login_info()
if username is None:
return
self.report_login()
login_url = 'https://www.crunchyroll.com/?a=formhandler'
data = urlencode_postdata({
'formname': 'RpcApiUser_Login',
'name': username,
'password': password,
})
login_request = compat_urllib_request.Request(login_url, data)
login_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
self._download_webpage(login_request, None, False, 'Wrong login info')
def _real_initialize(self):
self._login()
def _decrypt_subtitles(self, data, iv, id):
data = bytes_to_intlist(data)
iv = bytes_to_intlist(iv)
@ -97,6 +119,75 @@ class CrunchyrollIE(InfoExtractor):
output += '%d\n%s --> %s\n%s\n\n' % (i, start, end, text)
return output
def _convert_subtitles_to_ass(self, subtitles):
output = ''
def ass_bool(strvalue):
assvalue = '0'
if strvalue == '1':
assvalue = '-1'
return assvalue
sub_root = xml.etree.ElementTree.fromstring(subtitles)
if not sub_root:
return output
output = '[Script Info]\n'
output += 'Title: %s\n' % sub_root.attrib["title"]
output += 'ScriptType: v4.00+\n'
output += 'WrapStyle: %s\n' % sub_root.attrib["wrap_style"]
output += 'PlayResX: %s\n' % sub_root.attrib["play_res_x"]
output += 'PlayResY: %s\n' % sub_root.attrib["play_res_y"]
output += """ScaledBorderAndShadow: yes
[V4+ Styles]
Format: Name, Fontname, Fontsize, PrimaryColour, SecondaryColour, OutlineColour, BackColour, Bold, Italic, Underline, StrikeOut, ScaleX, ScaleY, Spacing, Angle, BorderStyle, Outline, Shadow, Alignment, MarginL, MarginR, MarginV, Encoding
"""
for style in sub_root.findall('./styles/style'):
output += 'Style: ' + style.attrib["name"]
output += ',' + style.attrib["font_name"]
output += ',' + style.attrib["font_size"]
output += ',' + style.attrib["primary_colour"]
output += ',' + style.attrib["secondary_colour"]
output += ',' + style.attrib["outline_colour"]
output += ',' + style.attrib["back_colour"]
output += ',' + ass_bool(style.attrib["bold"])
output += ',' + ass_bool(style.attrib["italic"])
output += ',' + ass_bool(style.attrib["underline"])
output += ',' + ass_bool(style.attrib["strikeout"])
output += ',' + style.attrib["scale_x"]
output += ',' + style.attrib["scale_y"]
output += ',' + style.attrib["spacing"]
output += ',' + style.attrib["angle"]
output += ',' + style.attrib["border_style"]
output += ',' + style.attrib["outline"]
output += ',' + style.attrib["shadow"]
output += ',' + style.attrib["alignment"]
output += ',' + style.attrib["margin_l"]
output += ',' + style.attrib["margin_r"]
output += ',' + style.attrib["margin_v"]
output += ',' + style.attrib["encoding"]
output += '\n'
output += """
[Events]
Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
"""
for event in sub_root.findall('./events/event'):
output += 'Dialogue: 0'
output += ',' + event.attrib["start"]
output += ',' + event.attrib["end"]
output += ',' + event.attrib["style"]
output += ',' + event.attrib["name"]
output += ',' + event.attrib["margin_l"]
output += ',' + event.attrib["margin_r"]
output += ',' + event.attrib["margin_v"]
output += ',' + event.attrib["effect"]
output += ',' + event.attrib["text"]
output += '\n'
return output
def _real_extract(self,url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('video_id')
@ -158,6 +249,7 @@ class CrunchyrollIE(InfoExtractor):
})
subtitles = {}
sub_format = self._downloader.params.get('subtitlesformat', 'srt')
for sub_id, sub_name in re.findall(r'\?ssid=([0-9]+)" title="([^"]+)', webpage):
sub_page = self._download_webpage('http://www.crunchyroll.com/xml/?req=RpcApiSubtitle_GetXml&subtitle_script_id='+sub_id,\
video_id, note='Downloading subtitles for '+sub_name)
@ -174,7 +266,14 @@ class CrunchyrollIE(InfoExtractor):
lang_code = self._search_regex(r'lang_code=["\']([^"\']+)', subtitle, 'subtitle_lang_code', fatal=False)
if not lang_code:
continue
- subtitles[lang_code] = self._convert_subtitles_to_srt(subtitle)
+ if sub_format == 'ass':
+ subtitles[lang_code] = self._convert_subtitles_to_ass(subtitle)
+ else:
+ subtitles[lang_code] = self._convert_subtitles_to_srt(subtitle)
if self._downloader.params.get('listsubtitles', False):
self._list_available_subtitles(video_id, subtitles)
return
return {
'id': video_id,
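
The new ASS writer above serializes one 'Style:' line per <style> element. A tiny sketch of the boolean convention it relies on (ASS encodes true as -1), with assumed attribute values; the real elements carry many more fields:

def ass_bool(strvalue):
    # ASS encodes booleans as 0 / -1, so '1' from the XML becomes '-1'.
    return '-1' if strvalue == '1' else '0'

style = {'name': 'Default', 'font_name': 'Arial', 'font_size': '20', 'bold': '1'}
line = 'Style: %s,%s,%s,%s' % (
    style['name'], style['font_name'], style['font_size'], ass_bool(style['bold']))
print(line)  # Style: Default,Arial,20,-1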

View File

@ -34,6 +34,13 @@ class CSpanIE(InfoExtractor):
'title': 'International Health Care Models',
'description': 'md5:7a985a2d595dba00af3d9c9f0783c967',
}
}, {
'url': 'http://www.c-span.org/video/?318608-1/gm-ignition-switch-recall',
'info_dict': {
'id': '342759',
'title': 'General Motors Ignition Switch Recall',
},
'playlist_duration_sum': 14855,
}]
def _real_extract(self, url):

View File

@ -1,3 +1,6 @@
#coding: utf-8
from __future__ import unicode_literals
import re
import json
import itertools
@ -28,60 +31,58 @@ class DailymotionIE(DailymotionBaseInfoExtractor, SubtitlesInfoExtractor):
"""Information Extractor for Dailymotion"""
_VALID_URL = r'(?i)(?:https?://)?(?:(www|touch)\.)?dailymotion\.[a-z]{2,3}/(?:(embed|#)/)?video/(?P<id>[^/?_]+)'
- IE_NAME = u'dailymotion'
+ IE_NAME = 'dailymotion'
_FORMATS = [
- (u'stream_h264_ld_url', u'ld'),
- (u'stream_h264_url', u'standard'),
- (u'stream_h264_hq_url', u'hq'),
- (u'stream_h264_hd_url', u'hd'),
- (u'stream_h264_hd1080_url', u'hd180'),
+ ('stream_h264_ld_url', 'ld'),
+ ('stream_h264_url', 'standard'),
+ ('stream_h264_hq_url', 'hq'),
+ ('stream_h264_hd_url', 'hd'),
+ ('stream_h264_hd1080_url', 'hd180'),
]
_TESTS = [
{
- u'url': u'http://www.dailymotion.com/video/x33vw9_tutoriel-de-youtubeur-dl-des-video_tech',
- u'file': u'x33vw9.mp4',
- u'md5': u'392c4b85a60a90dc4792da41ce3144eb',
- u'info_dict': {
- u"uploader": u"Amphora Alex and Van .",
- u"title": u"Tutoriel de Youtubeur\"DL DES VIDEO DE YOUTUBE\""
+ 'url': 'http://www.dailymotion.com/video/x33vw9_tutoriel-de-youtubeur-dl-des-video_tech',
+ 'md5': '392c4b85a60a90dc4792da41ce3144eb',
+ 'info_dict': {
+ 'id': 'x33vw9',
+ 'ext': 'mp4',
+ 'uploader': 'Amphora Alex and Van .',
+ 'title': 'Tutoriel de Youtubeur"DL DES VIDEO DE YOUTUBE"',
}
},
# Vevo video
{
- u'url': u'http://www.dailymotion.com/video/x149uew_katy-perry-roar-official_musi',
- u'file': u'USUV71301934.mp4',
- u'info_dict': {
- u'title': u'Roar (Official)',
- u'uploader': u'Katy Perry',
- u'upload_date': u'20130905',
+ 'url': 'http://www.dailymotion.com/video/x149uew_katy-perry-roar-official_musi',
+ 'info_dict': {
+ 'title': 'Roar (Official)',
+ 'id': 'USUV71301934',
+ 'ext': 'mp4',
+ 'uploader': 'Katy Perry',
+ 'upload_date': '20130905',
},
- u'params': {
- u'skip_download': True,
+ 'params': {
+ 'skip_download': True,
},
- u'skip': u'VEVO is only available in some countries',
+ 'skip': 'VEVO is only available in some countries',
},
# age-restricted video
{
- u'url': u'http://www.dailymotion.com/video/xyh2zz_leanna-decker-cyber-girl-of-the-year-desires-nude-playboy-plus_redband',
- u'file': u'xyh2zz.mp4',
- u'md5': u'0d667a7b9cebecc3c89ee93099c4159d',
- u'info_dict': {
- u'title': 'Leanna Decker - Cyber Girl Of The Year Desires Nude [Playboy Plus]',
- u'uploader': 'HotWaves1012',
- u'age_limit': 18,
+ 'url': 'http://www.dailymotion.com/video/xyh2zz_leanna-decker-cyber-girl-of-the-year-desires-nude-playboy-plus_redband',
+ 'md5': '0d667a7b9cebecc3c89ee93099c4159d',
+ 'info_dict': {
+ 'id': 'xyh2zz',
+ 'ext': 'mp4',
+ 'title': 'Leanna Decker - Cyber Girl Of The Year Desires Nude [Playboy Plus]',
+ 'uploader': 'HotWaves1012',
+ 'age_limit': 18,
}
}
]
def _real_extract(self, url):
- # Extract id and simplified title from URL
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('id')
+ video_id = self._match_id(url)
url = 'http://www.dailymotion.com/video/%s' % video_id
# Retrieve video webpage to extract further information
@ -97,8 +98,8 @@ class DailymotionIE(DailymotionBaseInfoExtractor, SubtitlesInfoExtractor):
webpage)
if m_vevo is not None:
vevo_id = m_vevo.group('id')
- self.to_screen(u'Vevo video detected: %s' % vevo_id)
- return self.url_result(u'vevo:%s' % vevo_id, ie='Vevo')
+ self.to_screen('Vevo video detected: %s' % vevo_id)
+ return self.url_result('vevo:%s' % vevo_id, ie='Vevo')
age_limit = self._rta_search(webpage)
@ -109,7 +110,7 @@ class DailymotionIE(DailymotionBaseInfoExtractor, SubtitlesInfoExtractor):
embed_url = 'http://www.dailymotion.com/embed/video/%s' % video_id
embed_page = self._download_webpage(embed_url, video_id,
- u'Downloading embed page')
+ 'Downloading embed page')
info = self._search_regex(r'var info = ({.*?}),$', embed_page,
'video info', flags=re.MULTILINE)
info = json.loads(info)
@ -134,7 +135,7 @@ class DailymotionIE(DailymotionBaseInfoExtractor, SubtitlesInfoExtractor):
'height': height,
})
if not formats:
- raise ExtractorError(u'Unable to extract video URL')
+ raise ExtractorError('Unable to extract video URL')
# subtitles
video_subtitles = self.extract_subtitles(video_id, webpage)
@ -142,18 +143,23 @@ class DailymotionIE(DailymotionBaseInfoExtractor, SubtitlesInfoExtractor):
self._list_available_subtitles(video_id, webpage)
return
- view_count = self._search_regex(
- r'video_views_count[^>]+>\s+([\d\.,]+)', webpage, u'view count', fatal=False)
- if view_count is not None:
- view_count = str_to_int(view_count)
+ view_count = str_to_int(self._search_regex(
+ r'video_views_count[^>]+>\s+([\d\.,]+)',
+ webpage, 'view count', fatal=False))
title = self._og_search_title(webpage, default=None)
if title is None:
title = self._html_search_regex(
r'(?s)<span\s+id="video_title"[^>]*>(.*?)</span>', webpage,
'title')
return {
'id': video_id,
'formats': formats,
'uploader': info['owner.screenname'],
- 'upload_date': video_upload_date,
- 'title': self._og_search_title(webpage),
- 'subtitles': video_subtitles,
+ 'upload_date': video_upload_date,
+ 'title': title,
+ 'subtitles': video_subtitles,
'thumbnail': info['thumbnail_url'],
'age_limit': age_limit,
'view_count': view_count,
@ -165,28 +171,35 @@ class DailymotionIE(DailymotionBaseInfoExtractor, SubtitlesInfoExtractor):
'https://api.dailymotion.com/video/%s/subtitles?fields=id,language,url' % video_id,
video_id, note=False)
except ExtractorError as err:
- self._downloader.report_warning(u'unable to download video subtitles: %s' % compat_str(err))
+ self._downloader.report_warning('unable to download video subtitles: %s' % compat_str(err))
return {}
info = json.loads(sub_list)
if (info['total'] > 0):
sub_lang_list = dict((l['language'], l['url']) for l in info['list'])
return sub_lang_list
- self._downloader.report_warning(u'video doesn\'t have subtitles')
+ self._downloader.report_warning('video doesn\'t have subtitles')
return {}
class DailymotionPlaylistIE(DailymotionBaseInfoExtractor):
- IE_NAME = u'dailymotion:playlist'
+ IE_NAME = 'dailymotion:playlist'
_VALID_URL = r'(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/playlist/(?P<id>.+?)/'
_MORE_PAGES_INDICATOR = r'(?s)<div class="pages[^"]*">.*?<a\s+class="[^"]*?icon-arrow_right[^"]*?"'
_PAGE_TEMPLATE = 'https://www.dailymotion.com/playlist/%s/%s'
_TESTS = [{
'url': 'http://www.dailymotion.com/playlist/xv4bw_nqtv_sport/1#video=xl8v3q',
'info_dict': {
'title': 'SPORT',
},
'playlist_mincount': 20,
}]
def _extract_entries(self, id):
video_ids = []
for pagenum in itertools.count(1):
request = self._build_request(self._PAGE_TEMPLATE % (id, pagenum))
webpage = self._download_webpage(request,
- id, u'Downloading page %s' % pagenum)
+ id, 'Downloading page %s' % pagenum)
video_ids.extend(re.findall(r'data-xid="(.+?)"', webpage))
@ -209,9 +222,17 @@ class DailymotionPlaylistIE(DailymotionBaseInfoExtractor):
class DailymotionUserIE(DailymotionPlaylistIE):
- IE_NAME = u'dailymotion:user'
+ IE_NAME = 'dailymotion:user'
_VALID_URL = r'https?://(?:www\.)?dailymotion\.[a-z]{2,3}/user/(?P<user>[^/]+)'
_PAGE_TEMPLATE = 'http://www.dailymotion.com/user/%s/%s'
_TESTS = [{
'url': 'https://www.dailymotion.com/user/nqtv',
'info_dict': {
'id': 'nqtv',
'title': 'Rémi Gaillard',
},
'playlist_mincount': 100,
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
@ -219,7 +240,7 @@ class DailymotionUserIE(DailymotionPlaylistIE):
webpage = self._download_webpage(url, user)
full_user = unescapeHTML(self._html_search_regex(
r'<a class="nav-image" title="([^"]+)" href="/%s">' % re.escape(user),
- webpage, u'user', flags=re.DOTALL))
+ webpage, 'user'))
return {
'_type': 'playlist',
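
Sketch of how the _FORMATS table above maps the embed page's metadata JSON to format dicts; 'info' below is an assumed stand-in for the page's 'var info = {...}' object:

_FORMATS = [  # copied from the diff above
    ('stream_h264_ld_url', 'ld'),
    ('stream_h264_url', 'standard'),
    ('stream_h264_hq_url', 'hq'),
    ('stream_h264_hd_url', 'hd'),
    ('stream_h264_hd1080_url', 'hd180'),
]

info = {  # assumed shape; only some keys are present per video
    'stream_h264_url': 'http://cdn.example/x33vw9_standard.mp4',
    'stream_h264_hd_url': 'http://cdn.example/x33vw9_hd.mp4',
}

formats = []
for key, format_id in _FORMATS:
    video_url = info.get(key)
    if video_url:
        formats.append({'format_id': format_id, 'url': video_url})
print(formats)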

View File

@ -11,10 +11,10 @@ from ..utils import (
class DaumIE(InfoExtractor):
- _VALID_URL = r'https?://(?:m\.)?tvpot\.daum\.net/.*?clipid=(?P<id>\d+)'
+ _VALID_URL = r'https?://(?:m\.)?tvpot\.daum\.net/(?:v/|.*?clipid=)(?P<id>[^?#&]+)'
IE_NAME = 'daum.net'
- _TEST = {
+ _TESTS = [{
'url': 'http://tvpot.daum.net/clip/ClipView.do?clipid=52554690',
'info_dict': {
'id': '52554690',
@ -24,11 +24,17 @@ class DaumIE(InfoExtractor):
'upload_date': '20130831',
'duration': 3868,
},
- }
+ }, {
'url': 'http://tvpot.daum.net/v/vab4dyeDBysyBssyukBUjBz',
'only_matching': True,
}, {
'url': 'http://tvpot.daum.net/v/07dXWRka62Y%24',
'only_matching': True,
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group(1)
+ video_id = mobj.group('id')
canonical_url = 'http://tvpot.daum.net/v/%s' % video_id
webpage = self._download_webpage(canonical_url, video_id)
full_id = self._search_regex(
@ -42,7 +48,6 @@ class DaumIE(InfoExtractor):
'http://videofarm.daum.net/controller/api/open/v1_2/MovieData.apixml?' + query,
video_id, 'Downloading video formats info')
- self.to_screen(u'%s: Getting video urls' % video_id)
formats = []
for format_el in urls.findall('result/output_list/output_list'):
profile = format_el.attrib['profile']
@ -52,7 +57,7 @@ class DaumIE(InfoExtractor):
})
url_doc = self._download_xml(
'http://videofarm.daum.net/controller/api/open/v1_2/MovieLocation.apixml?' + format_query,
- video_id, note=False)
+ video_id, note='Downloading video data for %s format' % profile)
format_url = url_doc.find('result/url').text
formats.append({
'url': format_url,

View File

@ -0,0 +1,74 @@
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
float_or_none,
int_or_none,
clean_html,
)
class DBTVIE(InfoExtractor):
_VALID_URL = r'http://dbtv\.no/(?P<id>[0-9]+)#(?P<display_id>.+)'
_TEST = {
'url': 'http://dbtv.no/3649835190001#Skulle_teste_ut_fornøyelsespark,_men_kollegaen_var_bare_opptatt_av_bikinikroppen',
'md5': 'b89953ed25dacb6edb3ef6c6f430f8bc',
'info_dict': {
'id': '33100',
'display_id': 'Skulle_teste_ut_fornøyelsespark,_men_kollegaen_var_bare_opptatt_av_bikinikroppen',
'ext': 'mp4',
'title': 'Skulle teste ut fornøyelsespark, men kollegaen var bare opptatt av bikinikroppen',
'description': 'md5:1504a54606c4dde3e4e61fc97aa857e0',
'thumbnail': 're:https?://.*\.jpg$',
'timestamp': 1404039863.438,
'upload_date': '20140629',
'duration': 69.544,
'view_count': int,
'categories': list,
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
display_id = mobj.group('display_id')
data = self._download_json(
'http://api.dbtv.no/discovery/%s' % video_id, display_id)
video = data['playlist'][0]
formats = [{
'url': f['URL'],
'vcodec': f.get('container'),
'width': int_or_none(f.get('width')),
'height': int_or_none(f.get('height')),
'vbr': float_or_none(f.get('rate'), 1000),
'filesize': int_or_none(f.get('size')),
} for f in video['renditions'] if 'URL' in f]
if not formats:
for url_key, format_id in [('URL', 'mp4'), ('HLSURL', 'hls')]:
if url_key in video:
formats.append({
'url': video[url_key],
'format_id': format_id,
})
self._sort_formats(formats)
return {
'id': video['id'],
'display_id': display_id,
'title': video['title'],
'description': clean_html(video['desc']),
'thumbnail': video.get('splash') or video.get('thumb'),
'timestamp': float_or_none(video.get('publishedAt'), 1000),
'duration': float_or_none(video.get('length'), 1000),
'view_count': int_or_none(video.get('views')),
'categories': video.get('tags'),
'formats': formats,
}
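
DBTV reports publishedAt and length in milliseconds, so the extractor above passes a scale of 1000. A sketch with a simplified stand-in for youtube_dl.utils.float_or_none:

def float_or_none(v, scale=1):
    # simplified stand-in for youtube_dl.utils.float_or_none
    return float(v) / scale if v is not None else None

print(float_or_none(1404039863438, 1000))  # 1404039863.438, the test's timestamp
print(float_or_none(69544, 1000))          # 69.544 seconds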

View File

@ -0,0 +1,89 @@
from __future__ import unicode_literals
import json
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
int_or_none,
orderedSet,
)
class DeezerPlaylistIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?deezer\.com/playlist/(?P<id>[0-9]+)'
_TEST = {
'url': 'http://www.deezer.com/playlist/176747451',
'info_dict': {
'id': '176747451',
'title': 'Best!',
'uploader': 'Anonymous',
'thumbnail': 're:^https?://cdn-images.deezer.com/images/cover/.*\.jpg$',
},
'playlist_count': 30,
'skip': 'Only available in .de',
}
def _real_extract(self, url):
if 'test' not in self._downloader.params:
self._downloader.report_warning('For now, this extractor only supports the 30 second previews. Patches welcome!')
mobj = re.match(self._VALID_URL, url)
playlist_id = mobj.group('id')
webpage = self._download_webpage(url, playlist_id)
geoblocking_msg = self._html_search_regex(
r'<p class="soon-txt">(.*?)</p>', webpage, 'geoblocking message',
default=None)
if geoblocking_msg is not None:
raise ExtractorError(
'Deezer said: %s' % geoblocking_msg, expected=True)
data_json = self._search_regex(
r'naboo\.display\(\'[^\']+\',\s*(.*?)\);\n', webpage, 'data JSON')
data = json.loads(data_json)
playlist_title = data.get('DATA', {}).get('TITLE')
playlist_uploader = data.get('DATA', {}).get('PARENT_USERNAME')
playlist_thumbnail = self._search_regex(
r'<img id="naboo_playlist_image".*?src="([^"]+)"', webpage,
'playlist thumbnail')
preview_pattern = self._search_regex(
r"var SOUND_PREVIEW_GATEWAY\s*=\s*'([^']+)';", webpage,
'preview URL pattern', fatal=False)
entries = []
for s in data['SONGS']['data']:
puid = s['MD5_ORIGIN']
preview_video_url = preview_pattern.\
replace('{0}', puid[0]).\
replace('{1}', puid).\
replace('{2}', s['MEDIA_VERSION'])
formats = [{
'format_id': 'preview',
'url': preview_video_url,
'preference': -100, # Only the first 30 seconds
'ext': 'mp3',
}]
self._sort_formats(formats)
artists = ', '.join(
orderedSet(a['ART_NAME'] for a in s['ARTISTS']))
entries.append({
'id': s['SNG_ID'],
'duration': int_or_none(s.get('DURATION')),
'title': '%s - %s' % (artists, s['SNG_TITLE']),
'uploader': s['ART_NAME'],
'uploader_id': s['ART_ID'],
'age_limit': 16 if s.get('EXPLICIT_LYRICS') == '1' else 0,
'formats': formats,
})
return {
'_type': 'playlist',
'id': playlist_id,
'title': playlist_title,
'uploader': playlist_uploader,
'thumbnail': playlist_thumbnail,
'entries': entries,
}
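
Sketch of the SOUND_PREVIEW_GATEWAY templating above: the page exposes a pattern with {0}/{1}/{2} placeholders filled from the first character of MD5_ORIGIN, the full MD5_ORIGIN, and MEDIA_VERSION. The pattern and song data below are invented for illustration:

preview_pattern = 'http://cdn-preview-{0}.example.com/stream/{1}-{2}.mp3'
song = {'MD5_ORIGIN': 'd2c3f79e9b9e9c3f79e9b9e9c3f79e9b', 'MEDIA_VERSION': '4'}

puid = song['MD5_ORIGIN']
preview_url = (preview_pattern
               .replace('{0}', puid[0])
               .replace('{1}', puid)
               .replace('{2}', song['MEDIA_VERSION']))
print(preview_url)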

View File

@ -7,7 +7,7 @@ class DivxStageIE(NovaMovIE):
IE_NAME = 'divxstage'
IE_DESC = 'DivxStage'
- _VALID_URL = NovaMovIE._VALID_URL_TEMPLATE % {'host': 'divxstage\.(?:eu|net|ch|co|at|ag)'}
+ _VALID_URL = NovaMovIE._VALID_URL_TEMPLATE % {'host': 'divxstage\.(?:eu|net|ch|co|at|ag|to)'}
_HOST = 'www.divxstage.eu'
@ -24,4 +24,4 @@ class DivxStageIE(NovaMovIE):
'title': 'youtubedl test video',
'description': 'This is a test video for youtubedl.',
}
}

View File

@ -5,27 +5,32 @@ import os.path
import re
from .common import InfoExtractor
- from ..utils import compat_urllib_parse_unquote
+ from ..utils import compat_urllib_parse_unquote, url_basename
class DropboxIE(InfoExtractor):
- _VALID_URL = r'https?://(?:www\.)?dropbox[.]com/s/(?P<id>[a-zA-Z0-9]{15})/(?P<title>[^?#]*)'
- _TEST = {
- 'url': 'https://www.dropbox.com/s/nelirfsxnmcfbfh/youtube-dl%20test%20video%20%27%C3%A4%22BaW_jenozKc.mp4',
- 'md5': '8a3d905427a6951ccb9eb292f154530b',
+ _VALID_URL = r'https?://(?:www\.)?dropbox[.]com/sh?/(?P<id>[a-zA-Z0-9]{15})/.*'
+ _TESTS = [{
+ 'url': 'https://www.dropbox.com/s/nelirfsxnmcfbfh/youtube-dl%20test%20video%20%27%C3%A4%22BaW_jenozKc.mp4?dl=0',
'info_dict': {
'id': 'nelirfsxnmcfbfh',
'ext': 'mp4',
'title': 'youtube-dl test video \'ä"BaW_jenozKc'
}
}
},
{
'url': 'https://www.dropbox.com/sh/662glsejgzoj9sr/AAByil3FGH9KFNZ13e08eSa1a/Pregame%20Ceremony%20Program%20PA%2020140518.m4v',
'only_matching': True,
},
]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
- fn = compat_urllib_parse_unquote(mobj.group('title'))
+ fn = compat_urllib_parse_unquote(url_basename(url))
title = os.path.splitext(fn)[0]
- video_url = url + '?dl=1'
+ video_url = re.sub(r'[?&]dl=0', '', url)
+ video_url += ('?' if '?' not in video_url else '&') + 'dl=1'
return {
'id': video_id,

View File

@ -0,0 +1,70 @@
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import str_to_int
class DrTuberIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?drtuber\.com/video/(?P<id>\d+)/(?P<display_id>[\w-]+)'
_TEST = {
'url': 'http://www.drtuber.com/video/1740434/hot-perky-blonde-naked-golf',
'md5': '93e680cf2536ad0dfb7e74d94a89facd',
'info_dict': {
'id': '1740434',
'display_id': 'hot-perky-blonde-naked-golf',
'ext': 'mp4',
'title': 'Hot Perky Blonde Naked Golf',
'like_count': int,
'dislike_count': int,
'comment_count': int,
'categories': ['Babe', 'Blonde', 'Erotic', 'Outdoor', 'Softcore', 'Solo'],
'thumbnail': 're:https?://.*\.jpg$',
'age_limit': 18,
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
display_id = mobj.group('display_id')
webpage = self._download_webpage(url, display_id)
video_url = self._html_search_regex(
r'<source src="([^"]+)"', webpage, 'video URL')
title = self._html_search_regex(
r'<title>([^<]+)\s*-\s*Free', webpage, 'title')
thumbnail = self._html_search_regex(
r'poster="([^"]+)"',
webpage, 'thumbnail', fatal=False)
like_count = str_to_int(self._html_search_regex(
r'<span id="rate_likes">\s*<img[^>]+>\s*<span>([\d,\.]+)</span>',
webpage, 'like count', fatal=False))
dislike_count = str_to_int(self._html_search_regex(
r'<span id="rate_dislikes">\s*<img[^>]+>\s*<span>([\d,\.]+)</span>',
webpage, 'dislike count', fatal=False))
comment_count = str_to_int(self._html_search_regex(
r'<span class="comments_count">([\d,\.]+)</span>',
webpage, 'comment count', fatal=False))
cats_str = self._search_regex(
r'<span>Categories:</span><div>(.+?)</div>', webpage, 'categories', fatal=False)
categories = [] if not cats_str else re.findall(r'<a title="([^"]+)"', cats_str)
return {
'id': video_id,
'display_id': display_id,
'url': video_url,
'title': title,
'thumbnail': thumbnail,
'like_count': like_count,
'dislike_count': dislike_count,
'comment_count': comment_count,
'categories': categories,
'age_limit': self._rta_search(webpage),
}
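
str_to_int above tolerates thousands separators in the scraped counters. A simplified stand-in for youtube_dl.utils.str_to_int:

import re

def str_to_int(int_str):
    # simplified stand-in for youtube_dl.utils.str_to_int
    if int_str is None:
        return None
    return int(re.sub(r'[,\.]', '', int_str))

print(str_to_int('1,234'))   # 1234
print(str_to_int('12.345'))  # 12345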

View File

@ -8,7 +8,7 @@ from ..utils import parse_iso8601
class DRTVIE(SubtitlesInfoExtractor):
- _VALID_URL = r'http://(?:www\.)?dr\.dk/tv/se/[^/]+/(?P<id>[\da-z-]+)'
+ _VALID_URL = r'http://(?:www\.)?dr\.dk/tv/se/(?:[^/]+/)+(?P<id>[\da-z-]+)(?:[/#?]|$)'
_TEST = {
'url': 'http://www.dr.dk/tv/se/partiets-mand/partiets-mand-7-8',

View File

@ -1,19 +1,21 @@
from __future__ import unicode_literals
import re
from .common import InfoExtractor
- from ..utils import determine_ext
class EbaumsWorldIE(InfoExtractor):
_VALID_URL = r'https?://www\.ebaumsworld\.com/video/watch/(?P<id>\d+)'
_TEST = {
- u'url': u'http://www.ebaumsworld.com/video/watch/83367677/',
- u'file': u'83367677.mp4',
- u'info_dict': {
- u'title': u'A Giant Python Opens The Door',
- u'description': u'This is how nightmares start...',
- u'uploader': u'jihadpizza',
+ 'url': 'http://www.ebaumsworld.com/video/watch/83367677/',
+ 'info_dict': {
+ 'id': '83367677',
+ 'ext': 'mp4',
+ 'title': 'A Giant Python Opens The Door',
+ 'description': 'This is how nightmares start...',
+ 'uploader': 'jihadpizza',
},
}
@ -28,7 +30,6 @@ class EbaumsWorldIE(InfoExtractor):
'id': video_id,
'title': config.find('title').text,
'url': video_url,
- 'ext': determine_ext(video_url),
'description': config.find('description').text,
'thumbnail': config.find('image').text,
'uploader': config.find('username').text,

View File

@ -1,10 +1,13 @@
# coding: utf-8
from __future__ import unicode_literals
import json
import random
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
compat_str,
)
@ -12,86 +15,98 @@ class EightTracksIE(InfoExtractor):
IE_NAME = '8tracks'
_VALID_URL = r'https?://8tracks\.com/(?P<user>[^/]+)/(?P<id>[^/#]+)(?:#.*)?$'
_TEST = {
u"name": u"EightTracks",
u"url": u"http://8tracks.com/ytdl/youtube-dl-test-tracks-a",
u"playlist": [
"name": "EightTracks",
"url": "http://8tracks.com/ytdl/youtube-dl-test-tracks-a",
"info_dict": {
'id': '1336550',
'display_id': 'youtube-dl-test-tracks-a',
"description": "test chars: \"'/\\ä↭",
"title": "youtube-dl test tracks \"'/\\ä↭<>",
},
"playlist": [
{
u"file": u"11885610.m4a",
u"md5": u"96ce57f24389fc8734ce47f4c1abcc55",
u"info_dict": {
u"title": u"youtue-dl project<>\"' - youtube-dl test track 1 \"'/\\\u00e4\u21ad",
u"uploader_id": u"ytdl"
"md5": "96ce57f24389fc8734ce47f4c1abcc55",
"info_dict": {
"id": "11885610",
"ext": "m4a",
"title": "youtue-dl project<>\"' - youtube-dl test track 1 \"'/\\\u00e4\u21ad",
"uploader_id": "ytdl"
}
},
{
u"file": u"11885608.m4a",
u"md5": u"4ab26f05c1f7291ea460a3920be8021f",
u"info_dict": {
u"title": u"youtube-dl project - youtube-dl test track 2 \"'/\\\u00e4\u21ad",
u"uploader_id": u"ytdl"
"md5": "4ab26f05c1f7291ea460a3920be8021f",
"info_dict": {
"id": "11885608",
"ext": "m4a",
"title": "youtube-dl project - youtube-dl test track 2 \"'/\\\u00e4\u21ad",
"uploader_id": "ytdl"
}
},
{
u"file": u"11885679.m4a",
u"md5": u"d30b5b5f74217410f4689605c35d1fd7",
u"info_dict": {
u"title": u"youtube-dl project as well - youtube-dl test track 3 \"'/\\\u00e4\u21ad",
u"uploader_id": u"ytdl"
"md5": "d30b5b5f74217410f4689605c35d1fd7",
"info_dict": {
"id": "11885679",
"ext": "m4a",
"title": "youtube-dl project as well - youtube-dl test track 3 \"'/\\\u00e4\u21ad",
"uploader_id": "ytdl"
}
},
{
u"file": u"11885680.m4a",
u"md5": u"4eb0a669317cd725f6bbd336a29f923a",
u"info_dict": {
u"title": u"youtube-dl project as well - youtube-dl test track 4 \"'/\\\u00e4\u21ad",
u"uploader_id": u"ytdl"
"md5": "4eb0a669317cd725f6bbd336a29f923a",
"info_dict": {
"id": "11885680",
"ext": "m4a",
"title": "youtube-dl project as well - youtube-dl test track 4 \"'/\\\u00e4\u21ad",
"uploader_id": "ytdl"
}
},
{
u"file": u"11885682.m4a",
u"md5": u"1893e872e263a2705558d1d319ad19e8",
u"info_dict": {
u"title": u"PH - youtube-dl test track 5 \"'/\\\u00e4\u21ad",
u"uploader_id": u"ytdl"
"md5": "1893e872e263a2705558d1d319ad19e8",
"info_dict": {
"id": "11885682",
"ext": "m4a",
"title": "PH - youtube-dl test track 5 \"'/\\\u00e4\u21ad",
"uploader_id": "ytdl"
}
},
{
u"file": u"11885683.m4a",
u"md5": u"b673c46f47a216ab1741ae8836af5899",
u"info_dict": {
u"title": u"PH - youtube-dl test track 6 \"'/\\\u00e4\u21ad",
u"uploader_id": u"ytdl"
"md5": "b673c46f47a216ab1741ae8836af5899",
"info_dict": {
"id": "11885683",
"ext": "m4a",
"title": "PH - youtube-dl test track 6 \"'/\\\u00e4\u21ad",
"uploader_id": "ytdl"
}
},
{
u"file": u"11885684.m4a",
u"md5": u"1d74534e95df54986da7f5abf7d842b7",
u"info_dict": {
u"title": u"phihag - youtube-dl test track 7 \"'/\\\u00e4\u21ad",
u"uploader_id": u"ytdl"
"md5": "1d74534e95df54986da7f5abf7d842b7",
"info_dict": {
"id": "11885684",
"ext": "m4a",
"title": "phihag - youtube-dl test track 7 \"'/\\\u00e4\u21ad",
"uploader_id": "ytdl"
}
},
{
u"file": u"11885685.m4a",
u"md5": u"f081f47af8f6ae782ed131d38b9cd1c0",
u"info_dict": {
u"title": u"phihag - youtube-dl test track 8 \"'/\\\u00e4\u21ad",
u"uploader_id": u"ytdl"
"md5": "f081f47af8f6ae782ed131d38b9cd1c0",
"info_dict": {
"id": "11885685",
"ext": "m4a",
"title": "phihag - youtube-dl test track 8 \"'/\\\u00e4\u21ad",
"uploader_id": "ytdl"
}
}
]
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
- if mobj is None:
- raise ExtractorError(u'Invalid URL: %s' % url)
playlist_id = mobj.group('id')
webpage = self._download_webpage(url, playlist_id)
- json_like = self._search_regex(r"PAGE.mix = (.*?);\n", webpage, u'trax information', flags=re.DOTALL)
+ json_like = self._search_regex(
+ r"(?s)PAGE.mix = (.*?);\n", webpage, 'trax information')
data = json.loads(json_like)
session = str(random.randint(0, 1000000000))
@ -99,21 +114,30 @@ class EightTracksIE(InfoExtractor):
track_count = data['tracks_count']
first_url = 'http://8tracks.com/sets/%s/play?player=sm&mix_id=%s&format=jsonh' % (session, mix_id)
next_url = first_url
- res = []
+ entries = []
for i in range(track_count):
- api_json = self._download_webpage(next_url, playlist_id,
- note=u'Downloading song information %s/%s' % (str(i+1), track_count),
- errnote=u'Failed to download song information')
+ api_json = self._download_webpage(
+ next_url, playlist_id,
+ note='Downloading song information %d/%d' % (i + 1, track_count),
+ errnote='Failed to download song information')
api_data = json.loads(api_json)
- track_data = api_data[u'set']['track']
+ track_data = api_data['set']['track']
info = {
- 'id': track_data['id'],
+ 'id': compat_str(track_data['id']),
'url': track_data['track_file_stream_url'],
'title': track_data['performer'] + u' - ' + track_data['name'],
'raw_title': track_data['name'],
'uploader_id': data['user']['login'],
'ext': 'm4a',
}
- res.append(info)
- next_url = 'http://8tracks.com/sets/%s/next?player=sm&mix_id=%s&format=jsonh&track_id=%s' % (session, mix_id, track_data['id'])
- return res
+ entries.append(info)
+ next_url = 'http://8tracks.com/sets/%s/next?player=sm&mix_id=%s&format=jsonh&track_id=%s' % (
+ session, mix_id, track_data['id'])
return {
'_type': 'playlist',
'entries': entries,
'id': compat_str(mix_id),
'display_id': playlist_id,
'title': data.get('name'),
'description': data.get('description'),
}

View File

@ -0,0 +1,61 @@
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
class EinthusanIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?einthusan\.com/movies/watch.php\?([^#]*?)id=(?P<id>[0-9]+)'
_TESTS = [
{
'url': 'http://www.einthusan.com/movies/watch.php?id=2447',
'md5': 'af244f4458cd667205e513d75da5b8b1',
'info_dict': {
'id': '2447',
'ext': 'mp4',
'title': 'Ek Villain',
'thumbnail': 're:^https?://.*\.jpg$',
'description': 'md5:9d29fc91a7abadd4591fb862fa560d93',
}
},
{
'url': 'http://www.einthusan.com/movies/watch.php?id=1671',
'md5': 'ef63c7a803e22315880ed182c10d1c5c',
'info_dict': {
'id': '1671',
'ext': 'mp4',
'title': 'Soodhu Kavvuum',
'thumbnail': 're:^https?://.*\.jpg$',
'description': 'md5:05d8a0c0281a4240d86d76e14f2f4d51',
}
},
]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
webpage = self._download_webpage(url, video_id)
video_title = self._html_search_regex(
r'<h1><a class="movie-title".*?>(.*?)</a></h1>', webpage, 'title')
video_url = self._html_search_regex(
r'''(?s)jwplayer\("mediaplayer"\)\.setup\({.*?'file': '([^']+)'.*?}\);''',
webpage, 'video url')
description = self._html_search_meta('description', webpage)
thumbnail = self._html_search_regex(
r'''<a class="movie-cover-wrapper".*?><img src=["'](.*?)["'].*?/></a>''',
webpage, "thumbnail url", fatal=False)
if thumbnail is not None:
thumbnail = thumbnail.replace('..', 'http://www.einthusan.com')
return {
'id': video_id,
'title': video_title,
'url': video_url,
'thumbnail': thumbnail,
'description': description,
}

View File

@ -1,4 +1,6 @@
# encoding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
@ -7,20 +9,20 @@ from ..utils import ExtractorError
class EitbIE(InfoExtractor):
IE_NAME = u'eitb.tv'
IE_NAME = 'eitb.tv'
_VALID_URL = r'https?://www\.eitb\.tv/(eu/bideoa|es/video)/[^/]+/(?P<playlist_id>\d+)/(?P<chapter_id>\d+)'
_TEST = {
u'add_ie': ['Brightcove'],
u'url': u'http://www.eitb.tv/es/video/60-minutos-60-minutos-2013-2014/2677100210001/2743577154001/lasa-y-zabala-30-anos/',
u'md5': u'edf4436247185adee3ea18ce64c47998',
u'info_dict': {
u'id': u'2743577154001',
u'ext': u'mp4',
u'title': u'60 minutos (Lasa y Zabala, 30 años)',
'add_ie': ['Brightcove'],
'url': 'http://www.eitb.tv/es/video/60-minutos-60-minutos-2013-2014/2677100210001/2743577154001/lasa-y-zabala-30-anos/',
'md5': 'edf4436247185adee3ea18ce64c47998',
'info_dict': {
'id': '2743577154001',
'ext': 'mp4',
'title': '60 minutos (Lasa y Zabala, 30 años)',
# All videos from eitb has this description in the brightcove info
u'description': u'.',
u'uploader': u'Euskal Telebista',
'description': '.',
'uploader': 'Euskal Telebista',
},
}
@ -30,7 +32,7 @@ class EitbIE(InfoExtractor):
webpage = self._download_webpage(url, chapter_id)
bc_url = BrightcoveIE._extract_brightcove_url(webpage)
if bc_url is None:
raise ExtractorError(u'Could not extract the Brightcove url')
raise ExtractorError('Could not extract the Brightcove url')
# The BrightcoveExperience object doesn't contain the video id, we set
# it manually
bc_url += '&%40videoPlayer={0}'.format(chapter_id)


@ -1,54 +1,25 @@
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from .tnaflix import TNAFlixIE
class EmpflixIE(InfoExtractor):
_VALID_URL = r'^https?://www\.empflix\.com/videos/.*?-(?P<id>[0-9]+)\.html'
class EMPFlixIE(TNAFlixIE):
_VALID_URL = r'^https?://www\.empflix\.com/videos/(?P<display_id>[0-9a-zA-Z-]+)-(?P<id>[0-9]+)\.html'
_TITLE_REGEX = r'name="title" value="(?P<title>[^"]*)"'
_DESCRIPTION_REGEX = r'name="description" value="([^"]*)"'
_CONFIG_REGEX = r'flashvars\.config\s*=\s*escape\("([^"]+)"'
_TEST = {
'url': 'http://www.empflix.com/videos/Amateur-Finger-Fuck-33051.html',
'md5': 'b1bc15b6412d33902d6e5952035fcabc',
'info_dict': {
'id': '33051',
'display_id': 'Amateur-Finger-Fuck',
'ext': 'mp4',
'title': 'Amateur Finger Fuck',
'description': 'Amateur solo finger fucking.',
'thumbnail': 're:https?://.*\.jpg$',
'age_limit': 18,
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
webpage = self._download_webpage(url, video_id)
age_limit = self._rta_search(webpage)
video_title = self._html_search_regex(
r'name="title" value="(?P<title>[^"]*)"', webpage, 'title')
video_description = self._html_search_regex(
r'name="description" value="([^"]*)"', webpage, 'description', fatal=False)
cfg_url = self._html_search_regex(
r'flashvars\.config = escape\("([^"]+)"',
webpage, 'flashvars.config')
cfg_xml = self._download_xml(
cfg_url, video_id, note='Downloading metadata')
formats = [
{
'url': item.find('videoLink').text,
'format_id': item.find('res').text,
} for item in cfg_xml.findall('./quality/item')
]
return {
'id': video_id,
'title': video_title,
'description': video_description,
'formats': formats,
'age_limit': age_limit,
}
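The rewrite above deletes all of EMPFlix's extraction logic: the class now inherits from TNAFlixIE and overrides only site-specific class attributes (_VALID_URL and the three regexes), letting the base class drive extraction. A minimal sketch of that pattern outside youtube-dl:

import re

class BaseSiteIE(object):
    # subclasses supply only the pattern; the extraction logic lives here
    _TITLE_REGEX = None

    def extract_title(self, webpage):
        return re.search(self._TITLE_REGEX, webpage).group('title')

class ExampleFlixIE(BaseSiteIE):
    _TITLE_REGEX = r'name="title" value="(?P<title>[^"]*)"'

page = '<input name="title" value="Amateur Finger Fuck">'
print(ExampleFlixIE().extract_title(page))  # -> Amateur Finger Fuck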


@ -0,0 +1,75 @@
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
parse_duration,
str_to_int,
)
class EpornerIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?eporner\.com/hd-porn/(?P<id>\d+)/(?P<display_id>[\w-]+)'
_TEST = {
'url': 'http://www.eporner.com/hd-porn/95008/Infamous-Tiffany-Teen-Strip-Tease-Video/',
'md5': '39d486f046212d8e1b911c52ab4691f8',
'info_dict': {
'id': '95008',
'display_id': 'Infamous-Tiffany-Teen-Strip-Tease-Video',
'ext': 'mp4',
'title': 'Infamous Tiffany Teen Strip Tease Video',
'duration': 194,
'view_count': int,
'age_limit': 18,
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
display_id = mobj.group('display_id')
webpage = self._download_webpage(url, display_id)
title = self._html_search_regex(
r'<title>(.*?) - EPORNER', webpage, 'title')
redirect_code = self._html_search_regex(
r'<script type="text/javascript" src="/config5/%s/([a-f\d]+)/">' % video_id,
webpage, 'redirect_code')
redirect_url = 'http://www.eporner.com/config5/%s/%s' % (video_id, redirect_code)
player_code = self._download_webpage(
redirect_url, display_id, note='Downloading player config')
sources = self._search_regex(
r'(?s)sources\s*:\s*\[\s*({.+?})\s*\]', player_code, 'sources')
formats = []
for video_url, format_id in re.findall(r'file\s*:\s*"([^"]+)",\s*label\s*:\s*"([^"]+)"', sources):
fmt = {
'url': video_url,
'format_id': format_id,
}
m = re.search(r'^(\d+)', format_id)
if m:
fmt['height'] = int(m.group(1))
formats.append(fmt)
self._sort_formats(formats)
duration = parse_duration(self._search_regex(
r'class="mbtim">([0-9:]+)</div>', webpage, 'duration',
fatal=False))
view_count = str_to_int(self._search_regex(
r'id="cinemaviews">\s*([0-9,]+)\s*<small>views',
webpage, 'view count', fatal=False))
return {
'id': video_id,
'display_id': display_id,
'title': title,
'duration': duration,
'view_count': view_count,
'formats': formats,
'age_limit': self._rta_search(webpage),
}
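The player config fetched above is JWPlayer-style JavaScript rather than JSON, so the extractor scrapes file/label pairs with a regex and derives the height from the label's leading digits. A standalone sketch against a hypothetical sources snippet:

import re

sources = '''sources: [
    { file: "http://cdn.example.com/clip_480p.mp4", label: "480p" },
    { file: "http://cdn.example.com/clip_720p.mp4", label: "720p" }
]'''  # hypothetical player config excerpt

formats = []
for video_url, format_id in re.findall(
        r'file\s*:\s*"([^"]+)",\s*label\s*:\s*"([^"]+)"', sources):
    fmt = {'url': video_url, 'format_id': format_id}
    m = re.search(r'^(\d+)', format_id)  # '720p' -> height 720
    if m:
        fmt['height'] = int(m.group(1))
    formats.append(fmt)

print([f['format_id'] for f in formats])  # -> ['480p', '720p']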


@ -12,10 +12,11 @@ from ..utils import (
class EveryonesMixtapeIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?everyonesmixtape\.com/#/mix/(?P<id>[0-9a-zA-Z]+)(?:/(?P<songnr>[0-9]))?$'
_TEST = {
_TESTS = [{
'url': 'http://everyonesmixtape.com/#/mix/m7m0jJAbMQi/5',
'file': '5bfseWNmlds.mp4',
"info_dict": {
'id': '5bfseWNmlds',
'ext': 'mp4',
"title": "Passion Pit - \"Sleepyhead\" (Official Music Video)",
"uploader": "FKR.TV",
"uploader_id": "frenchkissrecords",
@ -25,7 +26,14 @@ class EveryonesMixtapeIE(InfoExtractor):
'params': {
'skip_download': True, # This is simply YouTube
}
}
}, {
'url': 'http://everyonesmixtape.com/#/mix/m7m0jJAbMQi',
'info_dict': {
'id': 'm7m0jJAbMQi',
'title': 'Driving',
},
'playlist_count': 24
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)


@ -0,0 +1,73 @@
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
int_or_none,
unified_strdate,
)
class ExpoTVIE(InfoExtractor):
_VALID_URL = r'https?://www\.expotv\.com/videos/[^?#]*/(?P<id>[0-9]+)($|[?#])'
_TEST = {
'url': 'http://www.expotv.com/videos/reviews/1/24/LinneCardscom/17561',
'md5': '2985e6d7a392b2f7a05e0ca350fe41d0',
'info_dict': {
'id': '17561',
'ext': 'mp4',
'upload_date': '20060212',
'title': 'My Favorite Online Scrapbook Store',
'view_count': int,
'description': 'You\'ll find most everything you need at this virtual store front.',
'uploader': 'Anna T.',
'thumbnail': 're:^https?://.*\.jpg$',
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
webpage = self._download_webpage(url, video_id)
player_key = self._search_regex(
r'<param name="playerKey" value="([^"]+)"', webpage, 'player key')
config_url = 'http://client.expotv.com/video/config/%s/%s' % (
video_id, player_key)
config = self._download_json(
config_url, video_id,
note='Downloading video configuration')
formats = [{
'url': fcfg['file'],
'height': int_or_none(fcfg.get('height')),
'format_note': fcfg.get('label'),
'ext': self._search_regex(
r'filename=.*\.([a-z0-9_A-Z]+)&', fcfg['file'],
'file extension', default=None),
} for fcfg in config['sources']]
self._sort_formats(formats)
title = self._og_search_title(webpage)
description = self._og_search_description(webpage)
thumbnail = config.get('image')
view_count = int_or_none(self._search_regex(
r'<h5>Plays: ([0-9]+)</h5>', webpage, 'view counts'))
uploader = self._search_regex(
r'<div class="reviewer">\s*<img alt="([^"]+)"', webpage, 'uploader',
fatal=False)
upload_date = unified_strdate(self._search_regex(
r'<h5>Reviewed on ([0-9/.]+)</h5>', webpage, 'upload date',
fatal=False))
return {
'id': video_id,
'formats': formats,
'title': title,
'description': description,
'view_count': view_count,
'thumbnail': thumbnail,
'uploader': uploader,
'upload_date': upload_date,
}
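The 'ext' lookup above is worth a note: ExpoTV's config only reveals the container inside the filename= query parameter, so the extractor sniffs it with a regex and passes default=None to fall back to autodetection when the parameter is missing. A sketch with a hypothetical URL:

import re

file_url = 'http://cdn.expotv.example/get?filename=17561_high.mp4&expires=123'  # hypothetical
m = re.search(r'filename=.*\.([a-z0-9_A-Z]+)&', file_url)
ext = m.group(1) if m else None  # None lets youtube-dl determine the extension itself
print(ext)  # -> mp4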


@ -7,6 +7,7 @@ from ..utils import (
compat_urllib_parse_urlparse,
compat_urllib_request,
compat_urllib_parse,
str_to_int,
)
@ -20,6 +21,7 @@ class ExtremeTubeIE(InfoExtractor):
'ext': 'mp4',
'title': 'Music Video 14 british euro brit european cumshots swallow',
'uploader': 'unknown',
'view_count': int,
'age_limit': 18,
}
}, {
@ -39,8 +41,12 @@ class ExtremeTubeIE(InfoExtractor):
video_title = self._html_search_regex(
r'<h1 [^>]*?title="([^"]+)"[^>]*>', webpage, 'title')
uploader = self._html_search_regex(
r'>Posted by:(?=<)(?:\s|<[^>]*>)*(.+?)\|', webpage, 'uploader',
fatal=False)
r'Uploaded by:\s*</strong>\s*(.+?)\s*</div>',
webpage, 'uploader', fatal=False)
view_count = str_to_int(self._html_search_regex(
r'Views:\s*</strong>\s*<span>([\d,\.]+)</span>',
webpage, 'view count', fatal=False))
video_url = compat_urllib_parse.unquote(self._html_search_regex(
r'video_url=(.+?)&amp;', webpage, 'video_url'))
path = compat_urllib_parse_urlparse(video_url).path
@ -51,6 +57,7 @@ class ExtremeTubeIE(InfoExtractor):
'id': video_id,
'title': video_title,
'uploader': uploader,
'view_count': view_count,
'url': video_url,
'format': format,
'format_id': format,
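str_to_int, newly imported above, exists because view counters arrive as human-formatted strings like '1,234,567'. A hedged stand-in with the behavior the regex in the diff relies on (both ',' and '.' accepted as group separators):

import re

def str_to_int(int_str):
    # strip thousands separators, then convert; stay None-safe
    if int_str is None:
        return None
    return int(re.sub(r'[,\.]', '', int_str))

print(str_to_int('1,234,567'))  # -> 1234567
print(str_to_int('12.345'))     # -> 12345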


@ -12,8 +12,8 @@ from ..utils import (
compat_urllib_parse,
compat_urllib_request,
urlencode_postdata,
ExtractorError,
limit_length,
)
@ -21,23 +21,34 @@ class FacebookIE(InfoExtractor):
_VALID_URL = r'''(?x)
https?://(?:\w+\.)?facebook\.com/
(?:[^#]*?\#!/)?
(?:video/video\.php|photo\.php|video/embed)\?(?:.*?)
(?:video/video\.php|photo\.php|video\.php|video/embed)\?(?:.*?)
(?:v|video_id)=(?P<id>[0-9]+)
(?:.*)'''
_LOGIN_URL = 'https://www.facebook.com/login.php?next=http%3A%2F%2Ffacebook.com%2Fhome.php&login_attempt=1'
_CHECKPOINT_URL = 'https://www.facebook.com/checkpoint/?next=http%3A%2F%2Ffacebook.com%2Fhome.php&_fb_noscript=1'
_NETRC_MACHINE = 'facebook'
IE_NAME = 'facebook'
_TEST = {
'url': 'https://www.facebook.com/photo.php?v=120708114770723',
'md5': '48975a41ccc4b7a581abd68651c1a5a8',
_TESTS = [{
'url': 'https://www.facebook.com/video.php?v=637842556329505&fref=nf',
'md5': '6a40d33c0eccbb1af76cf0485a052659',
'info_dict': {
'id': '120708114770723',
'id': '637842556329505',
'ext': 'mp4',
'duration': 279,
'title': 'PEOPLE ARE AWESOME 2013',
'duration': 38,
'title': 're:Did you know Kei Nishikori is the first Asian man to ever reach a Grand Slam',
}
}
}, {
'note': 'Video without discernible title',
'url': 'https://www.facebook.com/video.php?v=274175099429670',
'info_dict': {
'id': '274175099429670',
'ext': 'mp4',
'title': 'Facebook video #274175099429670',
}
}, {
'url': 'https://www.facebook.com/video.php?v=10204634152394104',
'only_matching': True,
}]
def _login(self):
(useremail, password) = self._get_login_info()
@ -76,7 +87,8 @@ class FacebookIE(InfoExtractor):
check_form = {
'fb_dtsg': self._search_regex(r'name="fb_dtsg" value="(.+?)"', login_results, 'fb_dtsg'),
'h': self._search_regex(r'name="h" value="(\w*?)"', login_results, 'h'),
'h': self._search_regex(
r'name="h"\s+(?:\w+="[^"]+"\s+)*?value="([^"]+)"', login_results, 'h'),
'name_action_selected': 'dont_save',
}
check_req = compat_urllib_request.Request(self._CHECKPOINT_URL, urlencode_postdata(check_form))
@ -121,7 +133,15 @@ class FacebookIE(InfoExtractor):
raise ExtractorError('Cannot find video URL')
video_title = self._html_search_regex(
r'<h2 class="uiHeaderTitle">([^<]*)</h2>', webpage, 'title')
r'<h2 class="uiHeaderTitle">([^<]*)</h2>', webpage, 'title',
fatal=False)
if not video_title:
video_title = self._html_search_regex(
r'(?s)<span class="fbPhotosPhotoCaption".*?id="fbPhotoPageCaption"><span class="hasCaption">(.*?)</span>',
webpage, 'alternative title', default=None)
video_title = limit_length(video_title, 80)
if not video_title:
video_title = 'Facebook video #%s' % video_id
return {
'id': video_id,
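The title lookup is now a three-step fallback: the page header, then the photo caption trimmed to 80 characters, then a synthetic 'Facebook video #<id>'. A condensed sketch of the chain; limit_length is modeled here as a simple truncating helper, which may differ in detail from the real utility:

def limit_length(s, length):
    # hedged stand-in for youtube-dl's limit_length
    if s is None:
        return None
    return s if len(s) <= length else s[:length - 3] + '...'

def pick_title(header_title, caption, video_id):
    title = header_title or limit_length(caption, 80)
    return title or 'Facebook video #%s' % video_id

print(pick_title(None, None, '274175099429670'))
# -> Facebook video #274175099429670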


@ -10,13 +10,13 @@ from ..utils import (
class FlickrIE(InfoExtractor):
"""Information Extractor for Flickr videos"""
_VALID_URL = r'(?:https?://)?(?:www\.|secure\.)?flickr\.com/photos/(?P<uploader_id>[\w\-_@]+)/(?P<id>\d+).*'
_VALID_URL = r'https?://(?:www\.|secure\.)?flickr\.com/photos/(?P<uploader_id>[\w\-_@]+)/(?P<id>\d+).*'
_TEST = {
'url': 'http://www.flickr.com/photos/forestwander-nature-pictures/5645318632/in/photostream/',
'file': '5645318632.mp4',
'md5': '6fdc01adbc89d72fc9c4f15b4a4ba87b',
'info_dict': {
'id': '5645318632',
'ext': 'mp4',
"description": "Waterfalls in the Springtime at Dark Hollow Waterfalls. These are located just off of Skyline Drive in Virginia. They are only about 6/10 of a mile hike but it is a pretty steep hill and a good climb back up.",
"uploader_id": "forestwander-nature-pictures",
"title": "Dark Hollow Waterfalls"
@ -49,12 +49,12 @@ class FlickrIE(InfoExtractor):
raise ExtractorError('Unable to extract video url')
video_url = mobj.group(1) + unescapeHTML(mobj.group(2))
return [{
'id': video_id,
'url': video_url,
'ext': 'mp4',
'title': self._og_search_title(webpage),
return {
'id': video_id,
'url': video_url,
'ext': 'mp4',
'title': self._og_search_title(webpage),
'description': self._og_search_description(webpage),
'thumbnail': self._og_search_thumbnail(webpage),
'thumbnail': self._og_search_thumbnail(webpage),
'uploader_id': video_uploader_id,
}]
}


@ -4,16 +4,21 @@ from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import int_or_none
class FranceInterIE(InfoExtractor):
_VALID_URL = r'http://(?:www\.)?franceinter\.fr/player/reecouter\?play=(?P<id>[0-9]{6})'
_VALID_URL = r'http://(?:www\.)?franceinter\.fr/player/reecouter\?play=(?P<id>[0-9]+)'
_TEST = {
'url': 'http://www.franceinter.fr/player/reecouter?play=793962',
'file': '793962.mp3',
'md5': '4764932e466e6f6c79c317d2e74f6884',
"info_dict": {
"title": "LHistoire dans les jeux vidéo",
'id': '793962',
'ext': 'mp3',
'title': 'L'Histoire dans les jeux vidéo',
'description': 'md5:7e93ddb4451e7530022792240a3049c7',
'timestamp': 1387369800,
'upload_date': '20131218',
},
}
@ -22,17 +27,26 @@ class FranceInterIE(InfoExtractor):
video_id = mobj.group('id')
webpage = self._download_webpage(url, video_id)
title = self._html_search_regex(
r'<span class="roll_overflow">(.*?)</span></h1>', webpage, 'title')
path = self._search_regex(
r'&urlAOD=(.*?)&startTime', webpage, 'video url')
r'<a id="player".+?href="([^"]+)"', webpage, 'video url')
video_url = 'http://www.franceinter.fr/' + path
title = self._html_search_regex(
r'<span class="title">(.+?)</span>', webpage, 'title')
description = self._html_search_regex(
r'<span class="description">(.*?)</span>',
webpage, 'description', fatal=False)
timestamp = int_or_none(self._search_regex(
r'data-date="(\d+)"', webpage, 'upload date', fatal=False))
return {
'id': video_id,
'title': title,
'description': description,
'timestamp': timestamp,
'formats': [{
'url': video_url,
'vcodec': 'none',
}],
'title': title,
}
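Two details above: the single entry in 'formats' sets vcodec to 'none', youtube-dl's marker for an audio-only stream, and the data-date attribute becomes a unix timestamp through int_or_none. A hedged stand-in for that None-safe conversion:

def int_or_none(v):
    # simplified stand-in for youtube-dl's int_or_none
    return int(v) if v is not None else None

print(int_or_none('1387369800'))  # -> 1387369800
print(int_or_none(None))          # -> None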


@ -8,45 +8,68 @@ import json
from .common import InfoExtractor
from ..utils import (
compat_urlparse,
ExtractorError,
clean_html,
parse_duration,
compat_urllib_parse_urlparse,
int_or_none,
)
class FranceTVBaseInfoExtractor(InfoExtractor):
def _extract_video(self, video_id):
info = self._download_xml(
'http://www.francetvinfo.fr/appftv/webservices/video/'
'getInfosOeuvre.php?id-diffusion='
+ video_id, video_id, 'Downloading XML config')
def _extract_video(self, video_id, catalogue):
info = self._download_json(
'http://webservices.francetelevisions.fr/tools/getInfosOeuvre/v2/?idDiffusion=%s&catalogue=%s'
% (video_id, catalogue),
video_id, 'Downloading video JSON')
manifest_url = info.find('videos/video/url').text
manifest_url = manifest_url.replace('/z/', '/i/')
if manifest_url.startswith('rtmp'):
formats = [{'url': manifest_url, 'ext': 'flv'}]
else:
formats = []
available_formats = self._search_regex(r'/[^,]*,(.*?),k\.mp4', manifest_url, 'available formats')
for index, format_descr in enumerate(available_formats.split(',')):
format_info = {
'url': manifest_url.replace('manifest.f4m', 'index_%d_av.m3u8' % index),
'ext': 'mp4',
}
m_resolution = re.search(r'(?P<width>\d+)x(?P<height>\d+)', format_descr)
if m_resolution is not None:
format_info.update({
'width': int(m_resolution.group('width')),
'height': int(m_resolution.group('height')),
})
formats.append(format_info)
if info.get('status') == 'NOK':
raise ExtractorError(
'%s returned error: %s' % (self.IE_NAME, info['message']), expected=True)
thumbnail_path = info.find('image').text
formats = []
for video in info['videos']:
if video['statut'] != 'ONLINE':
continue
video_url = video['url']
if not video_url:
continue
format_id = video['format']
if video_url.endswith('.f4m'):
video_url_parsed = compat_urllib_parse_urlparse(video_url)
f4m_url = self._download_webpage(
'http://hdfauth.francetv.fr/esi/urltokengen2.html?url=%s' % video_url_parsed.path,
video_id, 'Downloading f4m manifest token', fatal=False)
if f4m_url:
f4m_formats = self._extract_f4m_formats(f4m_url, video_id)
for f4m_format in f4m_formats:
f4m_format['preference'] = 1
formats.extend(f4m_formats)
elif video_url.endswith('.m3u8'):
formats.extend(self._extract_m3u8_formats(video_url, video_id))
elif video_url.startswith('rtmp'):
formats.append({
'url': video_url,
'format_id': 'rtmp-%s' % format_id,
'ext': 'flv',
'preference': 1,
})
else:
formats.append({
'url': video_url,
'format_id': format_id,
'preference': 2,
})
self._sort_formats(formats)
return {
'id': video_id,
'title': info.find('titre').text,
'title': info['titre'],
'description': clean_html(info['synopsis']),
'thumbnail': compat_urlparse.urljoin('http://pluzz.francetv.fr', info['image']),
'duration': parse_duration(info['duree']),
'timestamp': int_or_none(info['diffusion']['timestamp']),
'formats': formats,
'thumbnail': compat_urlparse.urljoin('http://pluzz.francetv.fr', thumbnail_path),
'description': info.find('synopsis').text,
}
@ -61,7 +84,7 @@ class PluzzIE(FranceTVBaseInfoExtractor):
webpage = self._download_webpage(url, title)
video_id = self._search_regex(
r'data-diffusion="(\d+)"', webpage, 'ID')
return self._extract_video(video_id)
return self._extract_video(video_id, 'Pluzz')
class FranceTvInfoIE(FranceTVBaseInfoExtractor):
@ -70,13 +93,13 @@ class FranceTvInfoIE(FranceTVBaseInfoExtractor):
_TESTS = [{
'url': 'http://www.francetvinfo.fr/replay-jt/france-3/soir-3/jt-grand-soir-3-lundi-26-aout-2013_393427.html',
'md5': '9cecf35f99c4079c199e9817882a9a1c',
'info_dict': {
'id': '84981923',
'ext': 'mp4',
'ext': 'flv',
'title': 'Soir 3',
},
'params': {
'skip_download': True,
'upload_date': '20130826',
'timestamp': 1377548400,
},
}, {
'url': 'http://www.francetvinfo.fr/elections/europeennes/direct-europeennes-regardez-le-debat-entre-les-candidats-a-la-presidence-de-la-commission_600639.html',
@ -88,15 +111,17 @@ class FranceTvInfoIE(FranceTVBaseInfoExtractor):
},
'params': {
'skip_download': 'HLS (reqires ffmpeg)'
}
},
'skip': 'Ce direct est terminé et sera disponible en rattrapage dans quelques minutes.',
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
page_title = mobj.group('title')
webpage = self._download_webpage(url, page_title)
video_id = self._search_regex(r'id-video=((?:[^0-9]*?_)?[0-9]+)[@"]', webpage, 'video id')
return self._extract_video(video_id)
video_id, catalogue = self._search_regex(
r'id-video=([^@]+@[^"]+)', webpage, 'video id').split('@')
return self._extract_video(video_id, catalogue)
class FranceTVIE(FranceTVBaseInfoExtractor):
@ -112,91 +137,77 @@ class FranceTVIE(FranceTVBaseInfoExtractor):
# france2
{
'url': 'http://www.france2.fr/emissions/13h15-le-samedi-le-dimanche/videos/75540104',
'file': '75540104.mp4',
'md5': 'c03fc87cb85429ffd55df32b9fc05523',
'info_dict': {
'title': '13h15, le samedi...',
'description': 'md5:2e5b58ba7a2d3692b35c792be081a03d',
},
'params': {
# m3u8 download
'skip_download': True,
'id': '109169362',
'ext': 'flv',
'title': '13h15, le dimanche...',
'description': 'md5:9a0932bb465f22d377a449be9d1a0ff7',
'upload_date': '20140914',
'timestamp': 1410693600,
},
},
# france3
{
'url': 'http://www.france3.fr/emissions/pieces-a-conviction/diffusions/13-11-2013_145575',
'md5': '679bb8f8921f8623bd658fa2f8364da0',
'info_dict': {
'id': '000702326_CAPP_PicesconvictionExtrait313022013_120220131722_Au',
'ext': 'flv',
'ext': 'mp4',
'title': 'Le scandale du prix des médicaments',
'description': 'md5:1384089fbee2f04fc6c9de025ee2e9ce',
},
'params': {
# rtmp download
'skip_download': True,
'upload_date': '20131113',
'timestamp': 1384380000,
},
},
# france4
{
'url': 'http://www.france4.fr/emissions/hero-corp/videos/rhozet_herocorp_bonus_1_20131106_1923_06112013172108_F4',
'md5': 'a182bf8d2c43d88d46ec48fbdd260c1c',
'info_dict': {
'id': 'rhozet_herocorp_bonus_1_20131106_1923_06112013172108_F4',
'ext': 'flv',
'ext': 'mp4',
'title': 'Hero Corp Making of - Extrait 1',
'description': 'md5:c87d54871b1790679aec1197e73d650a',
},
'params': {
# rtmp download
'skip_download': True,
'upload_date': '20131106',
'timestamp': 1383766500,
},
},
# france5
{
'url': 'http://www.france5.fr/emissions/c-a-dire/videos/92837968',
'md5': '78f0f4064f9074438e660785bbf2c5d9',
'info_dict': {
'id': '92837968',
'ext': 'mp4',
'id': '108961659',
'ext': 'flv',
'title': 'C à dire ?!',
'description': 'md5:fb1db1cbad784dcce7c7a7bd177c8e2f',
},
'params': {
# m3u8 download
'skip_download': True,
'description': 'md5:1a4aeab476eb657bf57c4ff122129f81',
'upload_date': '20140915',
'timestamp': 1410795000,
},
},
# franceo
{
'url': 'http://www.franceo.fr/jt/info-afrique/04-12-2013',
'md5': '52f0bfe202848b15915a2f39aaa8981b',
'info_dict': {
'id': '92327925',
'ext': 'mp4',
'title': 'Infô-Afrique',
'id': '108634970',
'ext': 'flv',
'title': 'Infô Afrique',
'description': 'md5:ebf346da789428841bee0fd2a935ea55',
'upload_date': '20140915',
'timestamp': 1410822000,
},
'params': {
# m3u8 download
'skip_download': True,
},
'skip': 'The id changes frequently',
},
]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
if mobj.group('key'):
webpage = self._download_webpage(url, mobj.group('key'))
id_res = [
(r'''(?x)<div\s+class="video-player">\s*
<a\s+href="http://videos.francetv.fr/video/([0-9]+)"\s+
class="francetv-video-player">'''),
(r'<a id="player_direct" href="http://info\.francetelevisions'
'\.fr/\?id-video=([^"/&]+)'),
(r'<a class="video" id="ftv_player_(.+?)"'),
]
video_id = self._html_search_regex(id_res, webpage, 'video ID')
else:
video_id = mobj.group('id')
return self._extract_video(video_id)
webpage = self._download_webpage(url, mobj.group('key') or mobj.group('id'))
video_id, catalogue = self._html_search_regex(
r'href="http://videos\.francetv\.fr/video/([^@]+@[^"]+)"',
webpage, 'video ID').split('@')
return self._extract_video(video_id, catalogue)
class GenerationQuoiIE(InfoExtractor):
@ -232,16 +243,15 @@ class CultureboxIE(FranceTVBaseInfoExtractor):
_VALID_URL = r'https?://(?:m\.)?culturebox\.francetvinfo\.fr/(?P<name>.*?)(\?|$)'
_TEST = {
'url': 'http://culturebox.francetvinfo.fr/einstein-on-the-beach-au-theatre-du-chatelet-146813',
'url': 'http://culturebox.francetvinfo.fr/festivals/dans-les-jardins-de-william-christie/dans-les-jardins-de-william-christie-le-camus-162553',
'md5': '5ad6dec1ffb2a3fbcb20cc4b744be8d6',
'info_dict': {
'id': 'EV_6785',
'ext': 'mp4',
'title': 'Einstein on the beach au Théâtre du Châtelet',
'description': 'md5:9ce2888b1efefc617b5e58b3f6200eeb',
},
'params': {
# m3u8 download
'skip_download': True,
'id': 'EV_22853',
'ext': 'flv',
'title': 'Dans les jardins de William Christie - Le Camus',
'description': 'md5:4710c82315c40f0c865ca8b9a68b5299',
'upload_date': '20140829',
'timestamp': 1409317200,
},
}
@ -249,5 +259,7 @@ class CultureboxIE(FranceTVBaseInfoExtractor):
mobj = re.match(self._VALID_URL, url)
name = mobj.group('name')
webpage = self._download_webpage(url, name)
video_id = self._search_regex(r'"http://videos\.francetv\.fr/video/(.*?)"', webpage, 'video id')
return self._extract_video(video_id)
video_id, catalogue = self._search_regex(
r'"http://videos\.francetv\.fr/video/([^@]+@[^"]+)"', webpage, 'video id').split('@')
return self._extract_video(video_id, catalogue)
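Across all four France TV extractors, _extract_video now dispatches each entry of the JSON 'videos' array on its URL: .f4m goes through the hdfauth token service before _extract_f4m_formats, .m3u8 feeds _extract_m3u8_formats, rtmp:// becomes a single flv format, and anything else is taken as a direct link, with 'preference' steering the final sort. A reduced sketch of just that branch order (downloader calls stubbed out):

def classify_video_url(video_url, format_id):
    # mirrors the branch order in FranceTVBaseInfoExtractor._extract_video
    if video_url.endswith('.f4m'):
        return 'hds via hdfauth token', 1      # f4m manifest, preference 1
    if video_url.endswith('.m3u8'):
        return 'hls', None                     # m3u8 formats, default preference
    if video_url.startswith('rtmp'):
        return 'rtmp-%s' % format_id, 1        # single flv format
    return format_id, 2                        # direct link, preferred

for u, f in [('http://x/v.f4m', 'hds'), ('http://x/v.m3u8', 'hls'),
             ('rtmp://x/v', 'sd'), ('http://x/v.mp4', 'hd')]:
    print(classify_video_url(u, f))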


@ -21,7 +21,7 @@ class FunnyOrDieIE(InfoExtractor):
},
}, {
'url': 'http://www.funnyordie.com/embed/e402820827',
'md5': 'ff4d83318f89776ed0250634cfaa8d36',
'md5': '29f4c5e5a61ca39dfd7e8348a75d0aad',
'info_dict': {
'id': 'e402820827',
'ext': 'mp4',


@ -88,3 +88,28 @@ class GameOneIE(InfoExtractor):
'age_limit': age_limit,
'timestamp': timestamp,
}
class GameOnePlaylistIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?gameone\.de(?:/tv)?/?$'
IE_NAME = 'gameone:playlist'
_TEST = {
'url': 'http://www.gameone.de/tv',
'info_dict': {
'title': 'GameOne',
},
'playlist_mincount': 294,
}
def _real_extract(self, url):
webpage = self._download_webpage('http://www.gameone.de/tv', 'TV')
max_id = max(map(int, re.findall(r'<a href="/tv/(\d+)"', webpage)))
entries = [
self.url_result('http://www.gameone.de/tv/%d' % video_id, 'GameOne')
for video_id in range(max_id, 0, -1)]
return {
'_type': 'playlist',
'title': 'GameOne',
'entries': entries,
}
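The playlist extractor above never parses individual episode pages: it scans the overview for the highest /tv/<n> link and emits url_result stubs counting down from it, so each entry is resolved lazily by the GameOne extractor. A sketch of the id scan on hypothetical markup:

import re

webpage = '<a href="/tv/294">new</a> ... <a href="/tv/1">old</a>'  # hypothetical markup
max_id = max(map(int, re.findall(r'<a href="/tv/(\d+)"', webpage)))
entry_urls = ['http://www.gameone.de/tv/%d' % video_id
              for video_id in range(max_id, 0, -1)]  # newest episode first
print(max_id, entry_urls[0])  # -> 294 http://www.gameone.de/tv/294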


@ -8,12 +8,11 @@ import re
from .common import InfoExtractor
from .youtube import YoutubeIE
from ..utils import (
compat_urllib_error,
compat_urllib_parse,
compat_urllib_request,
compat_urlparse,
compat_xml_parse_error,
determine_ext,
ExtractorError,
float_or_none,
HEADRequest,
@ -22,6 +21,7 @@ from ..utils import (
smuggle_url,
unescapeHTML,
unified_strdate,
unsmuggle_url,
url_basename,
)
from .brightcove import BrightcoveIE
@ -155,7 +155,6 @@ class GenericIE(InfoExtractor):
# funnyordie embed
{
'url': 'http://www.theguardian.com/world/2014/mar/11/obama-zach-galifianakis-between-two-ferns',
'md5': '7cf780be104d40fea7bae52eed4a470e',
'info_dict': {
'id': '18e820ec3f',
'ext': 'mp4',
@ -180,13 +179,13 @@ class GenericIE(InfoExtractor):
# Embedded TED video
{
'url': 'http://en.support.wordpress.com/videos/ted-talks/',
'md5': 'deeeabcc1085eb2ba205474e7235a3d5',
'md5': '65fdff94098e4a607385a60c5177c638',
'info_dict': {
'id': '981',
'id': '1969',
'ext': 'mp4',
'title': 'My web playroom',
'uploader': 'Ze Frank',
'description': 'md5:ddb2a40ecd6b6a147e400e535874947b',
'title': 'Hidden miracles of the natural world',
'uploader': 'Louie Schwartzberg',
'description': 'md5:8145d19d320ff3e52f28401f4c4283b9',
}
},
# Embeded Ustream video
@ -226,21 +225,6 @@ class GenericIE(InfoExtractor):
'skip_download': 'Requires rtmpdump'
}
},
# smotri embed
{
'url': 'http://rbctv.rbc.ru/archive/news/562949990879132.shtml',
'md5': 'ec40048448e9284c9a1de77bb188108b',
'info_dict': {
'id': 'v27008541fad',
'ext': 'mp4',
'title': 'Крым и Севастополь вошли в состав России',
'description': 'md5:fae01b61f68984c7bd2fa741e11c3175',
'duration': 900,
'upload_date': '20140318',
'uploader': 'rbctv_2012_4',
'uploader_id': 'rbctv_2012_4',
},
},
# Condé Nast embed
{
'url': 'http://www.wired.com/2014/04/honda-asimo/',
@ -295,13 +279,13 @@ class GenericIE(InfoExtractor):
{
'url': 'https://play.google.com/store/apps/details?id=com.gameloft.android.ANMP.GloftA8HM',
'info_dict': {
'id': 'jpSGZsgga_I',
'id': '4vAffPZIT44',
'ext': 'mp4',
'title': 'Asphalt 8: Airborne - Launch Trailer',
'title': 'Asphalt 8: Airborne - Update - Welcome to Dubai!',
'uploader': 'Gameloft',
'uploader_id': 'gameloft',
'upload_date': '20130821',
'description': 'md5:87bd95f13d8be3e7da87a5f2c443106a',
'upload_date': '20140828',
'description': 'md5:c80da9ed3d83ae6d1876c834de03e1c4',
},
'params': {
'skip_download': True,
@ -330,71 +314,77 @@ class GenericIE(InfoExtractor):
'info_dict': {
'title': 'Fenn-AA_PA_Radar_Course_Lecture_1c_Final',
}
}
},
# Flowplayer
{
'url': 'http://www.handjobhub.com/video/busty-blonde-siri-tit-fuck-while-wank-6313.html',
'md5': '9d65602bf31c6e20014319c7d07fba27',
'info_dict': {
'id': '5123ea6d5e5a7',
'ext': 'mp4',
'age_limit': 18,
'uploader': 'www.handjobhub.com',
'title': 'Busty Blonde Siri Tit Fuck While Wank at Handjob Hub',
}
},
# RSS feed
{
'url': 'http://phihag.de/2014/youtube-dl/rss2.xml',
'info_dict': {
'id': 'http://phihag.de/2014/youtube-dl/rss2.xml',
'title': 'Zero Punctuation',
'description': 're:'
},
'playlist_mincount': 11,
},
# Multiple brightcove videos
# https://github.com/rg3/youtube-dl/issues/2283
{
'url': 'http://www.newyorker.com/online/blogs/newsdesk/2014/01/always-never-nuclear-command-and-control.html',
'info_dict': {
'id': 'always-never',
'title': 'Always / Never - The New Yorker',
},
'playlist_count': 3,
'params': {
'extract_flat': False,
'skip_download': True,
}
},
# MLB embed
{
'url': 'http://umpire-empire.com/index.php/topic/58125-laz-decides-no-thats-low/',
'md5': '96f09a37e44da40dd083e12d9a683327',
'info_dict': {
'id': '33322633',
'ext': 'mp4',
'title': 'Ump changes call to ball',
'description': 'md5:71c11215384298a172a6dcb4c2e20685',
'duration': 48,
'timestamp': 1401537900,
'upload_date': '20140531',
'thumbnail': 're:^https?://.*\.jpg$',
},
},
# Wistia embed
{
'url': 'http://education-portal.com/academy/lesson/north-american-exploration-failed-colonies-of-spain-france-england.html#lesson',
'md5': '8788b683c777a5cf25621eaf286d0c23',
'info_dict': {
'id': '1cfaf6b7ea',
'ext': 'mov',
'title': 'md5:51364a8d3d009997ba99656004b5e20d',
'duration': 643.0,
'filesize': 182808282,
'uploader': 'education-portal.com',
},
},
]
def report_download_webpage(self, video_id):
"""Report webpage download."""
if not self._downloader.params.get('test', False):
self._downloader.report_warning('Falling back on generic information extractor.')
super(GenericIE, self).report_download_webpage(video_id)
def report_following_redirect(self, new_url):
"""Report information extraction."""
self._downloader.to_screen('[redirect] Following redirect to %s' % new_url)
def _send_head(self, url):
"""Check if it is a redirect, like url shorteners, in case return the new url."""
class HEADRedirectHandler(compat_urllib_request.HTTPRedirectHandler):
"""
Subclass the HTTPRedirectHandler to make it use our
HEADRequest also on the redirected URL
"""
def redirect_request(self, req, fp, code, msg, headers, newurl):
if code in (301, 302, 303, 307):
newurl = newurl.replace(' ', '%20')
newheaders = dict((k,v) for k,v in req.headers.items()
if k.lower() not in ("content-length", "content-type"))
try:
# This function was deprecated in python 3.3 and removed in 3.4
origin_req_host = req.get_origin_req_host()
except AttributeError:
origin_req_host = req.origin_req_host
return HEADRequest(newurl,
headers=newheaders,
origin_req_host=origin_req_host,
unverifiable=True)
else:
raise compat_urllib_error.HTTPError(req.get_full_url(), code, msg, headers, fp)
class HTTPMethodFallback(compat_urllib_request.BaseHandler):
"""
Fallback to GET if HEAD is not allowed (405 HTTP error)
"""
def http_error_405(self, req, fp, code, msg, headers):
fp.read()
fp.close()
newheaders = dict((k,v) for k,v in req.headers.items()
if k.lower() not in ("content-length", "content-type"))
return self.parent.open(compat_urllib_request.Request(req.get_full_url(),
headers=newheaders,
origin_req_host=req.get_origin_req_host(),
unverifiable=True))
# Build our opener
opener = compat_urllib_request.OpenerDirector()
for handler in [compat_urllib_request.HTTPHandler, compat_urllib_request.HTTPDefaultErrorHandler,
HTTPMethodFallback, HEADRedirectHandler,
compat_urllib_request.HTTPErrorProcessor, compat_urllib_request.HTTPSHandler]:
opener.add_handler(handler())
response = opener.open(HEADRequest(url))
if response is None:
raise ExtractorError('Invalid URL protocol')
return response
def _extract_rss(self, url, video_id, doc):
playlist_title = doc.find('./channel/title').text
playlist_desc_el = doc.find('./channel/description')
@ -487,17 +477,32 @@ class GenericIE(InfoExtractor):
else:
assert ':' in default_search
return self.url_result(default_search + url)
video_id = os.path.splitext(url.rstrip('/').split('/')[-1])[0]
url, smuggled_data = unsmuggle_url(url)
force_videoid = None
is_intentional = smuggled_data and smuggled_data.get('to_generic')
if smuggled_data and 'force_videoid' in smuggled_data:
force_videoid = smuggled_data['force_videoid']
video_id = force_videoid
else:
video_id = os.path.splitext(url.rstrip('/').split('/')[-1])[0]
self.to_screen('%s: Requesting header' % video_id)
try:
response = self._send_head(url)
head_req = HEADRequest(url)
response = self._request_webpage(
head_req, video_id,
note=False, errnote='Could not send HEAD request to %s' % url,
fatal=False)
if response is not False:
# Check for redirect
new_url = response.geturl()
if url != new_url:
self.report_following_redirect(new_url)
if force_videoid:
new_url = smuggle_url(
new_url, {'force_videoid': force_videoid})
return self.url_result(new_url)
# Check for direct link to a video
@ -518,9 +523,8 @@ class GenericIE(InfoExtractor):
'upload_date': upload_date,
}
except compat_urllib_error.HTTPError:
# This may be a stupid server that doesn't like HEAD, our UA, or so
pass
if not self._downloader.params.get('test', False) and not is_intentional:
self._downloader.report_warning('Falling back on generic information extractor.')
try:
webpage = self._download_webpage(url, video_id)
@ -559,13 +563,25 @@ class GenericIE(InfoExtractor):
r'(?s)<title>(.*?)</title>', webpage, 'video title',
default='video')
# Try to detect age limit automatically
age_limit = self._rta_search(webpage)
# And then there are the jokers who advertise that they use RTA,
# but actually don't.
AGE_LIMIT_MARKERS = [
r'Proudly Labeled <a href="http://www.rtalabel.org/" title="Restricted to Adults">RTA</a>',
]
if any(re.search(marker, webpage) for marker in AGE_LIMIT_MARKERS):
age_limit = 18
# video uploader is domain name
video_uploader = self._search_regex(
r'^(?:https?://)?([^/]*)/.*', url, 'video uploader')
# Helper method
def _playlist_from_matches(matches, getter, ie=None):
urlrs = orderedSet(self.url_result(getter(m), ie) for m in matches)
urlrs = orderedSet(
self.url_result(self._proto_relative_url(getter(m)), ie)
for m in matches)
return self.playlist_result(
urlrs, playlist_id=video_id, playlist_title=video_title)
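The reworked _playlist_from_matches helper now routes every matched embed URL through self._proto_relative_url before wrapping it in a url_result, so scheme-less '//host/...' embeds no longer break downstream extractors. A minimal standalone stand-in for that normalization:

def proto_relative_url(url, scheme='http:'):
    # '//host/path' embeds inherit the scheme of the embedding page;
    # default to http: when the page scheme is unknown
    if url.startswith('//'):
        return scheme + url
    return url

print(proto_relative_url('//www.youtube.com/embed/dQw4w9WgXcQ'))
# -> http://www.youtube.com/embed/dQw4w9WgXcQ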
@ -609,12 +625,12 @@ class GenericIE(InfoExtractor):
embedSWF\(?:\s*
)
(["\'])
(?P<url>(?:https?:)?//(?:www\.)?youtube\.com/
(?:embed|v)/.+?)
(?P<url>(?:https?:)?//(?:www\.)?youtube(?:-nocookie)?\.com/
(?:embed|v|p)/.+?)
\1''', webpage)
if matches:
return _playlist_from_matches(
matches, lambda m: unescapeHTML(m[1]), ie='Youtube')
matches, lambda m: unescapeHTML(m[1]))
# Look for embedded Dailymotion player
matches = re.findall(
@ -623,6 +639,16 @@ class GenericIE(InfoExtractor):
return _playlist_from_matches(
matches, lambda m: unescapeHTML(m[1]))
# Look for embedded Dailymotion playlist player (#3822)
m = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?dailymotion\.[a-z]{2,3}/widget/jukebox\?.+?)\1', webpage)
if m:
playlists = re.findall(
r'list\[\]=/playlist/([^/]+)/', unescapeHTML(m.group('url')))
if playlists:
return _playlist_from_matches(
playlists, lambda p: '//dailymotion.com/playlist/%s' % p)
# Look for embedded Wistia player
match = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:fast\.)?wistia\.net/embed/iframe/.+?)\1', webpage)
@ -635,6 +661,16 @@ class GenericIE(InfoExtractor):
'title': video_title,
'id': video_id,
}
match = re.search(r'(?:id=["\']wistia_|data-wistiaid=["\']|Wistia\.embed\(["\'])(?P<id>[^"\']+)', webpage)
if match:
return {
'_type': 'url_transparent',
'url': 'http://fast.wistia.net/embed/iframe/{0:}'.format(match.group('id')),
'ie_key': 'Wistia',
'uploader': video_uploader,
'title': video_title,
'id': match.group('id')
}
# Look for embedded blip.tv player
mobj = re.search(r'<meta\s[^>]*https?://api\.blip\.tv/\w+/redirect/\w+/(\d+)', webpage)
@ -805,45 +841,64 @@ class GenericIE(InfoExtractor):
if mobj is not None:
return self.url_result(mobj.group('url'), 'SBS')
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>https?://m\.mlb\.com/shared/video/embed/embed\.html\?.+?)\1',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'MLB')
def check_video(vurl):
vpath = compat_urlparse.urlparse(vurl).path
vext = determine_ext(vpath)
return '.' in vpath and vext not in ('swf', 'png', 'jpg', 'srt', 'sbv', 'sub', 'vtt', 'ttml')
def filter_video(urls):
return list(filter(check_video, urls))
# Start with something easy: JW Player in SWFObject
found = re.findall(r'flashvars: [\'"](?:.*&)?file=(http[^\'"&]*)', webpage)
found = filter_video(re.findall(r'flashvars: [\'"](?:.*&)?file=(http[^\'"&]*)', webpage))
if not found:
# Look for gorilla-vid style embedding
found = re.findall(r'''(?sx)
found = filter_video(re.findall(r'''(?sx)
(?:
jw_plugins|
JWPlayerOptions|
jwplayer\s*\(\s*["'][^'"]+["']\s*\)\s*\.setup
)
.*?file\s*:\s*["\'](.*?)["\']''', webpage)
.*?file\s*:\s*["\'](.*?)["\']''', webpage))
if not found:
# Broaden the search a little bit
found = re.findall(r'[^A-Za-z0-9]?(?:file|source)=(http[^\'"&]*)', webpage)
found = filter_video(re.findall(r'[^A-Za-z0-9]?(?:file|source)=(http[^\'"&]*)', webpage))
if not found:
# Broaden the findall a little bit: JWPlayer JS loader
found = re.findall(r'[^A-Za-z0-9]?file["\']?:\s*["\'](http(?![^\'"]+\.[0-9]+[\'"])[^\'"]+)["\']', webpage)
found = filter_video(re.findall(
r'[^A-Za-z0-9]?file["\']?:\s*["\'](http(?![^\'"]+\.[0-9]+[\'"])[^\'"]+)["\']', webpage))
if not found:
# Flow player
found = filter_video(re.findall(r'''(?xs)
flowplayer\("[^"]+",\s*
\{[^}]+?\}\s*,
\s*{[^}]+? ["']?clip["']?\s*:\s*\{\s*
["']?url["']?\s*:\s*["']([^"']+)["']
''', webpage))
if not found:
# Try to find twitter cards info
found = re.findall(r'<meta (?:property|name)="twitter:player:stream" (?:content|value)="(.+?)"', webpage)
found = filter_video(re.findall(
r'<meta (?:property|name)="twitter:player:stream" (?:content|value)="(.+?)"', webpage))
if not found:
# We look for Open Graph info:
# We have to match any number spaces between elements, some sites try to align them (eg.: statigr.am)
m_video_type = re.findall(r'<meta.*?property="og:video:type".*?content="video/(.*?)"', webpage)
# We only look in og:video if the MIME type is a video, don't try if it's a Flash player:
if m_video_type is not None:
def check_video(vurl):
vpath = compat_urlparse.urlparse(vurl).path
return '.' in vpath and not vpath.endswith('.swf')
found = list(filter(
check_video,
re.findall(r'<meta.*?property="og:video".*?content="(.*?)"', webpage)))
found = filter_video(re.findall(r'<meta.*?property="og:video".*?content="(.*?)"', webpage))
if not found:
# HTML5 video
found = re.findall(r'(?s)<video[^<]*(?:>.*?<source.*?)? src="([^"]+)"', webpage)
found = re.findall(r'(?s)<video[^<]*(?:>.*?<source[^>]+)? src="([^"]+)"', webpage)
if not found:
found = re.search(
r'(?i)<meta\s+(?=(?:[a-z-]+="[^"]+"\s+)*http-equiv="refresh")'
r'(?:[a-z-]+="[^"]+"\s+)*?content="[0-9]{,2};url=\'([^\']+)\'"',
r'(?:[a-z-]+="[^"]+"\s+)*?content="[0-9]{,2};url=\'?([^\'"]+)',
webpage)
if found:
new_url = found.group(1)
@ -873,6 +928,7 @@ class GenericIE(InfoExtractor):
'url': video_url,
'uploader': video_uploader,
'title': video_title,
'age_limit': age_limit,
})
if len(entries) == 1:


@ -0,0 +1,398 @@
# coding: utf-8
from __future__ import unicode_literals
import random
import math
from .common import InfoExtractor
from ..utils import (
ExtractorError,
float_or_none,
compat_str,
compat_chr,
compat_ord,
)
class GloboIE(InfoExtractor):
_VALID_URL = 'https?://.+?\.globo\.com/(?P<id>.+)'
_API_URL_TEMPLATE = 'http://api.globovideos.com/videos/%s/playlist'
_SECURITY_URL_TEMPLATE = 'http://security.video.globo.com/videos/%s/hash?player=flash&version=2.9.9.50&resource_id=%s'
_VIDEOID_REGEXES = [
r'\bdata-video-id="(\d+)"',
r'\bdata-player-videosids="(\d+)"',
r'<div[^>]+\bid="(\d+)"',
]
_RESIGN_EXPIRATION = 86400
_TESTS = [
{
'url': 'http://globotv.globo.com/sportv/futebol-nacional/v/os-gols-de-atletico-mg-3-x-2-santos-pela-24a-rodada-do-brasileirao/3654973/',
'md5': '03ebf41cb7ade43581608b7d9b71fab0',
'info_dict': {
'id': '3654973',
'ext': 'mp4',
'title': 'Os gols de Atlético-MG 3 x 2 Santos pela 24ª rodada do Brasileirão',
'duration': 251.585,
'uploader': 'SporTV',
'uploader_id': 698,
'like_count': int,
}
},
{
'url': 'http://g1.globo.com/carros/autoesporte/videos/t/exclusivos-do-g1/v/mercedes-benz-gla-passa-por-teste-de-colisao-na-europa/3607726/',
'md5': 'b3ccc801f75cd04a914d51dadb83a78d',
'info_dict': {
'id': '3607726',
'ext': 'mp4',
'title': 'Mercedes-Benz GLA passa por teste de colisão na Europa',
'duration': 103.204,
'uploader': 'Globo.com',
'uploader_id': 265,
'like_count': int,
}
},
{
'url': 'http://g1.globo.com/jornal-nacional/noticia/2014/09/novidade-na-fiscalizacao-de-bagagem-pela-receita-provoca-discussoes.html',
'md5': '307fdeae4390ccfe6ba1aa198cf6e72b',
'info_dict': {
'id': '3652183',
'ext': 'mp4',
'title': 'Receita Federal explica como vai fiscalizar bagagens de quem retorna ao Brasil de avião',
'duration': 110.711,
'uploader': 'Rede Globo',
'uploader_id': 196,
'like_count': int,
}
},
]
class MD5():
HEX_FORMAT_LOWERCASE = 0
HEX_FORMAT_UPPERCASE = 1
BASE64_PAD_CHARACTER_DEFAULT_COMPLIANCE = ''
BASE64_PAD_CHARACTER_RFC_COMPLIANCE = '='
PADDING = '=0xFF01DD'
hexcase = 0
b64pad = ''
def __init__(self):
pass
class JSArray(list):
def __getitem__(self, y):
try:
return list.__getitem__(self, y)
except IndexError:
return 0
def __setitem__(self, i, y):
try:
return list.__setitem__(self, i, y)
except IndexError:
self.extend([0] * (i - len(self) + 1))
self[-1] = y
@classmethod
def hex_md5(cls, param1):
return cls.rstr2hex(cls.rstr_md5(cls.str2rstr_utf8(param1)))
@classmethod
def b64_md5(cls, param1, param2=None):
return cls.rstr2b64(cls.rstr_md5(cls.str2rstr_utf8(param1, param2)))
@classmethod
def any_md5(cls, param1, param2):
return cls.rstr2any(cls.rstr_md5(cls.str2rstr_utf8(param1)), param2)
@classmethod
def rstr_md5(cls, param1):
return cls.binl2rstr(cls.binl_md5(cls.rstr2binl(param1), len(param1) * 8))
@classmethod
def rstr2hex(cls, param1):
_loc_2 = '0123456789ABCDEF' if cls.hexcase else '0123456789abcdef'
_loc_3 = ''
for _loc_5 in range(0, len(param1)):
_loc_4 = compat_ord(param1[_loc_5])
_loc_3 += _loc_2[_loc_4 >> 4 & 15] + _loc_2[_loc_4 & 15]
return _loc_3
@classmethod
def rstr2b64(cls, param1):
_loc_2 = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_'
_loc_3 = ''
_loc_4 = len(param1)
for _loc_5 in range(0, _loc_4, 3):
_loc_6_1 = compat_ord(param1[_loc_5]) << 16
_loc_6_2 = compat_ord(param1[_loc_5 + 1]) << 8 if _loc_5 + 1 < _loc_4 else 0
_loc_6_3 = compat_ord(param1[_loc_5 + 2]) if _loc_5 + 2 < _loc_4 else 0
_loc_6 = _loc_6_1 | _loc_6_2 | _loc_6_3
for _loc_7 in range(0, 4):
if _loc_5 * 8 + _loc_7 * 6 > len(param1) * 8:
_loc_3 += cls.b64pad
else:
_loc_3 += _loc_2[_loc_6 >> 6 * (3 - _loc_7) & 63]
return _loc_3
@staticmethod
def rstr2any(param1, param2):
_loc_3 = len(param2)
_loc_4 = []
_loc_9 = [0] * ((len(param1) >> 2) + 1)
for _loc_5 in range(0, len(_loc_9)):
_loc_9[_loc_5] = compat_ord(param1[_loc_5 * 2]) << 8 | compat_ord(param1[_loc_5 * 2 + 1])
while len(_loc_9) > 0:
_loc_8 = []
_loc_7 = 0
for _loc_5 in range(0, len(_loc_9)):
_loc_7 = (_loc_7 << 16) + _loc_9[_loc_5]
_loc_6 = math.floor(_loc_7 / _loc_3)
_loc_7 -= _loc_6 * _loc_3
if len(_loc_8) > 0 or _loc_6 > 0:
_loc_8[len(_loc_8)] = _loc_6
_loc_4[len(_loc_4)] = _loc_7
_loc_9 = _loc_8
_loc_10 = ''
_loc_5 = len(_loc_4) - 1
while _loc_5 >= 0:
_loc_10 += param2[_loc_4[_loc_5]]
_loc_5 -= 1
return _loc_10
@classmethod
def str2rstr_utf8(cls, param1, param2=None):
_loc_3 = ''
_loc_4 = -1
if not param2:
param2 = cls.PADDING
param1 = param1 + param2[1:9]
while True:
_loc_4 += 1
if _loc_4 >= len(param1):
break
_loc_5 = compat_ord(param1[_loc_4])
_loc_6 = compat_ord(param1[_loc_4 + 1]) if _loc_4 + 1 < len(param1) else 0
if 55296 <= _loc_5 <= 56319 and 56320 <= _loc_6 <= 57343:
_loc_5 = 65536 + ((_loc_5 & 1023) << 10) + (_loc_6 & 1023)
_loc_4 += 1
if _loc_5 <= 127:
_loc_3 += compat_chr(_loc_5)
continue
if _loc_5 <= 2047:
_loc_3 += compat_chr(192 | _loc_5 >> 6 & 31) + compat_chr(128 | _loc_5 & 63)
continue
if _loc_5 <= 65535:
_loc_3 += compat_chr(224 | _loc_5 >> 12 & 15) + compat_chr(128 | _loc_5 >> 6 & 63) + compat_chr(
128 | _loc_5 & 63)
continue
if _loc_5 <= 2097151:
_loc_3 += compat_chr(240 | _loc_5 >> 18 & 7) + compat_chr(128 | _loc_5 >> 12 & 63) + compat_chr(
128 | _loc_5 >> 6 & 63) + compat_chr(128 | _loc_5 & 63)
return _loc_3
@staticmethod
def rstr2binl(param1):
_loc_2 = [0] * ((len(param1) >> 2) + 1)
for _loc_3 in range(0, len(_loc_2)):
_loc_2[_loc_3] = 0
for _loc_3 in range(0, len(param1) * 8, 8):
_loc_2[_loc_3 >> 5] |= (compat_ord(param1[_loc_3 // 8]) & 255) << _loc_3 % 32
return _loc_2
@staticmethod
def binl2rstr(param1):
_loc_2 = ''
for _loc_3 in range(0, len(param1) * 32, 8):
_loc_2 += compat_chr(param1[_loc_3 >> 5] >> _loc_3 % 32 & 255)
return _loc_2
@classmethod
def binl_md5(cls, param1, param2):
param1 = cls.JSArray(param1)
param1[param2 >> 5] |= 128 << param2 % 32
param1[(param2 + 64 >> 9 << 4) + 14] = param2
_loc_3 = 1732584193
_loc_4 = -271733879
_loc_5 = -1732584194
_loc_6 = 271733878
for _loc_7 in range(0, len(param1), 16):
_loc_8 = _loc_3
_loc_9 = _loc_4
_loc_10 = _loc_5
_loc_11 = _loc_6
_loc_3 = cls.md5_ff(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 0], 7, -680876936)
_loc_6 = cls.md5_ff(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 1], 12, -389564586)
_loc_5 = cls.md5_ff(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 2], 17, 606105819)
_loc_4 = cls.md5_ff(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 3], 22, -1044525330)
_loc_3 = cls.md5_ff(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 4], 7, -176418897)
_loc_6 = cls.md5_ff(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 5], 12, 1200080426)
_loc_5 = cls.md5_ff(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 6], 17, -1473231341)
_loc_4 = cls.md5_ff(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 7], 22, -45705983)
_loc_3 = cls.md5_ff(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 8], 7, 1770035416)
_loc_6 = cls.md5_ff(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 9], 12, -1958414417)
_loc_5 = cls.md5_ff(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 10], 17, -42063)
_loc_4 = cls.md5_ff(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 11], 22, -1990404162)
_loc_3 = cls.md5_ff(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 12], 7, 1804603682)
_loc_6 = cls.md5_ff(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 13], 12, -40341101)
_loc_5 = cls.md5_ff(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 14], 17, -1502002290)
_loc_4 = cls.md5_ff(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 15], 22, 1236535329)
_loc_3 = cls.md5_gg(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 1], 5, -165796510)
_loc_6 = cls.md5_gg(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 6], 9, -1069501632)
_loc_5 = cls.md5_gg(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 11], 14, 643717713)
_loc_4 = cls.md5_gg(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 0], 20, -373897302)
_loc_3 = cls.md5_gg(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 5], 5, -701558691)
_loc_6 = cls.md5_gg(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 10], 9, 38016083)
_loc_5 = cls.md5_gg(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 15], 14, -660478335)
_loc_4 = cls.md5_gg(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 4], 20, -405537848)
_loc_3 = cls.md5_gg(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 9], 5, 568446438)
_loc_6 = cls.md5_gg(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 14], 9, -1019803690)
_loc_5 = cls.md5_gg(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 3], 14, -187363961)
_loc_4 = cls.md5_gg(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 8], 20, 1163531501)
_loc_3 = cls.md5_gg(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 13], 5, -1444681467)
_loc_6 = cls.md5_gg(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 2], 9, -51403784)
_loc_5 = cls.md5_gg(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 7], 14, 1735328473)
_loc_4 = cls.md5_gg(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 12], 20, -1926607734)
_loc_3 = cls.md5_hh(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 5], 4, -378558)
_loc_6 = cls.md5_hh(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 8], 11, -2022574463)
_loc_5 = cls.md5_hh(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 11], 16, 1839030562)
_loc_4 = cls.md5_hh(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 14], 23, -35309556)
_loc_3 = cls.md5_hh(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 1], 4, -1530992060)
_loc_6 = cls.md5_hh(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 4], 11, 1272893353)
_loc_5 = cls.md5_hh(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 7], 16, -155497632)
_loc_4 = cls.md5_hh(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 10], 23, -1094730640)
_loc_3 = cls.md5_hh(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 13], 4, 681279174)
_loc_6 = cls.md5_hh(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 0], 11, -358537222)
_loc_5 = cls.md5_hh(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 3], 16, -722521979)
_loc_4 = cls.md5_hh(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 6], 23, 76029189)
_loc_3 = cls.md5_hh(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 9], 4, -640364487)
_loc_6 = cls.md5_hh(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 12], 11, -421815835)
_loc_5 = cls.md5_hh(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 15], 16, 530742520)
_loc_4 = cls.md5_hh(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 2], 23, -995338651)
_loc_3 = cls.md5_ii(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 0], 6, -198630844)
_loc_6 = cls.md5_ii(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 7], 10, 1126891415)
_loc_5 = cls.md5_ii(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 14], 15, -1416354905)
_loc_4 = cls.md5_ii(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 5], 21, -57434055)
_loc_3 = cls.md5_ii(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 12], 6, 1700485571)
_loc_6 = cls.md5_ii(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 3], 10, -1894986606)
_loc_5 = cls.md5_ii(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 10], 15, -1051523)
_loc_4 = cls.md5_ii(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 1], 21, -2054922799)
_loc_3 = cls.md5_ii(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 8], 6, 1873313359)
_loc_6 = cls.md5_ii(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 15], 10, -30611744)
_loc_5 = cls.md5_ii(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 6], 15, -1560198380)
_loc_4 = cls.md5_ii(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 13], 21, 1309151649)
_loc_3 = cls.md5_ii(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 4], 6, -145523070)
_loc_6 = cls.md5_ii(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 11], 10, -1120210379)
_loc_5 = cls.md5_ii(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 2], 15, 718787259)
_loc_4 = cls.md5_ii(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 9], 21, -343485551)
_loc_3 = cls.safe_add(_loc_3, _loc_8)
_loc_4 = cls.safe_add(_loc_4, _loc_9)
_loc_5 = cls.safe_add(_loc_5, _loc_10)
_loc_6 = cls.safe_add(_loc_6, _loc_11)
return [_loc_3, _loc_4, _loc_5, _loc_6]
@classmethod
def md5_cmn(cls, param1, param2, param3, param4, param5, param6):
return cls.safe_add(
cls.bit_rol(cls.safe_add(cls.safe_add(param2, param1), cls.safe_add(param4, param6)), param5), param3)
@classmethod
def md5_ff(cls, param1, param2, param3, param4, param5, param6, param7):
return cls.md5_cmn(param2 & param3 | ~param2 & param4, param1, param2, param5, param6, param7)
@classmethod
def md5_gg(cls, param1, param2, param3, param4, param5, param6, param7):
return cls.md5_cmn(param2 & param4 | param3 & ~param4, param1, param2, param5, param6, param7)
@classmethod
def md5_hh(cls, param1, param2, param3, param4, param5, param6, param7):
return cls.md5_cmn(param2 ^ param3 ^ param4, param1, param2, param5, param6, param7)
@classmethod
def md5_ii(cls, param1, param2, param3, param4, param5, param6, param7):
return cls.md5_cmn(param3 ^ (param2 | ~param4), param1, param2, param5, param6, param7)
@classmethod
def safe_add(cls, param1, param2):
_loc_3 = (param1 & 65535) + (param2 & 65535)
_loc_4 = (param1 >> 16) + (param2 >> 16) + (_loc_3 >> 16)
return cls.lshift(_loc_4, 16) | _loc_3 & 65535
@classmethod
def bit_rol(cls, param1, param2):
return cls.lshift(param1, param2) | (param1 & 0xFFFFFFFF) >> (32 - param2)
@staticmethod
def lshift(value, count):
r = (0xFFFFFFFF & value) << count
return -(~(r - 1) & 0xFFFFFFFF) if r > 0x7FFFFFFF else r
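# Editor's note: safe_add and lshift above exist because this hash routine was
# ported from ActionScript/JavaScript, whose bitwise operators act on signed
# 32-bit integers, while Python ints are unbounded; lshift masks to 32 bits and
# re-applies the two's-complement sign by hand. A quick illustration (hedged,
# calling the staticmethod directly):
#
#     >>> 1 << 31            # plain Python shift: no overflow
#     2147483648
#     >>> MD5.lshift(1, 31)  # JS-style 32-bit shift: the high bit flips the sign
#     -2147483648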
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
video_id = self._search_regex(self._VIDEOID_REGEXES, webpage, 'video id')
video = self._download_json(
self._API_URL_TEMPLATE % video_id, video_id)['videos'][0]
title = video['title']
duration = float_or_none(video['duration'], 1000)
like_count = video['likes']
uploader = video['channel']
uploader_id = video['channel_id']
formats = []
for resource in video['resources']:
resource_id = resource.get('_id')
if not resource_id:
continue
security = self._download_json(
self._SECURITY_URL_TEMPLATE % (video_id, resource_id),
video_id, 'Downloading security hash for %s' % resource_id)
security_hash = security.get('hash')
if not security_hash:
message = security.get('message')
if message:
raise ExtractorError(
'%s returned error: %s' % (self.IE_NAME, message), expected=True)
continue
hash_code = security_hash[:2]
received_time = int(security_hash[2:12])
received_random = security_hash[12:22]
received_md5 = security_hash[22:]
sign_time = received_time + self._RESIGN_EXPIRATION
padding = '%010d' % random.randint(1, 10000000000)
signed_md5 = self.MD5.b64_md5(received_md5 + compat_str(sign_time) + padding)
signed_hash = hash_code + compat_str(received_time) + received_random + compat_str(sign_time) + padding + signed_md5
formats.append({
'url': '%s?h=%s&k=%s' % (resource['url'], signed_hash, 'flash'),
'format_id': resource_id,
'height': resource['height']
})
self._sort_formats(formats)
return {
'id': video_id,
'title': title,
'duration': duration,
'uploader': uploader,
'uploader_id': uploader_id,
'like_count': like_count,
'formats': formats
}
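With the MD5 port in place, the signing itself is a slicing-and-reassembly step: the server's security hash splits into a 2-char code, a 10-digit timestamp, a 10-char nonce and a digest; the digest is re-hashed with an expiry timestamp plus random padding, and the concatenation becomes the h= token on the stream URL. A standalone sketch of that flow (field widths taken from the code above; any b64-MD5 callable can be plugged in where GloboIE.MD5.b64_md5 is used):

import random

def sign_security_hash(security_hash, b64_md5, resign_expiration=86400):
    hash_code = security_hash[:2]             # 2-char code
    received_time = int(security_hash[2:12])  # 10-digit unix timestamp
    received_random = security_hash[12:22]    # 10-char nonce
    received_md5 = security_hash[22:]         # remainder: server digest
    sign_time = received_time + resign_expiration
    padding = '%010d' % random.randint(1, 10000000000)
    signed_md5 = b64_md5(received_md5 + str(sign_time) + padding)
    return (hash_code + str(received_time) + received_random
            + str(sign_time) + padding + signed_md5)

# usage inside the extractor is then roughly:
#   signed_hash = sign_security_hash(security_hash, GloboIE.MD5.b64_md5)
#   format_url = '%s?h=%s&k=flash' % (resource['url'], signed_hash)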


@ -36,16 +36,16 @@ class GodTubeIE(InfoExtractor):
'http://www.godtube.com/resource/mediaplayer/%s.xml' % video_id.lower(),
video_id, 'Downloading player config XML')
video_url = config.find('.//file').text
uploader = config.find('.//author').text
timestamp = parse_iso8601(config.find('.//date').text)
duration = parse_duration(config.find('.//duration').text)
thumbnail = config.find('.//image').text
video_url = config.find('file').text
uploader = config.find('author').text
timestamp = parse_iso8601(config.find('date').text)
duration = parse_duration(config.find('duration').text)
thumbnail = config.find('image').text
media = self._download_xml(
'http://www.godtube.com/media/xml/?v=%s' % video_id, video_id, 'Downloading media XML')
title = media.find('.//title').text
title = media.find('title').text
return {
'id': video_id,
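The whole GodTube change is the XPath form: in ElementTree, find('.//x') searches all descendants in document order, while find('x') matches direct children only, which is sufficient and unambiguous on this flat config document. A quick demonstration of the difference:

import xml.etree.ElementTree as ET

config = ET.fromstring(
    '<config><wrap><file>nested</file></wrap><file>direct</file></config>')
print(config.find('file').text)     # -> direct (children of <config> only)
print(config.find('.//file').text)  # -> nested (any depth, document order)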


@ -0,0 +1,69 @@
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
compat_urlparse,
determine_ext,
)
class GolemIE(InfoExtractor):
_VALID_URL = r'^https?://video\.golem\.de/.+?/(?P<id>.+?)/'
_TEST = {
'url': 'http://video.golem.de/handy/14095/iphone-6-und-6-plus-test.html',
'md5': 'c1a2c0a3c863319651c7c992c5ee29bf',
'info_dict': {
'id': '14095',
'format_id': 'high',
'ext': 'mp4',
'title': 'iPhone 6 und 6 Plus - Test',
'duration': 300.44,
'filesize': 65309548,
}
}
_PREFIX = 'http://video.golem.de'
def _real_extract(self, url):
video_id = self._match_id(url)
config = self._download_xml(
'https://video.golem.de/xml/{0}.xml'.format(video_id), video_id)
info = {
'id': video_id,
'title': config.findtext('./title', 'golem'),
'duration': self._float(config.findtext('./playtime'), 'duration'),
}
formats = []
for e in config:
url = e.findtext('./url')
if not url:
continue
formats.append({
'format_id': e.tag,
'url': compat_urlparse.urljoin(self._PREFIX, url),
'height': self._int(e.get('height'), 'height'),
'width': self._int(e.get('width'), 'width'),
'filesize': self._int(e.findtext('filesize'), 'filesize'),
'ext': determine_ext(e.findtext('./filename')),
})
self._sort_formats(formats)
info['formats'] = formats
thumbnails = []
for e in config.findall('.//teaser'):
url = e.findtext('./url')
if not url:
continue
thumbnails.append({
'url': compat_urlparse.urljoin(self._PREFIX, url),
'width': self._int(e.get('width'), 'thumbnail width'),
'height': self._int(e.get('height'), 'thumbnail height'),
})
info['thumbnails'] = thumbnails
return info
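The Golem extractor leans on ElementTree's findtext defaults ('./title' falls back to 'golem') and on the _int/_float helpers for lenient numeric parsing. A short demonstration of the findtext behavior it relies on:

import xml.etree.ElementTree as ET

config = ET.fromstring('<video><playtime>300.44</playtime></video>')
print(config.findtext('./title', 'golem'))  # -> golem (default: element missing)
print(config.findtext('./playtime'))        # -> 300.44 (always a string; caller converts)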


@ -1,13 +1,11 @@
# coding: utf-8
from __future__ import unicode_literals
import datetime
import re
import codecs
from .common import InfoExtractor
from ..utils import (
ExtractorError,
)
from ..utils import unified_strdate
class GooglePlusIE(InfoExtractor):
@ -19,74 +17,57 @@ class GooglePlusIE(InfoExtractor):
'info_dict': {
'id': 'ZButuJc6CtH',
'ext': 'flv',
'title': '嘆きの天使 降臨',
'upload_date': '20120613',
'uploader': '井上ヨシマサ',
'title': '嘆きの天使 降臨',
}
}
def _real_extract(self, url):
# Extract id from URL
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
video_id = self._match_id(url)
# Step 1, Retrieve post webpage to extract further information
webpage = self._download_webpage(url, video_id, 'Downloading entry webpage')
self.report_extraction(video_id)
# Extract update date
upload_date = self._html_search_regex(
title = self._og_search_description(webpage).splitlines()[0]
upload_date = unified_strdate(self._html_search_regex(
r'''(?x)<a.+?class="o-U-s\s[^"]+"\s+style="display:\s*none"\s*>
([0-9]{4}-[0-9]{2}-[0-9]{2})</a>''',
webpage, 'upload date', fatal=False, flags=re.VERBOSE)
if upload_date:
# Convert timestring to a format suitable for filename
upload_date = datetime.datetime.strptime(upload_date, "%Y-%m-%d")
upload_date = upload_date.strftime('%Y%m%d')
# Extract uploader
uploader = self._html_search_regex(r'rel\="author".*?>(.*?)</a>',
webpage, 'uploader', fatal=False)
# Extract title
# Get the first line for title
video_title = self._og_search_description(webpage).splitlines()[0]
webpage, 'upload date', fatal=False, flags=re.VERBOSE))
uploader = self._html_search_regex(
r'rel="author".*?>(.*?)</a>', webpage, 'uploader', fatal=False)
# Step 2, Simulate clicking the image box to launch video
DOMAIN = 'https://plus.google.com/'
video_page = self._search_regex(r'<a href="((?:%s)?photos/.*?)"' % re.escape(DOMAIN),
video_page = self._search_regex(
r'<a href="((?:%s)?photos/.*?)"' % re.escape(DOMAIN),
webpage, 'video page URL')
if not video_page.startswith(DOMAIN):
video_page = DOMAIN + video_page
webpage = self._download_webpage(video_page, video_id, 'Downloading video page')
def unicode_escape(s):
decoder = codecs.getdecoder('unicode_escape')
return re.sub(
r'\\u[0-9a-fA-F]{4,}',
lambda m: decoder(m.group(0))[0],
s)
# Extract video links all sizes
pattern = r'\d+,\d+,(\d+),"(http\://redirector\.googlevideo\.com.*?)"'
mobj = re.findall(pattern, webpage)
if len(mobj) == 0:
raise ExtractorError('Unable to extract video links')
# Sort in resolution
links = sorted(mobj)
# Choose the lowest of the sort, i.e. highest resolution
video_url = links[-1]
# Only get the url. The resolution part in the tuple has no use anymore
video_url = video_url[-1]
# Treat escaped \u0026 style hex
try:
video_url = video_url.decode("unicode_escape")
except AttributeError: # Python 3
video_url = bytes(video_url, 'ascii').decode('unicode-escape')
formats = [{
'url': unicode_escape(video_url),
'ext': 'flv',
'width': int(width),
'height': int(height),
} for width, height, video_url in re.findall(
r'\d+,(\d+),(\d+),"(https?://redirector\.googlevideo\.com.*?)"', webpage)]
self._sort_formats(formats)
return {
'id': video_id,
'url': video_url,
'title': title,
'uploader': uploader,
'upload_date': upload_date,
'title': video_title,
'ext': 'flv',
'formats': formats,
}
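
The unicode_escape helper introduced here decodes only \uXXXX sequences, match by match, instead of pushing the whole string through the codec as the removed try/except AttributeError dance did; that keeps other backslashes and non-ASCII text intact and behaves the same on Python 2 and 3. Standalone:

    import codecs
    import re

    def unicode_escape(s):
        # Decode \uXXXX escapes only; everything else is left untouched.
        decoder = codecs.getdecoder('unicode_escape')
        return re.sub(
            r'\\u[0-9a-fA-F]{4,}',
            lambda m: decoder(m.group(0))[0],
            s)

    print(unicode_escape('http://redirector.googlevideo.com/v?a\\u0026b=1'))
    # http://redirector.googlevideo.com/v?a&b=1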

View File

@@ -14,6 +14,14 @@ class GoogleSearchIE(SearchInfoExtractor):
_MAX_RESULTS = 1000
IE_NAME = 'video.google:search'
_SEARCH_KEY = 'gvsearch'
_TEST = {
'url': 'gvsearch15:python language',
'info_dict': {
'id': 'python language',
'title': 'python language',
},
'playlist_count': 15,
}
def _get_n_results(self, query, n):
"""Get a specified number of results for a query"""

View File

@@ -5,6 +5,7 @@ import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
determine_ext,
compat_urllib_parse,
compat_urllib_request,
@@ -12,20 +13,22 @@ from ..utils import (
class GorillaVidIE(InfoExtractor):
IE_DESC = 'GorillaVid.in and daclips.in'
IE_DESC = 'GorillaVid.in, daclips.in and movpod.in'
_VALID_URL = r'''(?x)
https?://(?P<host>(?:www\.)?
(?:daclips\.in|gorillavid\.in))/
(?:daclips\.in|gorillavid\.in|movpod\.in))/
(?:embed-)?(?P<id>[0-9a-zA-Z]+)(?:-[0-9]+x[0-9]+\.html)?
'''
_FILE_NOT_FOUND_REGEX = r'>(?:404 - )?File Not Found<'
_TESTS = [{
'url': 'http://gorillavid.in/06y9juieqpmi',
'md5': '5ae4a3580620380619678ee4875893ba',
'info_dict': {
'id': '06y9juieqpmi',
'ext': 'flv',
'title': 'Rebecca Black My Moment Official Music Video Reaction',
'title': 'Rebecca Black My Moment Official Music Video Reaction-6GK87Rc8bzQ',
'thumbnail': 're:http://.*\.jpg',
},
}, {
@@ -46,6 +49,9 @@ class GorillaVidIE(InfoExtractor):
'title': 'Micro Pig piglets ready on 16th July 2009',
'thumbnail': 're:http://.*\.jpg',
},
}, {
'url': 'http://movpod.in/0wguyyxi1yca',
'only_matching': True,
}]
def _real_extract(self, url):
@@ -54,6 +60,9 @@ class GorillaVidIE(InfoExtractor):
webpage = self._download_webpage('http://%s/%s' % (mobj.group('host'), video_id), video_id)
if re.search(self._FILE_NOT_FOUND_REGEX, webpage) is not None:
raise ExtractorError('Video %s does not exist' % video_id, expected=True)
fields = dict(re.findall(r'''(?x)<input\s+
type="hidden"\s+
name="([^"]+)"\s+
@@ -69,14 +78,14 @@ class GorillaVidIE(InfoExtractor):
webpage = self._download_webpage(req, video_id, 'Downloading video page')
title = self._search_regex(r'style="z-index: [0-9]+;">([0-9a-zA-Z ]+)(?:-.+)?</span>', webpage, 'title')
thumbnail = self._search_regex(r'image:\'(http[^\']+)\',', webpage, 'thumbnail')
url = self._search_regex(r'file: \'(http[^\']+)\',', webpage, 'file url')
title = self._search_regex(r'style="z-index: [0-9]+;">([^<]+)</span>', webpage, 'title')
video_url = self._search_regex(r'file\s*:\s*\'(http[^\']+)\',', webpage, 'file url')
thumbnail = self._search_regex(r'image\s*:\s*\'(http[^\']+)\',', webpage, 'thumbnail', fatal=False)
formats = [{
'format_id': 'sd',
'url': url,
'ext': determine_ext(url),
'url': video_url,
'ext': determine_ext(video_url),
'quality': 1,
}]
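
The hidden-input harvesting in this extractor (HostingBulk below uses the same pattern) is the standard dance for these file hosts: the landing page embeds a form whose fields must be POSTed back before the real video page is served. A condensed sketch:

    import re

    def hidden_fields(webpage):
        # Collect <input type="hidden" name=... value=...> pairs so they
        # can be re-submitted verbatim as the POST body.
        return dict(re.findall(r'''(?x)<input\s+
            type="hidden"\s+
            name="([^"]+)"\s+
            value="([^"]*)"
            ''', webpage))

    page = ('<input type="hidden" name="op" value="download1">'
            '<input type="hidden" name="id" value="06y9juieqpmi">')
    print(hidden_fields(page))  # {'op': 'download1', 'id': '06y9juieqpmi'}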

View File

@@ -0,0 +1,81 @@
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
get_meta_content,
parse_iso8601,
)
class HeiseIE(InfoExtractor):
_VALID_URL = r'''(?x)
https?://(?:www\.)?heise\.de/video/artikel/
.+?(?P<id>[0-9]+)\.html(?:$|[?#])
'''
_TEST = {
'url': (
'http://www.heise.de/video/artikel/Podcast-c-t-uplink-3-3-Owncloud-Tastaturen-Peilsender-Smartphone-2404147.html'
),
'md5': 'ffed432483e922e88545ad9f2f15d30e',
'info_dict': {
'id': '2404147',
'ext': 'mp4',
'title': (
"Podcast: c't uplink 3.3 Owncloud / Tastaturen / Peilsender Smartphone"
),
'format_id': 'mp4_720',
'timestamp': 1411812600,
'upload_date': '20140927',
'description': 'In uplink-Episode 3.3 geht es darum, wie man sich von Cloud-Anbietern emanzipieren kann, worauf man beim Kauf einer Tastatur achten sollte und was Smartphones über uns verraten.',
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
json_url = self._search_regex(
r'json_url:\s*"([^"]+)"', webpage, 'json URL')
config = self._download_json(json_url, video_id)
info = {
'id': video_id,
'thumbnail': config.get('poster'),
'timestamp': parse_iso8601(get_meta_content('date', webpage)),
'description': self._og_search_description(webpage),
}
title = get_meta_content('fulltitle', webpage)
if title:
info['title'] = title
elif config.get('title'):
info['title'] = config['title']
else:
info['title'] = self._og_search_title(webpage)
formats = []
for t, rs in config['formats'].items():
if not rs or not hasattr(rs, 'items'):
self._downloader.report_warning(
'formats: {0}: no resolutions'.format(t))
continue
for height_str, obj in rs.items():
format_id = '{0}_{1}'.format(t, height_str)
if not obj or not obj.get('url'):
self._downloader.report_warning(
'formats: {0}: no url'.format(format_id))
continue
formats.append({
'url': obj['url'],
'format_id': format_id,
'height': self._int(height_str, 'height'),
})
self._sort_formats(formats)
info['formats'] = formats
return info
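
Heise's config JSON nests formats two levels deep (container type, then height), and the loop above degrades gracefully: a malformed branch produces a warning and is skipped rather than aborting the extraction. With made-up sample data the traversal looks like this:

    sample = {
        'mp4': {'720': {'url': 'http://example.invalid/720.mp4'}},
        'webm': None,  # tolerated: warned about and skipped
    }

    formats = []
    for t, rs in sample.items():
        if not rs or not hasattr(rs, 'items'):
            continue  # the extractor calls report_warning() here
        for height_str, obj in rs.items():
            if not obj or not obj.get('url'):
                continue
            formats.append({
                'url': obj['url'],
                'format_id': '{0}_{1}'.format(t, height_str),
                'height': int(height_str),
            })
    print(formats)  # [{'url': '...', 'format_id': 'mp4_720', 'height': 720}]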

View File

@@ -0,0 +1,56 @@
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
int_or_none,
parse_duration,
)
class HornBunnyIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?hornbunny\.com/videos/(?P<title_dash>[a-z-]+)-(?P<id>\d+)\.html'
_TEST = {
'url': 'http://hornbunny.com/videos/panty-slut-jerk-off-instruction-5227.html',
'md5': '95e40865aedd08eff60272b704852ad7',
'info_dict': {
'id': '5227',
'ext': 'flv',
'title': 'panty slut jerk off instruction',
'duration': 550,
'age_limit': 18,
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
webpage = self._download_webpage(
url, video_id, note='Downloading initial webpage')
title = self._html_search_regex(
r'class="title">(.*?)</h2>', webpage, 'title')
redirect_url = self._html_search_regex(
r'pg&settings=(.*?)\|0"\);', webpage, 'redirect url')
webpage2 = self._download_webpage(redirect_url, video_id)
video_url = self._html_search_regex(
r'flvMask:(.*?);', webpage2, 'video_url')
duration = parse_duration(self._search_regex(
r'<strong>Runtime:</strong>\s*([0-9:]+)</div>',
webpage, 'duration', fatal=False))
view_count = int_or_none(self._search_regex(
r'<strong>Views:</strong>\s*(\d+)</div>',
webpage, 'view count', fatal=False))
return {
'id': video_id,
'url': video_url,
'title': title,
'ext': 'flv',
'duration': duration,
'view_count': view_count,
'age_limit': 18,
}
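
parse_duration converts the human-readable runtime captured above into seconds. Assuming the page carries a Runtime value like '9:10' (an inference from the test's expected duration), the helper returns 550, and it is None-safe, which is why it can wrap a fatal=False regex directly:

    from youtube_dl.utils import parse_duration

    print(parse_duration('9:10'))  # 550 (9 * 60 + 10)
    print(parse_duration(None))    # None -- safe with fatal=False lookups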

View File

@@ -0,0 +1,84 @@
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
compat_urllib_request,
int_or_none,
urlencode_postdata,
)
class HostingBulkIE(InfoExtractor):
_VALID_URL = r'''(?x)
https?://(?:www\.)?hostingbulk\.com/
(?:embed-)?(?P<id>[A-Za-z0-9]{12})(?:-\d+x\d+)?\.html'''
_FILE_DELETED_REGEX = r'<b>File Not Found</b>'
_TEST = {
'url': 'http://hostingbulk.com/n0ulw1hv20fm.html',
'md5': '6c8653c8ecf7ebfa83b76e24b7b2fe3f',
'info_dict': {
'id': 'n0ulw1hv20fm',
'ext': 'mp4',
'title': 'md5:5afeba33f48ec87219c269e054afd622',
'filesize': 6816081,
'thumbnail': 're:^http://.*\.jpg$',
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
url = 'http://hostingbulk.com/{0}.html'.format(video_id)
# Custom request with a cookie to set the language to English, so that
# our file-deleted regex matches.
request = compat_urllib_request.Request(
url, headers={'Cookie': 'lang=english'})
webpage = self._download_webpage(request, video_id)
if re.search(self._FILE_DELETED_REGEX, webpage) is not None:
raise ExtractorError('Video %s does not exist' % video_id,
expected=True)
title = self._html_search_regex(r'<h3>(.*?)</h3>', webpage, 'title')
filesize = int_or_none(
self._search_regex(
r'<small>\((\d+)\sbytes?\)</small>',
webpage,
'filesize',
fatal=False
)
)
thumbnail = self._search_regex(
r'<img src="([^"]+)".+?class="pic"',
webpage, 'thumbnail', fatal=False)
fields = dict(re.findall(r'''(?x)<input\s+
type="hidden"\s+
name="([^"]+)"\s+
value="([^"]*)"
''', webpage))
request = compat_urllib_request.Request(url, urlencode_postdata(fields))
request.add_header('Content-type', 'application/x-www-form-urlencoded')
response = self._request_webpage(request, video_id,
'Submitting download request')
video_url = response.geturl()
formats = [{
'format_id': 'sd',
'filesize': filesize,
'url': video_url,
}]
return {
'id': video_id,
'title': title,
'thumbnail': thumbnail,
'formats': formats,
}
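
HostingBulk never prints the file URL in its markup; it is the location the server redirects to once the hidden fields are POSTed back, which is why the code reads response.geturl() instead of scraping the response body. A sketch of that trick with plain urllib (endpoint and fields are hypothetical):

    try:
        from urllib.request import Request, urlopen  # Python 3
    except ImportError:
        from urllib2 import Request, urlopen         # Python 2

    req = Request('http://example.invalid/n0ulw1hv20fm.html',
                  'op=download1&id=n0ulw1hv20fm'.encode('ascii'))
    req.add_header('Content-type', 'application/x-www-form-urlencoded')
    response = urlopen(req)
    video_url = response.geturl()  # final URL after redirects: the video file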

View File

@@ -28,13 +28,13 @@ class HowStuffWorksIE(InfoExtractor):
}
},
{
'url': 'http://adventure.howstuffworks.com/39516-deadliest-catch-jakes-farewell-pots-video.htm',
'url': 'http://adventure.howstuffworks.com/7199-survival-zone-food-and-water-in-the-savanna-video.htm',
'info_dict': {
'id': '553470',
'display_id': 'deadliest-catch-jakes-farewell-pots',
'id': '453464',
'display_id': 'survival-zone-food-and-water-in-the-savanna',
'ext': 'mp4',
'title': 'Deadliest Catch: Jake\'s Farewell Pots',
'description': 'md5:9632c346d5e43ee238028c9cefd8dbbc',
'title': 'Survival Zone: Food and Water In the Savanna',
'description': 'md5:7e1c89f6411434970c15fa094170c371',
'thumbnail': 're:^https?://.*\.jpg$',
},
'params': {

View File

@@ -33,8 +33,7 @@ class HuffPostIE(InfoExtractor):
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
video_id = self._match_id(url)
api_url = 'http://embed.live.huffingtonpost.com/api/segments/%s.json' % video_id
data = self._download_json(api_url, video_id)['data']
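
The recurring mobj/re.match-to-_match_id swap in this batch (HuffPost here, Jukebox and Jpopsuki below) is pure modernization: _match_id applies _VALID_URL and returns the 'id' group in one step. A simplified sketch of the helper's behavior (the real method lives on InfoExtractor):

    import re

    _VALID_URL = r'https?://example\.invalid/video/(?P<id>[0-9]+)'

    def match_id(url):
        # Simplified equivalent of InfoExtractor._match_id
        return re.match(_VALID_URL, url).group('id')

    print(match_id('http://example.invalid/video/12345'))  # 12345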

View File

@@ -18,6 +18,7 @@ class IGNIE(InfoExtractor):
_DESCRIPTION_RE = [
r'<span class="page-object-description">(.+?)</span>',
r'id="my_show_video">.*?<p>(.*?)</p>',
r'<meta name="description" content="(.*?)"',
]
_TESTS = [
@@ -55,13 +56,26 @@ class IGNIE(InfoExtractor):
'skip_download': True,
},
},
{
'url': 'http://www.ign.com/articles/2014/08/15/rewind-theater-wild-trailer-gamescom-2014?watch',
'md5': '4e9a0bda1e5eebd31ddcf86ec0b9b3c7',
'info_dict': {
'id': '078fdd005f6d3c02f63d795faa1b984f',
'ext': 'mp4',
'title': 'Rewind Theater - Wild Trailer Gamescom 2014',
'description': 'Giant skeletons, bloody hunts, and captivating'
' natural beauty take our breath away.',
},
},
]
def _find_video_id(self, webpage):
res_id = [
r'"video_id"\s*:\s*"(.*?)"',
r'data-video-id="(.+?)"',
r'<object id="vid_(.+?)"',
r'<meta name="og:image" content=".*/(.+?)-(.+?)/.+.jpg"',
r'class="hero-poster[^"]*?"[^>]*id="(.+?)"',
]
return self._search_regex(res_id, webpage, 'video id')
@@ -70,15 +84,17 @@ class IGNIE(InfoExtractor):
name_or_id = mobj.group('name_or_id')
page_type = mobj.group('type')
webpage = self._download_webpage(url, name_or_id)
if page_type == 'articles':
video_url = self._search_regex(r'var videoUrl = "(.+?)"', webpage, 'video url')
return self.url_result(video_url, ie='IGN')
elif page_type != 'video':
if page_type != 'video':
multiple_urls = re.findall(
'<param name="flashvars" value="[^"]*?url=(https?://www\.ign\.com/videos/.*?)["&]',
'<param name="flashvars"[^>]*value="[^"]*?url=(https?://www\.ign\.com/videos/.*?)["&]',
webpage)
if multiple_urls:
return [self.url_result(u, ie='IGN') for u in multiple_urls]
entries = [self.url_result(u, ie='IGN') for u in multiple_urls]
return {
'_type': 'playlist',
'id': name_or_id,
'entries': entries,
}
video_id = self._find_video_id(webpage)
result = self._get_video_info(video_id)
@@ -101,13 +117,13 @@ class IGNIE(InfoExtractor):
class OneUPIE(IGNIE):
_VALID_URL = r'https?://gamevideos\.1up\.com/(?P<type>video)/id/(?P<name_or_id>.+)'
_VALID_URL = r'https?://gamevideos\.1up\.com/(?P<type>video)/id/(?P<name_or_id>.+)\.html'
IE_NAME = '1up.com'
_DESCRIPTION_RE = r'<div id="vid_summary">(.+?)</div>'
_TESTS = [{
'url': 'http://gamevideos.1up.com/video/id/34976',
'url': 'http://gamevideos.1up.com/video/id/34976.html',
'md5': '68a54ce4ebc772e4b71e3123d413163d',
'info_dict': {
'id': '34976',
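
The IGN change replaces the bare list of url results with a proper playlist dict, the shape YoutubeDL expects for multi-video pages; each entry defers back to the IGN extractor the way url_result does. Roughly:

    urls = ['http://www.ign.com/videos/example-1',
            'http://www.ign.com/videos/example-2']  # hypothetical matches
    playlist = {
        '_type': 'playlist',
        'id': 'some-article-slug',  # the page's name_or_id
        'entries': [{'_type': 'url', 'url': u, 'ie_key': 'IGN'} for u in urls],
    }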

View File

@@ -63,6 +63,14 @@ class ImdbListIE(InfoExtractor):
IE_NAME = 'imdb:list'
IE_DESC = 'Internet Movie Database lists'
_VALID_URL = r'http://www\.imdb\.com/list/(?P<id>[\da-zA-Z_-]{11})'
_TEST = {
'url': 'http://www.imdb.com/list/JFs9NWw6XI0',
'info_dict': {
'id': 'JFs9NWw6XI0',
'title': 'March 23, 2012 Releases',
},
'playlist_count': 7,
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)

View File

@@ -46,6 +46,30 @@ class InstagramUserIE(InfoExtractor):
_VALID_URL = r'http://instagram\.com/(?P<username>[^/]{2,})/?(?:$|[?#])'
IE_DESC = 'Instagram user profile'
IE_NAME = 'instagram:user'
_TEST = {
'url': 'http://instagram.com/porsche',
'info_dict': {
'id': 'porsche',
'title': 'porsche',
},
'playlist_mincount': 2,
'playlist': [{
'info_dict': {
'id': '614605558512799803_462752227',
'ext': 'mp4',
'title': '#Porsche Intelligent Performance.',
'thumbnail': 're:^https?://.*\.jpg',
'uploader': 'Porsche',
'uploader_id': 'porsche',
'timestamp': 1387486713,
'upload_date': '20131219',
},
}],
'params': {
'extract_flat': True,
'skip_download': True,
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
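
Note the extract_flat/skip_download params in the new Instagram profile test: they keep the playlist entries as unresolved url results so the test does not have to fetch every video. A hypothetical embedding snippet using the same options:

    import youtube_dl

    ydl = youtube_dl.YoutubeDL({'extract_flat': True, 'skip_download': True})
    info = ydl.extract_info('http://instagram.com/porsche', download=False)
    print(info['_type'], len(list(info['entries'])))  # 'playlist', <entry count>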

View File

@@ -1,3 +1,5 @@
from __future__ import unicode_literals
import re
from .common import InfoExtractor
@@ -12,12 +14,13 @@ class InternetVideoArchiveIE(InfoExtractor):
_VALID_URL = r'https?://video\.internetvideoarchive\.net/flash/players/.*?\?.*?publishedid.*?'
_TEST = {
u'url': u'http://video.internetvideoarchive.net/flash/players/flashconfiguration.aspx?customerid=69249&publishedid=452693&playerid=247',
u'file': u'452693.mp4',
u'info_dict': {
u'title': u'SKYFALL',
u'description': u'In SKYFALL, Bond\'s loyalty to M is tested as her past comes back to haunt her. As MI6 comes under attack, 007 must track down and destroy the threat, no matter how personal the cost.',
u'duration': 153,
'url': 'http://video.internetvideoarchive.net/flash/players/flashconfiguration.aspx?customerid=69249&publishedid=452693&playerid=247',
'info_dict': {
'id': '452693',
'ext': 'mp4',
'title': 'SKYFALL',
'description': 'In SKYFALL, Bond\'s loyalty to M is tested as her past comes back to haunt her. As MI6 comes under attack, 007 must track down and destroy the threat, no matter how personal the cost.',
'duration': 149,
},
}
@@ -42,7 +45,7 @@ class InternetVideoArchiveIE(InfoExtractor):
url = self._build_url(query)
flashconfiguration = self._download_xml(url, video_id,
u'Downloading flash configuration')
'Downloading flash configuration')
file_url = flashconfiguration.find('file').text
file_url = file_url.replace('/playlist.aspx', '/mrssplaylist.aspx')
# Replace some of the parameters in the query to get the best quality
@@ -51,7 +54,7 @@ class InternetVideoArchiveIE(InfoExtractor):
lambda m: self._clean_query(m.group()),
file_url)
info = self._download_xml(file_url, video_id,
u'Downloading video info')
'Downloading video info')
item = info.find('channel/item')
def _bp(p):

View File

@@ -127,6 +127,21 @@ class IviCompilationIE(InfoExtractor):
IE_DESC = 'ivi.ru compilations'
IE_NAME = 'ivi:compilation'
_VALID_URL = r'https?://(?:www\.)?ivi\.ru/watch/(?!\d+)(?P<compilationid>[a-z\d_-]+)(?:/season(?P<seasonid>\d+))?$'
_TESTS = [{
'url': 'http://www.ivi.ru/watch/dvoe_iz_lartsa',
'info_dict': {
'id': 'dvoe_iz_lartsa',
'title': 'Двое из ларца (2006 - 2008)',
},
'playlist_mincount': 24,
}, {
'url': 'http://www.ivi.ru/watch/dvoe_iz_lartsa/season1',
'info_dict': {
'id': 'dvoe_iz_lartsa/season1',
'title': 'Двое из ларца (2006 - 2008) 1 сезон',
},
'playlist_mincount': 12,
}]
def _extract_entries(self, html, compilation_id):
return [self.url_result('http://www.ivi.ru/watch/%s/%s' % (compilation_id, serie), 'Ivi')

View File

@@ -9,29 +9,50 @@ from ..utils import (
parse_iso8601,
determine_ext,
int_or_none,
float_or_none,
str_to_int,
)
class IzleseneIE(InfoExtractor):
_VALID_URL = r'https?://(?:(?:www|m)\.)?izlesene\.com/(?:video|embedplayer)/(?:[^/]+/)?(?P<id>[0-9]+)'
_STREAM_URL = 'http://panel.izlesene.com/api/streamurl/{id:}/{format:}'
_TEST = {
'url': 'http://www.izlesene.com/video/sevincten-cildirtan-dogum-gunu-hediyesi/7599694',
'md5': '4384f9f0ea65086734b881085ee05ac2',
'info_dict': {
'id': '7599694',
'ext': 'mp4',
'title': 'Sevinçten Çıldırtan Doğum Günü Hediyesi',
'description': 'Annesi oğluna doğum günü hediyesi olarak minecraft cd si alıyor, ve çocuk hunharca seviniyor',
'thumbnail': 're:^http://.*\.jpg',
'uploader_id': 'pelikzzle',
'timestamp': 1404298698,
'upload_date': '20140702',
'duration': 95.395,
'age_limit': 0,
}
}
_VALID_URL = r'''(?x)
https?://(?:(?:www|m)\.)?izlesene\.com/
(?:video|embedplayer)/(?:[^/]+/)?(?P<id>[0-9]+)
'''
_TESTS = [
{
'url': 'http://www.izlesene.com/video/sevincten-cildirtan-dogum-gunu-hediyesi/7599694',
'md5': '4384f9f0ea65086734b881085ee05ac2',
'info_dict': {
'id': '7599694',
'ext': 'mp4',
'title': 'Sevinçten Çıldırtan Doğum Günü Hediyesi',
'description': 'md5:253753e2655dde93f59f74b572454f6d',
'thumbnail': 're:^http://.*\.jpg',
'uploader_id': 'pelikzzle',
'timestamp': 1404298698,
'upload_date': '20140702',
'duration': 95.395,
'age_limit': 0,
}
},
{
'url': 'http://www.izlesene.com/video/tarkan-dortmund-2006-konseri/17997',
'md5': '97f09b6872bffa284cb7fa4f6910cb72',
'info_dict': {
'id': '17997',
'ext': 'mp4',
'title': 'Tarkan Dortmund 2006 Konseri',
'description': 'Tarkan Dortmund 2006 Konseri',
'thumbnail': 're:^http://.*\.jpg',
'uploader_id': 'parlayankiz',
'timestamp': 1163318593,
'upload_date': '20061112',
'duration': 253.666,
'age_limit': 0,
}
},
]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
@@ -42,21 +63,23 @@ class IzleseneIE(InfoExtractor):
title = self._og_search_title(webpage)
description = self._og_search_description(webpage)
thumbnail = self._og_search_thumbnail(webpage)
thumbnail = self._proto_relative_url(
self._og_search_thumbnail(webpage), scheme='http:')
uploader = self._html_search_regex(
r"adduserUsername\s*=\s*'([^']+)';", webpage, 'uploader', fatal=False, default='')
r"adduserUsername\s*=\s*'([^']+)';",
webpage, 'uploader', fatal=False, default='')
timestamp = parse_iso8601(self._html_search_meta(
'uploadDate', webpage, 'upload date', fatal=False))
duration = int_or_none(self._html_search_regex(
r'"videoduration"\s*:\s*"([^"]+)"', webpage, 'duration', fatal=False))
if duration:
duration /= 1000.0
duration = float_or_none(self._html_search_regex(
r'"videoduration"\s*:\s*"([^"]+)"',
webpage, 'duration', fatal=False), scale=1000)
view_count = str_to_int(get_element_by_id('videoViewCount', webpage))
comment_count = self._html_search_regex(
r'comment_count\s*=\s*\'([^\']+)\';', webpage, 'uploader', fatal=False)
r'comment_count\s*=\s*\'([^\']+)\';',
webpage, 'comment_count', fatal=False)
family_friendly = self._html_search_meta(
'isFamilyFriendly', webpage, 'age limit', fatal=False)
@@ -66,20 +89,26 @@ class IzleseneIE(InfoExtractor):
ext = determine_ext(content_url, 'mp4')
# Might be empty for some videos.
qualities = self._html_search_regex(
r'"quality"\s*:\s*"([^"]+)"', webpage, 'qualities', fatal=False, default='')
streams = self._html_search_regex(
r'"qualitylevel"\s*:\s*"([^"]+)"',
webpage, 'streams', fatal=False, default='')
formats = []
for quality in qualities.split('|'):
json = self._download_json(
self._STREAM_URL.format(id=video_id, format=quality), video_id,
note='Getting video URL for "%s" quality' % quality,
errnote='Failed to get video URL for "%s" quality' % quality
)
if streams:
for stream in streams.split('|'):
quality, url = re.search(r'\[(\w+)\](.+)', stream).groups()
formats.append({
'format_id': '%sp' % quality if quality else 'sd',
'url': url,
'ext': ext,
})
else:
stream_url = self._search_regex(
r'"streamurl"\s?:\s?"([^"]+)"', webpage, 'stream URL')
formats.append({
'url': json.get('streamurl'),
'format_id': 'sd',
'url': stream_url,
'ext': ext,
'format_id': '%sp' % quality if quality else 'sd',
})
return {
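
The rewritten Izlesene code parses a pipe-separated "qualitylevel" string in which each entry carries its quality in brackets; the wire format shown below is inferred from the parsing regex:

    import re

    streams = '[480]http://example.invalid/480.mp4|[720]http://example.invalid/720.mp4'
    for stream in streams.split('|'):
        quality, url = re.search(r'\[(\w+)\](.+)', stream).groups()
        print('%sp' % quality, url)
    # 480p http://example.invalid/480.mp4
    # 720p http://example.invalid/720.mp4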

View File

@@ -1,8 +1,6 @@
# coding=utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
int_or_none,
@@ -12,14 +10,14 @@ from ..utils import (
class JpopsukiIE(InfoExtractor):
IE_NAME = 'jpopsuki.tv'
_VALID_URL = r'https?://(?:www\.)?jpopsuki\.tv/video/(.*?)/(?P<id>\S+)'
_VALID_URL = r'https?://(?:www\.)?jpopsuki\.tv/(?:category/)?video/[^/]+/(?P<id>\S+)'
_TEST = {
'url': 'http://www.jpopsuki.tv/video/ayumi-hamasaki---evolution/00be659d23b0b40508169cdee4545771',
'md5': '88018c0c1a9b1387940e90ec9e7e198e',
'file': '00be659d23b0b40508169cdee4545771.mp4',
'info_dict': {
'id': '00be659d23b0b40508169cdee4545771',
'ext': 'mp4',
'title': 'ayumi hamasaki - evolution',
'description': 'Release date: 2001.01.31\r\n浜崎あゆみ - evolution',
'thumbnail': 'http://www.jpopsuki.tv/cache/89722c74d2a2ebe58bcac65321c115b2.jpg',
@@ -30,8 +28,7 @@ class JpopsukiIE(InfoExtractor):
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
@@ -47,11 +44,9 @@ class JpopsukiIE(InfoExtractor):
uploader_id = self._html_search_regex(
r'<li>from: <a href="/user/view/user/\S*?/uid/(\d*)',
webpage, 'video uploader_id', fatal=False)
upload_date = self._html_search_regex(
upload_date = unified_strdate(self._html_search_regex(
r'<li>uploaded: (.*?)</li>', webpage, 'video upload_date',
fatal=False)
if upload_date is not None:
upload_date = unified_strdate(upload_date)
fatal=False))
view_count_str = self._html_search_regex(
r'<li>Hits: ([0-9]+?)</li>', webpage, 'video view_count',
fatal=False)
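
The Jpopsuki cleanup works because unified_strdate is None-safe, so the explicit `if upload_date is not None` guard around the non-fatal regex was redundant:

    from youtube_dl.utils import unified_strdate

    print(unified_strdate('2012-06-13'))  # '20120613'
    print(unified_strdate(None))          # None -- no guard needed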

View File

@@ -11,10 +11,9 @@ from ..utils import (
class JukeboxIE(InfoExtractor):
_VALID_URL = r'^http://www\.jukebox?\..+?\/.+[,](?P<video_id>[a-z0-9\-]+)\.html'
_VALID_URL = r'^http://www\.jukebox?\..+?\/.+[,](?P<id>[a-z0-9\-]+)\.html'
_TEST = {
'url': 'http://www.jukebox.es/kosheen/videoclip,pride,r303r.html',
'md5': '1574e9b4d6438446d5b7dbcdf2786276',
'info_dict': {
'id': 'r303r',
'ext': 'flv',
@@ -24,8 +23,7 @@ class JukeboxIE(InfoExtractor):
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('video_id')
video_id = self._match_id(url)
html = self._download_webpage(url, video_id)
iframe_url = unescapeHTML(self._search_regex(r'<iframe .*src="([^"]*)"', html, 'iframe url'))

View File

@@ -9,21 +9,30 @@ from ..utils import (
class KhanAcademyIE(InfoExtractor):
_VALID_URL = r'^https?://(?:www\.)?khanacademy\.org/(?P<key>[^/]+)/(?:[^/]+/){,2}(?P<id>[^?#/]+)(?:$|[?#])'
_VALID_URL = r'^https?://(?:(?:www|api)\.)?khanacademy\.org/(?P<key>[^/]+)/(?:[^/]+/){,2}(?P<id>[^?#/]+)(?:$|[?#])'
IE_NAME = 'KhanAcademy'
_TEST = {
_TESTS = [{
'url': 'http://www.khanacademy.org/video/one-time-pad',
'file': 'one-time-pad.mp4',
'md5': '7021db7f2d47d4fff89b13177cb1e8f4',
'info_dict': {
'id': 'one-time-pad',
'ext': 'mp4',
'title': 'The one-time pad',
'description': 'The perfect cipher',
'duration': 176,
'uploader': 'Brit Cruise',
'upload_date': '20120411',
}
}
}, {
'url': 'https://www.khanacademy.org/math/applied-math/cryptography',
'info_dict': {
'id': 'cryptography',
'title': 'Journey into cryptography',
'description': 'How have humans protected their secret messages through history? What has changed today?',
},
'playlist_mincount': 3,
}]
def _real_extract(self, url):
m = re.match(self._VALID_URL, url)

View File

@@ -34,7 +34,7 @@ class KontrTubeIE(InfoExtractor):
video_url = self._html_search_regex(r"video_url: '(.+?)/?',", webpage, 'video URL')
thumbnail = self._html_search_regex(r"preview_url: '(.+?)/?',", webpage, 'video thumbnail', fatal=False)
title = self._html_search_regex(
r'<title>(.+?) - Труба зовёт - Интересный видеохостинг</title>', webpage, 'video title')
r'<title>(.+?)</title>', webpage, 'video title')
description = self._html_search_meta('description', webpage, 'video description')
mobj = re.search(

Some files were not shown because too many files have changed in this diff.