Compare commits

30 commits, 2013.09.16 ... 2013.09.20

Commit SHA1s:

58f289d013
3d60bb96e1
38d025b3f0
c40c6aaaaa
1a810f0d4e
63037593c0
7a878d47fa
bc4b900898
c5e743f66f
6c36d8d6fb
71c82637e7
2dad310e2c
d0ae9e3a8d
a19413c311
1ef80b55dd
eb03f4dad3
830dd1944a
1237c9a3a5
5d13df79a5
6523223a4c
4a67aafb7e
f3f34c5b0f
6ae8ee3f54
e8f8e80097
4dc0ff3ecf
4b6462fc1e
c4ece78564
0761d02b0b
71c107fc57
7459e3a290

README.md

@@ -19,7 +19,8 @@ which means you can modify it, redistribute it or use it however you like.
     -U, --update               update this program to latest version. Make sure
                                that you have sufficient permissions (run with
                                sudo if needed)
-    -i, --ignore-errors        continue on download errors
+    -i, --ignore-errors        continue on download errors, for example to
+                               skip unavailable videos in a playlist
     --dump-user-agent          display the current browser identification
     --user-agent UA            specify a custom user agent
     --referer REF              specify a custom referer, use if the video access

devscripts/youtube_genalgo.py

@@ -1,13 +1,20 @@
 #!/usr/bin/env python
+# encoding: utf-8

 # Generate youtube signature algorithm from test cases

 import sys

 tests = [
+    # 93 - vfl79wBKW 2013/07/20
+    (u"qwertyuioplkjhgfdsazxcvbnm1234567890QWERTYUIOPLKJHGFDSAZXCVBNM!@#$%^&*()_-+={[]}|:;?/>.<'`~\"€",
+     u".>/?;:|}][{=+-_)(*&^%$#@!MNBVCXZASDFGHJKLPOIUYTREWQ098765'321mnbvcxzasdfghjklpoiu"),
     # 92 - vflQw-fB4 2013/07/17
     ("qwertyuioplkjhgfdsazxcvbnm1234567890QWERTYUIOPLKJHGFDSAZXCVBNM!@#$%^&*()_-+={[]}|:;?/>.<'`~\"",
      "mrtyuioplkjhgfdsazxcvbnq1234567890QWERTY}IOPLKJHGFDSAZXCVBNM!@#$%^&*()_-+={[]\"|:;"),
+    # 91 - vfl79wBKW 2013/07/20 (sporadic)
+    ("qwertyuioplkjhgfdsazxcvbnm1234567890QWERTYUIOPLKJHGFDSAZXCVBNM!@#$%^&*()_-+={[]}|:;?/>.<'`~",
+     "/?;:|}][{=+-_)(*&^%$#@!MNBVCXZASDFGHJKLPOIUYTREWQ09876543.1mnbvcxzasdfghjklpoiu"),
     # 90
     ("qwertyuioplkjhgfdsazxcvbnm1234567890QWERTYUIOPLKJHGFDSAZXCVBNM!@#$%^&*()_-+={[]}|:;?/>.<'`",
      "mrtyuioplkjhgfdsazxcvbne1234567890QWER[YUIOPLKJHGFDSAZXCVBNM!@#$%^&*()_-+={`]}|"),
@@ -24,8 +31,8 @@ tests = [
     ("qwertyuioplkjhgfdsazxcvbnm1234567890QWERTYUIOPLKJHGFDSAZXCVBNM!@#$%^&*()_-+={[|};?/>.<",
      "yuioplkjhgfdsazxcvbnm12345678q0QWrRTYUIOELKJHGFD-AZXCVBNM!@#$%^&*()_<+={[|};?/>.S"),
     # 85 - vflkuzxcs 2013/09/11
-    ("qwertyuioplkjhgfdsazxcvbnm1234567890QWERTYUIOPLKJHGFDSAZXCVBNM!@#$%^&*()_-+={[};?/>.<",
-     "T>/?;}[{=+-_)(*&^%$#@!MNBVCXZASDFGHJKLPOvUY.REWQ0987654321mnbqcxzasdfghjklpoiuytr"),
+    ('0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!"#$%&\'()*+,-./:;<=>?@[',
+     '3456789a0cdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRS[UVWXYZ!"#$%&\'()*+,-./:;<=>?@'),
     # 84 - vflg0g8PQ 2013/08/29 (sporadic)
     ("qwertyuioplkjhgfdsazxcvbnm1234567890QWERTYUIOPLKJHGFDSAZXCVBNM!@#$%^&*()_-+={[};?>.<",
      ">?;}[{=+-_)(*&^%$#@!MNBVCXZASDFGHJKLPOIUYTREWq0987654321mnbvcxzasdfghjklpoiuytr"),

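Each tuple above pairs an encrypted signature with the plaintext it should decrypt to, so any candidate decryption rule can be verified mechanically against the table. A minimal sketch of such a check, assuming it is appended to devscripts/youtube_genalgo.py where `tests` is defined (the helper name is illustrative, not part of the diff):

```python
def check_decrypt(decrypt, test_cases):
    # Run a candidate decryption function over (encrypted, expected) pairs.
    for encrypted, expected in test_cases:
        got = decrypt(encrypted)
        if got != expected:
            raise AssertionError('len %d: expected %r, got %r'
                                 % (len(encrypted), expected, got))

# e.g. check only the newly added 93-character case against a candidate rule,
# here the branch that youtube.py gains later in this compare:
check_decrypt(lambda s: s[86:29:-1] + s[88] + s[28:5:-1],
              [t for t in tests if len(t[0]) == 93])
```
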
youtube_dl/YoutubeDL.py

@@ -142,14 +142,10 @@ class YoutubeDL(object):

     def to_screen(self, message, skip_eol=False):
         """Print message to stdout if not in quiet mode."""
-        assert type(message) == type(u'')
         if not self.params.get('quiet', False):
             terminator = [u'\n', u''][skip_eol]
             output = message + terminator
-            if 'b' in getattr(self._screen_file, 'mode', '') or sys.version_info[0] < 3: # Python 2 lies about the mode of sys.stdout/sys.stderr
-                output = output.encode(preferredencoding(), 'ignore')
-            self._screen_file.write(output)
-            self._screen_file.flush()
+            write_string(output, self._screen_file)

     def to_stderr(self, message):
         """Print message to stderr."""
@@ -548,11 +544,11 @@ class YoutubeDL(object):
             else:
                 try:
                     success = self.fd._do_download(filename, info_dict)
-                except (OSError, IOError) as err:
-                    raise UnavailableVideoError(err)
                 except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                     self.report_error(u'unable to download video data: %s' % str(err))
                     return
+                except (OSError, IOError) as err:
+                    raise UnavailableVideoError(err)
                 except (ContentTooShortError, ) as err:
                     self.report_error(u'content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded))
                     return

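Two details in the YoutubeDL.py hunks are easy to miss: to_screen now delegates encoding and flushing to the write_string helper added in youtube_dl/utils.py further down in this compare, and the (OSError, IOError) clause moves below the URLError clause. The order matters because URLError derives from IOError/OSError, so the broader clause, if listed first, would also catch network errors before the specific 'unable to download video data' report could run. A quick standard-library check of that subclass relationship (not youtube-dl code):

```python
# URLError subclasses IOError on Python 2 and OSError on Python 3
# (where IOError is an alias of OSError), so except-clause order matters.
try:
    from urllib.error import URLError   # Python 3
except ImportError:
    from urllib2 import URLError        # Python 2

print(issubclass(URLError, IOError))  # True on both major versions
```
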
youtube_dl/__init__.py

@@ -30,6 +30,7 @@ __authors__ = (
     'Pierre Rudloff',
     'Huarong Huo',
     'Ismael Mejía',
+    'Steffan \'Ruirize\' James',
 )

 __license__ = 'Public Domain'
@@ -149,7 +150,7 @@ def parseOpts(overrideArguments=None):
     general.add_option('-U', '--update',
             action='store_true', dest='update_self', help='update this program to latest version. Make sure that you have sufficient permissions (run with sudo if needed)')
     general.add_option('-i', '--ignore-errors',
-            action='store_true', dest='ignoreerrors', help='continue on download errors', default=False)
+            action='store_true', dest='ignoreerrors', help='continue on download errors, for example to skip unavailable videos in a playlist', default=False)
     general.add_option('--dump-user-agent',
             action='store_true', dest='dump_user_agent',
             help='display the current browser identification', default=False)
@@ -354,7 +355,7 @@ def parseOpts(overrideArguments=None):
     if overrideArguments is not None:
         opts, args = parser.parse_args(overrideArguments)
         if opts.verbose:
-            sys.stderr.write(u'[debug] Override config: ' + repr(overrideArguments) + '\n')
+            write_string(u'[debug] Override config: ' + repr(overrideArguments) + '\n')
     else:
         xdg_config_home = os.environ.get('XDG_CONFIG_HOME')
         if xdg_config_home:
@@ -367,9 +368,9 @@ def parseOpts(overrideArguments=None):
         argv = systemConf + userConf + commandLineConf
         opts, args = parser.parse_args(argv)
         if opts.verbose:
-            sys.stderr.write(u'[debug] System config: ' + repr(_hide_login_info(systemConf)) + '\n')
-            sys.stderr.write(u'[debug] User config: ' + repr(_hide_login_info(userConf)) + '\n')
-            sys.stderr.write(u'[debug] Command-line args: ' + repr(_hide_login_info(commandLineConf)) + '\n')
+            write_string(u'[debug] System config: ' + repr(_hide_login_info(systemConf)) + '\n')
+            write_string(u'[debug] User config: ' + repr(_hide_login_info(userConf)) + '\n')
+            write_string(u'[debug] Command-line args: ' + repr(_hide_login_info(commandLineConf)) + '\n')

     return parser, opts, args

@@ -392,7 +393,7 @@ def _real_main(argv=None):
         except (IOError, OSError) as err:
             if opts.verbose:
                 traceback.print_exc()
-            sys.stderr.write(u'ERROR: unable to open cookie file\n')
+            write_string(u'ERROR: unable to open cookie file\n')
             sys.exit(101)
     # Set user agent
     if opts.user_agent is not None:
@@ -419,7 +420,7 @@ def _real_main(argv=None):
             batchurls = [x.strip() for x in batchurls]
             batchurls = [x for x in batchurls if len(x) > 0 and not re.search(r'^[#/;]', x)]
             if opts.verbose:
-                sys.stderr.write(u'[debug] Batch file urls: ' + repr(batchurls) + u'\n')
+                write_string(u'[debug] Batch file urls: ' + repr(batchurls) + u'\n')
         except IOError:
             sys.exit(u'ERROR: batch file could not be read')
     all_urls = batchurls + args
@@ -611,7 +612,7 @@ def _real_main(argv=None):
         })

     if opts.verbose:
-        sys.stderr.write(u'[debug] youtube-dl version ' + __version__ + u'\n')
+        write_string(u'[debug] youtube-dl version ' + __version__ + u'\n')
         try:
             sp = subprocess.Popen(
                 ['git', 'rev-parse', '--short', 'HEAD'],
@@ -620,14 +621,14 @@ def _real_main(argv=None):
             out, err = sp.communicate()
             out = out.decode().strip()
             if re.match('[0-9a-f]+', out):
-                sys.stderr.write(u'[debug] Git HEAD: ' + out + u'\n')
+                write_string(u'[debug] Git HEAD: ' + out + u'\n')
         except:
             try:
                 sys.exc_clear()
             except:
                 pass
-        sys.stderr.write(u'[debug] Python version %s - %s' %(platform.python_version(), platform_name()) + u'\n')
-        sys.stderr.write(u'[debug] Proxy map: ' + str(proxy_handler.proxies) + u'\n')
+        write_string(u'[debug] Python version %s - %s' %(platform.python_version(), platform_name()) + u'\n')
+        write_string(u'[debug] Proxy map: ' + str(proxy_handler.proxies) + u'\n')

     ydl.add_default_info_extractors()

youtube_dl/extractor/__init__.py

@@ -6,6 +6,7 @@ from .arte import ArteTvIE
 from .auengine import AUEngineIE
 from .bandcamp import BandcampIE
 from .bliptv import BlipTVIE, BlipTVUserIE
+from .bloomberg import BloombergIE
 from .breakcom import BreakIE
 from .brightcove import BrightcoveIE
 from .c56 import C56IE
@@ -23,11 +24,16 @@ from .depositfiles import DepositFilesIE
 from .dotsub import DotsubIE
 from .dreisat import DreiSatIE
 from .defense import DefenseGouvFrIE
+from .ebaumsworld import EbaumsWorldIE
 from .ehow import EHowIE
 from .eighttracks import EightTracksIE
 from .escapist import EscapistIE
 from .exfm import ExfmIE
 from .facebook import FacebookIE
+from .fktv import (
+    FKTVIE,
+    FKTVPosteckeIE,
+)
 from .flickr import FlickrIE
 from .francetv import (
     PluzzIE,
@@ -67,6 +73,7 @@ from .myvideo import MyVideoIE
 from .naver import NaverIE
 from .nba import NBAIE
 from .nbc import NBCNewsIE
+from .newgrounds import NewgroundsIE
 from .ooyala import OoyalaIE
 from .orf import ORFIE
 from .pbs import PBSIE
@@ -103,6 +110,7 @@ from .vbox7 import Vbox7IE
 from .veehd import VeeHDIE
 from .veoh import VeohIE
 from .vevo import VevoIE
+from .vice import ViceIE
 from .videofyme import VideofyMeIE
 from .vimeo import VimeoIE, VimeoChannelIE
 from .vine import VineIE

youtube_dl/extractor/bloomberg.py (new file, 27 lines)

@@ -0,0 +1,27 @@
+import re
+
+from .common import InfoExtractor
+
+
+class BloombergIE(InfoExtractor):
+    _VALID_URL = r'https?://www\.bloomberg\.com/video/(?P<name>.+?).html'
+
+    _TEST = {
+        u'url': u'http://www.bloomberg.com/video/shah-s-presentation-on-foreign-exchange-strategies-qurhIVlJSB6hzkVi229d8g.html',
+        u'file': u'12bzhqZTqQHmmlA8I-i0NpzJgcG5NNYX.mp4',
+        u'info_dict': {
+            u'title': u'Shah\'s Presentation on Foreign-Exchange Strategies',
+            u'description': u'md5:abc86e5236f9f0e4866c59ad36736686',
+        },
+        u'params': {
+            # Requires ffmpeg (m3u8 manifest)
+            u'skip_download': True,
+        },
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        name = mobj.group('name')
+        webpage = self._download_webpage(url, name)
+        ooyala_url = self._og_search_video_url(webpage)
+        return self.url_result(ooyala_url, ie='Ooyala')

youtube_dl/extractor/ebaumsworld.py (new file, 37 lines)

@@ -0,0 +1,37 @@
+import re
+import xml.etree.ElementTree
+
+from .common import InfoExtractor
+from ..utils import determine_ext
+
+
+class EbaumsWorldIE(InfoExtractor):
+    _VALID_URL = r'https?://www\.ebaumsworld\.com/video/watch/(?P<id>\d+)'
+
+    _TEST = {
+        u'url': u'http://www.ebaumsworld.com/video/watch/83367677/',
+        u'file': u'83367677.mp4',
+        u'info_dict': {
+            u'title': u'A Giant Python Opens The Door',
+            u'description': u'This is how nightmares start...',
+            u'uploader': u'jihadpizza',
+        },
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+        config_xml = self._download_webpage(
+            'http://www.ebaumsworld.com/video/player/%s' % video_id, video_id)
+        config = xml.etree.ElementTree.fromstring(config_xml.encode('utf-8'))
+        video_url = config.find('file').text
+
+        return {
+            'id': video_id,
+            'title': config.find('title').text,
+            'url': video_url,
+            'ext': determine_ext(video_url),
+            'description': config.find('description').text,
+            'thumbnail': config.find('image').text,
+            'uploader': config.find('username').text,
+        }

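EbaumsWorldIE fetches a per-video player config and reads flat child elements straight off the XML root. A rough sketch of that parsing step; the sample document below is hypothetical, since the diff only shows which field names are read, not the real payload:

```python
import xml.etree.ElementTree as ET

# Hypothetical player config: element names match what the extractor reads,
# the URLs are invented for illustration.
sample = """<video>
  <file>http://media.example/83367677.mp4</file>
  <title>A Giant Python Opens The Door</title>
  <description>This is how nightmares start...</description>
  <image>http://media.example/83367677.jpg</image>
  <username>jihadpizza</username>
</video>"""

config = ET.fromstring(sample)
print(config.find('title').text)   # A Giant Python Opens The Door
print(config.find('file').text)    # http://media.example/83367677.mp4
```
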
youtube_dl/extractor/fktv.py (new file, 79 lines)

@@ -0,0 +1,79 @@
+import re
+import random
+import json
+
+from .common import InfoExtractor
+from ..utils import (
+    determine_ext,
+    get_element_by_id,
+    clean_html,
+)
+
+
+class FKTVIE(InfoExtractor):
+    IE_NAME = u'fernsehkritik.tv'
+    _VALID_URL = r'(?:http://)?(?:www\.)?fernsehkritik.tv/folge-(?P<ep>[0-9]+)(?:/.*)?'
+
+    _TEST = {
+        u'url': u'http://fernsehkritik.tv/folge-1',
+        u'file': u'00011.flv',
+        u'info_dict': {
+            u'title': u'Folge 1 vom 10. April 2007',
+            u'description': u'md5:fb4818139c7cfe6907d4b83412a6864f',
+        },
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        episode = int(mobj.group('ep'))
+
+        server = random.randint(2, 4)
+        video_thumbnail = 'http://fernsehkritik.tv/images/magazin/folge%d.jpg' % episode
+        start_webpage = self._download_webpage('http://fernsehkritik.tv/folge-%d/Start' % episode,
+            episode)
+        playlist = self._search_regex(r'playlist = (\[.*?\]);', start_webpage,
+            u'playlist', flags=re.DOTALL)
+        files = json.loads(re.sub('{[^{}]*?}', '{}', playlist))
+        # TODO: return a single multipart video
+        videos = []
+        for i, _ in enumerate(files, 1):
+            video_id = '%04d%d' % (episode, i)
+            video_url = 'http://dl%d.fernsehkritik.tv/fernsehkritik%d%s.flv' % (server, episode, '' if i == 1 else '-%d' % i)
+            video_title = 'Fernsehkritik %d.%d' % (episode, i)
+            videos.append({
+                'id': video_id,
+                'url': video_url,
+                'ext': determine_ext(video_url),
+                'title': clean_html(get_element_by_id('eptitle', start_webpage)),
+                'description': clean_html(get_element_by_id('contentlist', start_webpage)),
+                'thumbnail': video_thumbnail
+            })
+        return videos
+
+
+class FKTVPosteckeIE(InfoExtractor):
+    IE_NAME = u'fernsehkritik.tv:postecke'
+    _VALID_URL = r'(?:http://)?(?:www\.)?fernsehkritik.tv/inline-video/postecke.php\?(.*&)?ep=(?P<ep>[0-9]+)(&|$)'
+    _TEST = {
+        u'url': u'http://fernsehkritik.tv/inline-video/postecke.php?iframe=true&width=625&height=440&ep=120',
+        u'file': u'0120.flv',
+        u'md5': u'262f0adbac80317412f7e57b4808e5c4',
+        u'info_dict': {
+            u"title": u"Postecke 120"
+        }
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        episode = int(mobj.group('ep'))
+
+        server = random.randint(2, 4)
+        video_id = '%04d' % episode
+        video_url = 'http://dl%d.fernsehkritik.tv/postecke/postecke%d.flv' % (server, episode)
+        video_title = 'Postecke %d' % episode
+        return {
+            'id': video_id,
+            'url': video_url,
+            'ext': determine_ext(video_url),
+            'title': video_title,
+        }

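FKTVIE only needs the number of playlist parts, so before calling json.loads it blanks every innermost {...} object with re.sub and keeps just the array shape; the per-part URLs are then rebuilt from the episode number. A standalone illustration of that trick with an invented playlist literal:

```python
import json
import re

# Hypothetical playlist array as it might appear in the page's JavaScript;
# only the number of entries matters to the extractor.
playlist = '[{"file": "part1.flv", "image": "a.jpg"}, {"file": "part2.flv", "image": "b.jpg"}]'

files = json.loads(re.sub('{[^{}]*?}', '{}', playlist))
print(len(files))  # 2 -> two parts to enumerate
```
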
youtube_dl/extractor/francetv.py

@@ -34,17 +34,7 @@ class PluzzIE(FranceTVBaseInfoExtractor):
     IE_NAME = u'pluzz.francetv.fr'
     _VALID_URL = r'https?://pluzz\.francetv\.fr/videos/(.*?)\.html'

-    _TEST = {
-        u'url': u'http://pluzz.francetv.fr/videos/allo_rufo_saison5_,88439064.html',
-        u'file': u'88439064.mp4',
-        u'info_dict': {
-            u'title': u'Allô Rufo',
-            u'description': u'md5:d909f1ebdf963814b65772aea250400e',
-        },
-        u'params': {
-            u'skip_download': True,
-        },
-    }
+    # Can't use tests, videos expire in 7 days

     def _real_extract(self, url):
         title = re.match(self._VALID_URL, url).group(1)

youtube_dl/extractor/funnyordie.py

@@ -21,7 +21,8 @@ class FunnyOrDieIE(InfoExtractor):
         video_id = mobj.group('id')
         webpage = self._download_webpage(url, video_id)

-        video_url = self._search_regex(r'type="video/mp4" src="(.*?)"',
+        video_url = self._search_regex(
+            [r'type="video/mp4" src="(.*?)"', r'src="([^>]*?)" type=\'video/mp4\''],
             webpage, u'video URL', flags=re.DOTALL)

         info = {

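Passing a list of patterns to _search_regex lets the extractor accept either attribute order in the <source> tag; the first pattern that matches wins. A generic sketch of that fallback idea (not youtube-dl's own helper):

```python
import re

def search_first(patterns, text, flags=0):
    """Return group(1) of the first pattern that matches, else None."""
    for pattern in patterns:
        mobj = re.search(pattern, text, flags)
        if mobj:
            return mobj.group(1)
    return None

html_a = '<source type="video/mp4" src="http://example.invalid/a.mp4">'
html_b = "<source src=\"http://example.invalid/b.mp4\" type='video/mp4'>"
patterns = [r'type="video/mp4" src="(.*?)"', r'src="([^>]*?)" type=\'video/mp4\'']
print(search_first(patterns, html_a, re.DOTALL))  # http://example.invalid/a.mp4
print(search_first(patterns, html_b, re.DOTALL))  # http://example.invalid/b.mp4
```
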
youtube_dl/extractor/hotnewhiphop.py

@@ -7,11 +7,11 @@ from .common import InfoExtractor
 class HotNewHipHopIE(InfoExtractor):
     _VALID_URL = r'http://www\.hotnewhiphop.com/.*\.(?P<id>.*)\.html'
     _TEST = {
-        u'url': u"http://www.hotnewhiphop.com/freddie-gibbs-lay-it-down-song.1435540.html'",
+        u'url': u"http://www.hotnewhiphop.com/freddie-gibbs-lay-it-down-song.1435540.html",
         u'file': u'1435540.mp3',
         u'md5': u'2c2cd2f76ef11a9b3b581e8b232f3d96',
         u'info_dict': {
-            u"title": u"Freddie Gibbs Songs - Lay It Down"
+            u"title": u"Freddie Gibbs - Lay It Down"
         }
     }

youtube_dl/extractor/newgrounds.py
Normal file
38
youtube_dl/extractor/newgrounds.py
Normal file
@@ -0,0 +1,38 @@
|
|||||||
|
import json
|
||||||
|
import re
|
||||||
|
|
||||||
|
from .common import InfoExtractor
|
||||||
|
from ..utils import determine_ext
|
||||||
|
|
||||||
|
|
||||||
|
class NewgroundsIE(InfoExtractor):
|
||||||
|
_VALID_URL = r'(?:https?://)?(?:www\.)?newgrounds\.com/audio/listen/(?P<id>\d+)'
|
||||||
|
_TEST = {
|
||||||
|
u'url': u'http://www.newgrounds.com/audio/listen/549479',
|
||||||
|
u'file': u'549479.mp3',
|
||||||
|
u'md5': u'fe6033d297591288fa1c1f780386f07a',
|
||||||
|
u'info_dict': {
|
||||||
|
u"title": u"B7 - BusMode",
|
||||||
|
u"uploader": u"Burn7",
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
def _real_extract(self, url):
|
||||||
|
mobj = re.match(self._VALID_URL, url)
|
||||||
|
music_id = mobj.group('id')
|
||||||
|
webpage = self._download_webpage(url, music_id)
|
||||||
|
|
||||||
|
title = self._html_search_regex(r',"name":"([^"]+)",', webpage, u'music title')
|
||||||
|
uploader = self._html_search_regex(r',"artist":"([^"]+)",', webpage, u'music uploader')
|
||||||
|
|
||||||
|
music_url_json_string = self._html_search_regex(r'({"url":"[^"]+"),', webpage, u'music url') + '}'
|
||||||
|
music_url_json = json.loads(music_url_json_string)
|
||||||
|
music_url = music_url_json['url']
|
||||||
|
|
||||||
|
return {
|
||||||
|
'id': music_id,
|
||||||
|
'title': title,
|
||||||
|
'url': music_url,
|
||||||
|
'uploader': uploader,
|
||||||
|
'ext': determine_ext(music_url),
|
||||||
|
}
|
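NewgroundsIE captures a truncated {"url": ...} fragment from the embedded player JSON and simply appends the missing closing brace before parsing it. A tiny illustration with an invented fragment:

```python
import json

# Hypothetical fragment as the regex r'({"url":"[^"]+"),' would capture it.
fragment = '{"url":"http://audio.example/549479.mp3"'
print(json.loads(fragment + '}')['url'])  # http://audio.example/549479.mp3
```
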
youtube_dl/extractor/ooyala.py

@@ -18,11 +18,15 @@ class OoyalaIE(InfoExtractor):
         },
     }

+    @staticmethod
+    def _url_for_embed_code(embed_code):
+        return 'http://player.ooyala.com/player.js?embedCode=%s' % embed_code
+
     def _extract_result(self, info, more_info):
         return {'id': info['embedCode'],
                 'ext': 'mp4',
                 'title': unescapeHTML(info['title']),
-                'url': info['url'],
+                'url': info.get('ipad_url') or info['url'],
                 'description': unescapeHTML(more_info['description']),
                 'thumbnail': more_info['promo'],
                 }
@@ -35,7 +39,9 @@ class OoyalaIE(InfoExtractor):
         mobile_url = self._search_regex(r'mobile_player_url="(.+?)&device="',
                                         player, u'mobile player url')
         mobile_player = self._download_webpage(mobile_url, embedCode)
-        videos_info = self._search_regex(r'eval\("\((\[{.*?stream_redirect.*?}\])\)"\);', mobile_player, u'info').replace('\\"','"')
+        videos_info = self._search_regex(
+            r'var streams=window.oo_testEnv\?\[\]:eval\("\((\[{.*?}\])\)"\);',
+            mobile_player, u'info').replace('\\"','"')
         videos_more_info = self._search_regex(r'eval\("\(({.*?\\"promo\\".*?})\)"', mobile_player, u'more info').replace('\\"','"')
         videos_info = json.loads(videos_info)
         videos_more_info =json.loads(videos_more_info)

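The new static _url_for_embed_code helper lets other extractors build an Ooyala player URL from a bare embed code (ViceIE below uses it as a fallback), and _extract_result now prefers the iPad stream when the mobile player exposes one. A short usage sketch, assuming the youtube_dl package is importable:

```python
from youtube_dl.extractor.ooyala import OoyalaIE

# Build a player URL from an embed code scraped elsewhere (the code is illustrative).
print(OoyalaIE._url_for_embed_code('abc123DEF456'))
# -> http://player.ooyala.com/player.js?embedCode=abc123DEF456

# The ipad_url preference is an ordinary dict lookup with a fallback:
info = {'url': 'http://example.invalid/stream.mp4'}   # no 'ipad_url' key present
print(info.get('ipad_url') or info['url'])            # falls back to 'url'
```
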
youtube_dl/extractor/vice.py (new file, 38 lines)

@@ -0,0 +1,38 @@
+import re
+
+from .common import InfoExtractor
+from .ooyala import OoyalaIE
+from ..utils import ExtractorError
+
+
+class ViceIE(InfoExtractor):
+    _VALID_URL = r'http://www.vice.com/.*?/(?P<name>.+)'
+
+    _TEST = {
+        u'url': u'http://www.vice.com/Fringes/cowboy-capitalists-part-1',
+        u'file': u'43cW1mYzpia9IlestBjVpd23Yu3afAfp.mp4',
+        u'info_dict': {
+            u'title': u'VICE_COWBOYCAPITALISTS_PART01_v1_VICE_WM_1080p.mov',
+        },
+        u'params': {
+            # Requires ffmpeg (m3u8 manifest)
+            u'skip_download': True,
+        },
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        name = mobj.group('name')
+        webpage = self._download_webpage(url, name)
+        try:
+            ooyala_url = self._og_search_video_url(webpage)
+        except ExtractorError:
+            try:
+                embed_code = self._search_regex(
+                    r'OO.Player.create\(\'ooyalaplayer\', \'(.+?)\'', webpage,
+                    u'ooyala embed code')
+                ooyala_url = OoyalaIE._url_for_embed_code(embed_code)
+            except ExtractorError:
+                raise ExtractorError(u'The page doesn\'t contain a video', expected=True)
+        return self.url_result(ooyala_url, ie='Ooyala')
+

youtube_dl/extractor/xhamster.py

@@ -11,8 +11,8 @@ from ..utils import (

 class XHamsterIE(InfoExtractor):
     """Information Extractor for xHamster"""
-    _VALID_URL = r'(?:http://)?(?:www.)?xhamster\.com/movies/(?P<id>[0-9]+)/.*\.html'
-    _TEST = {
+    _VALID_URL = r'(?:http://)?(?:www\.)?xhamster\.com/movies/(?P<id>[0-9]+)/(?P<seo>.+?)\.html(?:\?.*)?'
+    _TESTS = [{
         u'url': u'http://xhamster.com/movies/1509445/femaleagent_shy_beauty_takes_the_bait.html',
         u'file': u'1509445.flv',
         u'md5': u'9f48e0e8d58e3076bb236ff412ab62fa',
@@ -21,13 +21,24 @@ class XHamsterIE(InfoExtractor):
             u"uploader_id": u"Ruseful2011",
             u"title": u"FemaleAgent Shy beauty takes the bait"
         }
-    }
+    },
+    {
+        u'url': u'http://xhamster.com/movies/2221348/britney_spears_sexy_booty.html?hd',
+        u'file': u'2221348.flv',
+        u'md5': u'e767b9475de189320f691f49c679c4c7',
+        u'info_dict': {
+            u"upload_date": u"20130914",
+            u"uploader_id": u"jojo747400",
+            u"title": u"Britney Spears Sexy Booty"
+        }
+    }]

     def _real_extract(self,url):
         mobj = re.match(self._VALID_URL, url)

         video_id = mobj.group('id')
-        mrss_url = 'http://xhamster.com/movies/%s/.html?hd' % video_id
+        seo = mobj.group('seo')
+        mrss_url = 'http://xhamster.com/movies/%s/%s.html?hd' % (video_id, seo)
         webpage = self._download_webpage(mrss_url, video_id)

         mobj = re.search(r'\'srv\': \'(?P<server>[^\']*)\',\s*\'file\': \'(?P<file>[^\']+)\',', webpage)

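The widened _VALID_URL captures the SEO slug alongside the numeric id and tolerates a trailing query string; both groups feed the rebuilt mRSS URL. Checking the new pattern against the two test URLs from the diff:

```python
import re

_VALID_URL = r'(?:http://)?(?:www\.)?xhamster\.com/movies/(?P<id>[0-9]+)/(?P<seo>.+?)\.html(?:\?.*)?'

for url in ('http://xhamster.com/movies/1509445/femaleagent_shy_beauty_takes_the_bait.html',
            'http://xhamster.com/movies/2221348/britney_spears_sexy_booty.html?hd'):
    mobj = re.match(_VALID_URL, url)
    video_id, seo = mobj.group('id'), mobj.group('seo')
    print('http://xhamster.com/movies/%s/%s.html?hd' % (video_id, seo))
# http://xhamster.com/movies/1509445/femaleagent_shy_beauty_takes_the_bait.html?hd
# http://xhamster.com/movies/2221348/britney_spears_sexy_booty.html?hd
```
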
youtube_dl/extractor/youtube.py

@@ -416,8 +416,12 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
     def _decrypt_signature(self, s):
         """Turn the encrypted s field into a working signature"""

-        if len(s) == 92:
+        if len(s) == 93:
+            return s[86:29:-1] + s[88] + s[28:5:-1]
+        elif len(s) == 92:
             return s[25] + s[3:25] + s[0] + s[26:42] + s[79] + s[43:79] + s[91] + s[80:83]
+        elif len(s) == 91:
+            return s[84:27:-1] + s[86] + s[26:5:-1]
         elif len(s) == 90:
             return s[25] + s[3:25] + s[2] + s[26:40] + s[77] + s[41:77] + s[89] + s[78:81]
         elif len(s) == 89:
@@ -429,7 +433,7 @@
         elif len(s) == 86:
             return s[5:34] + s[0] + s[35:38] + s[3] + s[39:45] + s[38] + s[46:53] + s[73] + s[54:73] + s[85] + s[74:85] + s[53]
         elif len(s) == 85:
-            return s[40] + s[82:43:-1] + s[22] + s[42:40:-1] + s[83] + s[39:22:-1] + s[0] + s[21:2:-1]
+            return s[3:11] + s[0] + s[12:55] + s[84] + s[56:84]
         elif len(s) == 84:
             return s[81:36:-1] + s[0] + s[35:2:-1]
         elif len(s) == 83:
@@ -783,10 +787,7 @@
             if self._downloader.params.get('verbose'):
                 s = url_data['s'][0]
                 if age_gate:
-                    player_version = self._search_regex(r'ad3-(.+?)\.swf',
-                        video_info['ad3_module'][0] if 'ad3_module' in video_info else 'NOT FOUND',
-                        'flash player', fatal=False)
-                    player = 'flash player %s' % player_version
+                    player = 'flash player'
                 else:
                     player = u'html5 player %s' % self._search_regex(r'html5player-(.+?)\.js', video_webpage,
                         'html5 player', fatal=False)
@@ -1008,6 +1009,9 @@ class YoutubeUserIE(InfoExtractor):
                 response = json.loads(page)
             except ValueError as err:
                 raise ExtractorError(u'Invalid JSON in API response: ' + compat_str(err))
+            if 'entry' not in response['feed']:
+                # Number of videos is a multiple of self._MAX_RESULTS
+                break

             # Extract video identifiers
             ids_in_page = []

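The new length-93 branch can be checked directly against the test pair added to devscripts/youtube_genalgo.py earlier in this compare: a slice with step -1 reverses a span, so the rule is "reverse positions 86..30, keep position 88, reverse positions 28..6". A worked check using that exact pair (the Euro sign is written as \u20ac):

```python
# The 93-character test pair from devscripts/youtube_genalgo.py in this compare.
encrypted = (u"qwertyuioplkjhgfdsazxcvbnm1234567890QWERTYUIOPLKJHGFDSAZXCVBNM"
             u"!@#$%^&*()_-+={[]}|:;?/>.<'`~\"\u20ac")
expected = (u".>/?;:|}][{=+-_)(*&^%$#@!MNBVCXZASDFGHJKLPOIUYTREWQ098765"
            u"'321mnbvcxzasdfghjklpoiu")

# s[i:j:-1] walks backwards from index i down to j+1, i.e. it reverses that span.
decrypted = encrypted[86:29:-1] + encrypted[88] + encrypted[28:5:-1]
assert decrypted == expected
```
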
youtube_dl/utils.py

@@ -790,6 +790,18 @@ def platform_name():
     return res


+def write_string(s, out=None):
+    if out is None:
+        out = sys.stderr
+    assert type(s) == type(u'')
+
+    if ('b' in getattr(out, 'mode', '') or
+            sys.version_info[0] < 3): # Python 2 lies about mode of sys.stderr
+        s = s.encode(preferredencoding(), 'ignore')
+    out.write(s)
+    out.flush()
+
+
 def bytes_to_intlist(bs):
     if not bs:
         return []

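write_string centralises the "encode for Python 2 byte streams, pass unicode through on Python 3" handling that previously lived in YoutubeDL.to_screen, which is why the sys.stderr.write debug calls above could all be swapped for one helper. A minimal usage sketch, assuming youtube_dl is importable:

```python
import sys

from youtube_dl.utils import write_string

write_string(u'[debug] something happened\n')      # defaults to sys.stderr
write_string(u'status line\n', out=sys.stdout)     # any file-like stream works
```
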
youtube_dl/version.py

@@ -1,2 +1,2 @@

-__version__ = '2013.09.16'
+__version__ = '2013.09.20.1'