Compare commits


59 Commits

Author SHA1 Message Date
Philipp Hagemeister
e2cc7983e9 release 2014.03.04 2014-03-04 03:32:54 +01:00
Philipp Hagemeister
c9ae7b9565 [youtube] Add support for search result URLs (Fixes #2495) 2014-03-04 03:32:28 +01:00
Philipp Hagemeister
86fb4347f7 release 2014.03.03 2014-03-03 13:51:25 +01:00
Philipp Hagemeister
2fcec131f5 Credit @juancri for canal13cl (#2498) 2014-03-03 12:54:01 +01:00
Philipp Hagemeister
9f62eaf4ef [canal13cl] Add test and improve extraction (#2498) 2014-03-03 12:53:11 +01:00
Philipp Hagemeister
f92259c026 Merge remote-tracking branch 'origin/master' 2014-03-03 12:34:34 +01:00
Philipp Hagemeister
0afef30b23 Add display_id field 2014-03-03 12:06:28 +01:00
Philipp Hagemeister
dcdfd1c711 Merge remote-tracking branch 'origin/master' 2014-03-03 12:05:59 +01:00
Sergey M․
2acc1f8f50 [orf] Fix segments extraction (Closes #2501) 2014-03-03 18:05:46 +07:00
Sergey M․
2c39b0c695 [tinypic] Fix import 2014-03-03 17:40:12 +07:00
Sergey M․
e77c5b4f63 [4tube] Fix import 2014-03-03 17:39:49 +07:00
Juan C. Olivares
409a16cb72 Allowing URLs for 13.cl without the /programas prefix 2014-03-02 23:41:13 -03:00
Juan C. Olivares
94d5e90b4f FIX: Typo in the extractor's name 2014-03-02 23:40:35 -03:00
Juan C. Olivares
2d73b45805 Adding support for 13.cl 2014-03-02 23:15:12 -03:00
Sergey M․
271a2dbfa2 [tvigle] Add age limit 2014-03-02 22:07:18 +07:00
Sergey M․
bf4adcac66 [tvigle] Fix like count 2014-03-02 20:56:36 +07:00
Sergey M․
fb8b8fdd62 [tvigle] Add support for tvigle.ru 2014-03-02 19:59:34 +07:00
Sergey M․
5a0b26252e [ceskatelevize] Simplify 2014-03-01 23:05:33 +07:00
Sergey M․
7d78f0cc48 [ceskatelevize] Fix video availability check and add geo unrestricted test 2014-03-01 22:54:37 +07:00
Sergey M․
f00fc78674 Merge branch '_ceskatelevize' of https://github.com/pulpe/youtube-dl into pulpe-_ceskatelevize 2014-03-01 22:26:18 +07:00
pulpe
392017874c [CeskaTelevize] raise ExtractorError if you are outside of CR 2014-03-01 16:17:29 +01:00
pulpe
c3cb92d1ab [CeskaTelevize] fix python3 support @dstftw 2014-03-01 16:02:51 +01:00
pulpe
aa5590fa07 skip test 2014-03-01 12:34:01 +01:00
pulpe
8cfb5bbf92 [CeskaTelevize] Add initial support for ceskatelevize.cz 2014-03-01 11:47:52 +01:00
Sergey M.
69bb54ebf9 [mailru] Add support for mail.ru video 2014-03-01 16:34:38 +07:00
Sergey M.
ca97a56e4b [vk] Add support for embedded videos (Closes #2473) 2014-02-28 23:51:54 +07:00
Sergey M.
fc26f3b4c2 [lifenews] Add support for multiple videos on the same page (#2482) 2014-02-28 22:52:06 +07:00
Philipp Hagemeister
f604c93c64 [gdcvault] Formatting / Remove unused variables 2014-02-28 15:50:19 +01:00
Philipp Hagemeister
dc3727b65c Credit @mnem dor GDCVault 2014-02-28 15:14:25 +01:00
Philipp Hagemeister
aba3231de1 Merge remote-tracking branch 'mnem/gdc-vault' 2014-02-28 12:52:11 +01:00
Philipp Hagemeister
9193bab91d release 2014.02.28 2014-02-28 12:31:37 +01:00
Philipp Hagemeister
fbcf3e416d Merge pull request #2463 from rzhxeo/resume
Set resume_len to 0 if download is restarted
2014-02-28 12:30:34 +01:00
Sergey M.
c0e5d85631 [vimeo] Improve thumbnail extraction 2014-02-28 18:00:12 +07:00
Sergey M.
ca7fa3dcb3 [vimeo] Fix thumbs extraction (Closes #2480) 2014-02-28 17:43:54 +07:00
Jaime Marquínez Ferrándiz
4ccfba28d9 [collegehumor] Fix test's uploader field 2014-02-27 19:10:30 +01:00
Jaime Marquínez Ferrándiz
abb82f1ddc [mixcloud] Unquote the track id (#2462) 2014-02-27 18:58:09 +01:00
Philipp Hagemeister
cda008cff1 release 2014.02.27.1 2014-02-27 16:09:58 +01:00
Sergey M.
1877a14049 [lifenews] Switch to non-mobile webpage version (Fixes #2476) 2014-02-27 21:45:34 +07:00
David Wagner
546582ec3e Removing MD5 check for ethereal file. 2014-02-27 14:28:55 +00:00
David Wagner
4534485586 Fix test, remove unused, tidy quotes and brackets 2014-02-27 12:50:48 +00:00
Sergey M.
a9ab8855e4 [prosiebensat1] Fix typo 2014-02-27 17:53:09 +07:00
Sergey M.
8a44ef6868 [prosiebensat1] Add rtmpe support 2014-02-27 17:52:52 +07:00
Sergey M.
0c7214c404 [prosiebensat1] Add support for ProSiebenSat.1 Digital sites (Closes
#2346 #2469)
2014-02-27 17:44:29 +07:00
Sergey M.
4cf9654693 Add one more format to unified_strdate 2014-02-27 17:44:05 +07:00
David Wagner
50a138d95c Add support for authenticated videos 2014-02-27 10:32:31 +00:00
Philipp Hagemeister
91346358b0 release 2014.02.27 2014-02-27 07:22:34 +01:00
Philipp Hagemeister
f3783d4b77 Merge branch 'master' of github.com:rg3/youtube-dl 2014-02-27 07:22:22 +01:00
Philipp Hagemeister
89ef304bed [generic] Add support for <meta redirect>
Fixes #413
2014-02-27 07:22:02 +01:00
David Wagner
83cebb8b7a Add support for FLV videos with speaker decks 2014-02-27 00:20:34 +00:00
David Wagner
9e68f9fdf1 Extractor for non-password protected GDC Vault videos 2014-02-26 22:33:33 +00:00
Sergey M.
2acea5c03d [mit] Fix MITIE test 2014-02-26 18:09:43 +07:00
Sergey M.
978177527e [rtlnow] Remove unused import 2014-02-26 18:02:17 +07:00
Sergey M.
2648c436f3 Merge pull request #2464 from rzhxeo/xhamster
[XHamsterIE] Make hd video search more robust
2014-02-26 02:53:54 -08:00
Sergey M.
33f1f2c455 [rtlnow] Fix duration extraction 2014-02-26 17:49:49 +07:00
Sergey M.
995befe0e9 [rtlnow] Replace n-tvnow.de test 2014-02-26 17:43:56 +07:00
Sergey M.
1bb92aff55 [rtlnow] Modernize and add f4m support 2014-02-26 17:36:16 +07:00
rzhxeo
b8e1471d3a [XHamsterIE] Make hd video search more robust 2014-02-26 10:01:44 +01:00
rzhxeo
60daf7f0bb Set resume_len to 0 if download is restarted 2014-02-26 02:47:27 +01:00
Philipp Hagemeister
a83a3139d1 [mit] Add import 2014-02-26 00:41:13 +01:00
28 changed files with 1056 additions and 161 deletions

View File

@@ -71,6 +71,10 @@ class TestAllURLsMatching(unittest.TestCase):
def test_youtube_truncated(self):
self.assertMatch('http://www.youtube.com/watch?', ['youtube:truncated_url'])
def test_youtube_search_matching(self):
self.assertMatch('http://www.youtube.com/results?search_query=making+mustard', ['youtube:search_url'])
self.assertMatch('https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video', ['youtube:search_url'])
def test_justin_tv_channelid_matching(self):
self.assertTrue(JustinTVIE.suitable(u"justin.tv/vanillatv"))
self.assertTrue(JustinTVIE.suitable(u"twitch.tv/vanillatv"))

View File

@@ -16,6 +16,7 @@ from youtube_dl.extractor import (
YoutubeChannelIE,
YoutubeShowIE,
YoutubeTopListIE,
YoutubeSearchURLIE,
)
@@ -133,5 +134,14 @@ class TestYoutubeLists(unittest.TestCase):
entries = result['entries']
self.assertTrue(len(entries) >= 5)
def test_youtube_search_url(self):
dl = FakeYDL()
ie = YoutubeSearchURLIE(dl)
result = ie.extract('https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video')
entries = result['entries']
self.assertIsPlaylist(result)
self.assertEqual(result['title'], 'youtube-dl test video')
self.assertTrue(len(entries) >= 5)
if __name__ == '__main__':
unittest.main()

View File

@@ -675,6 +675,9 @@ class YoutubeDL(object):
info_dict['playlist'] = None
info_dict['playlist_index'] = None
if 'display_id' not in info_dict and 'id' in info_dict:
info_dict['display_id'] = info_dict['id']
# This extractors handle format selection themselves
if info_dict['extractor'] in ['Youku']:
if download:
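
Note — a minimal sketch (not part of the changeset) of the fallback added above: when an extractor returns no display_id, the id is reused, so output templates can always rely on it. The example values are the ones used in the InfoExtractor docstring later in this diff.

# Sketch of the new default applied in YoutubeDL.process_video_result.
info_dict = {'id': '4234987', 'title': 'Dancing naked mole rats'}
if 'display_id' not in info_dict and 'id' in info_dict:
    info_dict['display_id'] = info_dict['id']
print(info_dict['display_id'])  # -> 4234987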

View File

@@ -48,6 +48,8 @@ __authors__ = (
'Niklas Laxström',
'David Triendl',
'Anthony Weems',
'David Wagner',
'Juan C. Olivares',
)
__license__ = 'Public Domain'

View File

@@ -85,6 +85,7 @@ class HttpFD(FileDownloader):
else:
# The length does not match, we start the download over
self.report_unable_to_resume()
resume_len = 0
open_mode = 'wb'
break
# Retry

View File

@@ -23,9 +23,11 @@ from .br import BRIE
from .breakcom import BreakIE
from .brightcove import BrightcoveIE
from .c56 import C56IE
from .canal13cl import Canal13clIE
from .canalplus import CanalplusIE
from .canalc2 import Canalc2IE
from .cbs import CBSIE
from .ceskatelevize import CeskaTelevizeIE
from .channel9 import Channel9IE
from .chilloutzone import ChilloutzoneIE
from .cinemassacre import CinemassacreIE
@@ -89,6 +91,7 @@ from .funnyordie import FunnyOrDieIE
from .gamekings import GamekingsIE
from .gamespot import GameSpotIE
from .gametrailers import GametrailersIE
from .gdcvault import GDCVaultIE
from .generic import GenericIE
from .googleplus import GooglePlusIE
from .googlesearch import GoogleSearchIE
@@ -133,6 +136,7 @@ from .lynda import (
)
from .m6 import M6IE
from .macgamestore import MacGameStoreIE
from .mailru import MailRuIE
from .malemotion import MalemotionIE
from .mdr import MDRIE
from .metacafe import MetacafeIE
@@ -175,6 +179,7 @@ from .podomatic import PodomaticIE
from .pornhd import PornHdIE
from .pornhub import PornHubIE
from .pornotube import PornotubeIE
from .prosiebensat1 import ProSiebenSat1IE
from .pyvideo import PyvideoIE
from .radiofrance import RadioFranceIE
from .rbmaradio import RBMARadioIE
@@ -234,6 +239,7 @@ from .tube8 import Tube8IE
from .tudou import TudouIE
from .tumblr import TumblrIE
from .tutv import TutvIE
from .tvigle import TvigleIE
from .tvp import TvpIE
from .unistra import UnistraIE
from .ustream import UstreamIE, UstreamChannelIE
@@ -279,19 +285,20 @@ from .youku import YoukuIE
from .youporn import YouPornIE
from .youtube import (
YoutubeIE,
YoutubePlaylistIE,
YoutubeSearchIE,
YoutubeSearchDateIE,
YoutubeUserIE,
YoutubeChannelIE,
YoutubeShowIE,
YoutubeSubscriptionsIE,
YoutubeRecommendedIE,
YoutubeTruncatedURLIE,
YoutubeWatchLaterIE,
YoutubeFavouritesIE,
YoutubeHistoryIE,
YoutubePlaylistIE,
YoutubeRecommendedIE,
YoutubeSearchDateIE,
YoutubeSearchIE,
YoutubeSearchURLIE,
YoutubeShowIE,
YoutubeSubscriptionsIE,
YoutubeTopListIE,
YoutubeTruncatedURLIE,
YoutubeUserIE,
YoutubeWatchLaterIE,
)
from .zdf import ZDFIE

View File

@@ -0,0 +1,48 @@
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
class Canal13clIE(InfoExtractor):
_VALID_URL = r'^http://(?:www\.)?13\.cl/(?:[^/?#]+/)*(?P<id>[^/?#]+)'
_TEST = {
'url': 'http://www.13.cl/t13/nacional/el-circulo-de-hierro-de-michelle-bachelet-en-su-regreso-a-la-moneda',
'md5': '4cb1fa38adcad8fea88487a078831755',
'info_dict': {
'id': '1403022125',
'display_id': 'el-circulo-de-hierro-de-michelle-bachelet-en-su-regreso-a-la-moneda',
'ext': 'mp4',
'title': 'El "círculo de hierro" de Michelle Bachelet en su regreso a La Moneda',
'description': '(Foto: Agencia Uno) En nueve días más, Michelle Bachelet va a asumir por segunda vez como presidenta de la República. Entre aquellos que la acompañarán hay caras que se repiten y otras que se consolidan en su entorno de colaboradores más cercanos.',
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
display_id = mobj.group('id')
webpage = self._download_webpage(url, display_id)
title = self._html_search_meta(
'twitter:title', webpage, 'title', fatal=True)
description = self._html_search_meta(
'twitter:description', webpage, 'description')
url = self._html_search_regex(
r'articuloVideo = \"(.*?)\"', webpage, 'url')
real_id = self._search_regex(
r'[^0-9]([0-9]{7,})[^0-9]', url, 'id', default=display_id)
thumbnail = self._html_search_regex(
r'articuloImagen = \"(.*?)\"', webpage, 'thumbnail')
return {
'id': real_id,
'display_id': display_id,
'url': url,
'title': title,
'description': description,
'ext': 'mp4',
'thumbnail': thumbnail,
}
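
A hedged usage sketch for the new extractor (not part of the changeset), driving it through the public YoutubeDL API of this tree; network access is assumed and the URL is the one from the test above.

# Sketch only: exercising Canal13clIE via the normal entry point.
from youtube_dl import YoutubeDL

ydl = YoutubeDL({'quiet': True})
info = ydl.extract_info(
    'http://www.13.cl/t13/nacional/el-circulo-de-hierro-de-michelle-bachelet-en-su-regreso-a-la-moneda',
    download=False)
print(info['id'], info['display_id'], info['ext'])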

View File

@@ -0,0 +1,126 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
compat_urllib_request,
compat_urllib_parse,
compat_urllib_parse_urlparse,
ExtractorError,
)
class CeskaTelevizeIE(InfoExtractor):
_VALID_URL = r'https?://www\.ceskatelevize\.cz/(porady|ivysilani)/(.+/)?(?P<id>[^?#]+)'
_TESTS = [
{
'url': 'http://www.ceskatelevize.cz/ivysilani/10532695142-prvni-republika/213512120230004-spanelska-chripka',
'info_dict': {
'id': '213512120230004',
'ext': 'flv',
'title': 'První republika: Španělská chřipka',
'duration': 3107.4,
},
'params': {
'skip_download': True, # requires rtmpdump
},
'skip': 'Works only from Czech Republic.',
},
{
'url': 'http://www.ceskatelevize.cz/ivysilani/1030584952-tsatsiki-maminka-a-policajt',
'info_dict': {
'id': '20138143440',
'ext': 'flv',
'title': 'Tsatsiki, maminka a policajt',
'duration': 6754.1,
},
'params': {
'skip_download': True, # requires rtmpdump
},
'skip': 'Works only from Czech Republic.',
},
{
'url': 'http://www.ceskatelevize.cz/ivysilani/10532695142-prvni-republika/bonus/14716-zpevacka-z-duparny-bobina',
'info_dict': {
'id': '14716',
'ext': 'flv',
'title': 'První republika: Zpěvačka z Dupárny Bobina',
'duration': 90,
},
'params': {
'skip_download': True, # requires rtmpdump
},
},
]
def _real_extract(self, url):
url = url.replace('/porady/', '/ivysilani/').replace('/video/', '')
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
webpage = self._download_webpage(url, video_id)
NOT_AVAILABLE_STRING = 'This content is not available at your territory due to limited copyright.'
if '%s</p>' % NOT_AVAILABLE_STRING in webpage:
raise ExtractorError(NOT_AVAILABLE_STRING, expected=True)
typ = self._html_search_regex(r'getPlaylistUrl\(\[\{"type":"(.+?)","id":".+?"\}\],', webpage, 'type')
episode_id = self._html_search_regex(r'getPlaylistUrl\(\[\{"type":".+?","id":"(.+?)"\}\],', webpage, 'episode_id')
data = {
'playlist[0][type]': typ,
'playlist[0][id]': episode_id,
'requestUrl': compat_urllib_parse_urlparse(url).path,
'requestSource': 'iVysilani',
}
req = compat_urllib_request.Request('http://www.ceskatelevize.cz/ivysilani/ajax/get-playlist-url',
data=compat_urllib_parse.urlencode(data))
req.add_header('Content-type', 'application/x-www-form-urlencoded')
req.add_header('x-addr', '127.0.0.1')
req.add_header('X-Requested-With', 'XMLHttpRequest')
req.add_header('Referer', url)
playlistpage = self._download_json(req, video_id)
req = compat_urllib_request.Request(compat_urllib_parse.unquote(playlistpage['url']))
req.add_header('Referer', url)
playlist = self._download_xml(req, video_id)
formats = []
for i in playlist.find('smilRoot/body'):
if 'AD' not in i.attrib['id']:
base_url = i.attrib['base']
parsedurl = compat_urllib_parse_urlparse(base_url)
duration = i.attrib['duration']
for video in i.findall('video'):
if video.attrib['label'] != 'AD':
format_id = video.attrib['label']
play_path = video.attrib['src']
vbr = int(video.attrib['system-bitrate'])
formats.append({
'format_id': format_id,
'url': base_url,
'vbr': vbr,
'play_path': play_path,
'app': parsedurl.path[1:] + '?' + parsedurl.query,
'rtmp_live': True,
'ext': 'flv',
})
self._sort_formats(formats)
return {
'id': episode_id,
'title': self._html_search_regex(r'<title>(.+?) — iVysílání — Česká televize</title>', webpage, 'title'),
'duration': float(duration),
'formats': formats,
}
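
For reference, a small sketch (not part of the changeset) of the form body the extractor POSTs to get-playlist-url. The id and request path come from the first test above; the 'episode' type is an assumption standing in for the value scraped from the page.

# Sketch: building the playlist request body the same way the extractor does.
from youtube_dl.utils import compat_urllib_parse, compat_urllib_parse_urlparse

url = 'http://www.ceskatelevize.cz/ivysilani/10532695142-prvni-republika/213512120230004-spanelska-chripka'
data = {
    'playlist[0][type]': 'episode',   # assumed value of `typ` scraped from the page
    'playlist[0][id]': '213512120230004',
    'requestUrl': compat_urllib_parse_urlparse(url).path,
    'requestSource': 'iVysilani',
}
print(compat_urllib_parse.urlencode(data))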

View File

@@ -40,7 +40,7 @@ class CollegeHumorIE(InfoExtractor):
'id': 'W5gMp3ZjYg4',
'ext': 'mp4',
'title': 'Funny Dogs Protecting Babies Compilation 2014 [NEW HD]',
'uploader': 'Funnyplox TV',
'uploader': 'FunnyPlox TV',
'uploader_id': 'funnyploxtv',
'description': 'md5:7ded37421526d54afdf005e25bc2b7a3',
'upload_date': '20140128',

View File

@@ -88,6 +88,10 @@ class InfoExtractor(object):
The following fields are optional:
display_id An alternative identifier for the video, not necessarily
unique, but available before title. Typically, id is
something like "4234987", title "Dancing naked mole rats",
and display_id "dancing-naked-mole-rats"
thumbnails: A list of dictionaries (with the entries "resolution" and
"url") for the varying thumbnails
thumbnail: Full URL to a video thumbnail image.
@@ -432,14 +436,14 @@ class InfoExtractor(object):
if secure: regexes = self._og_regexes('video:secure_url') + regexes
return self._html_search_regex(regexes, html, name, **kargs)
def _html_search_meta(self, name, html, display_name=None):
def _html_search_meta(self, name, html, display_name=None, fatal=False):
if display_name is None:
display_name = name
return self._html_search_regex(
r'''(?ix)<meta
(?=[^>]+(?:itemprop|name|property)=["\']%s["\'])
[^>]+content=["\']([^"\']+)["\']''' % re.escape(name),
html, display_name, fatal=False)
html, display_name, fatal=fatal)
def _dc_search_uploader(self, html):
return self._html_search_meta('dc.creator', html, 'uploader')
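
The extra fatal argument lets callers such as the new canal13cl extractor insist on a meta tag. A standalone sketch (not part of the changeset) of the underlying regex, copied from the method above; the sample HTML is made up.

# Sketch: the meta-tag regex used by _html_search_meta, exercised on its own.
import re

def search_meta(name, html):
    pattern = r'''(?ix)<meta
                    (?=[^>]+(?:itemprop|name|property)=["\']%s["\'])
                    [^>]+content=["\']([^"\']+)["\']''' % re.escape(name)
    mobj = re.search(pattern, html)
    return mobj.group(1) if mobj else None

print(search_meta('twitter:title', '<meta name="twitter:title" content="Some title">'))
# -> Some title; with fatal=True the real method raises instead of returning None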

View File

@@ -8,8 +8,8 @@ from ..utils import (
unified_strdate,
str_to_int,
parse_duration,
clean_html,
)
from youtube_dl.utils import clean_html
class FourTubeIE(InfoExtractor):

View File

@@ -0,0 +1,134 @@
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
compat_urllib_parse,
compat_urllib_request,
)
class GDCVaultIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?gdcvault\.com/play/(?P<id>\d+)/(?P<name>(\w|-)+)'
_TESTS = [
{
'url': 'http://www.gdcvault.com/play/1019721/Doki-Doki-Universe-Sweet-Simple',
'md5': '7ce8388f544c88b7ac11c7ab1b593704',
'info_dict': {
'id': '1019721',
'ext': 'mp4',
'title': 'Doki-Doki Universe: Sweet, Simple and Genuine (GDC Next 10)'
}
},
{
'url': 'http://www.gdcvault.com/play/1015683/Embracing-the-Dark-Art-of',
'info_dict': {
'id': '1015683',
'ext': 'flv',
'title': 'Embracing the Dark Art of Mathematical Modeling in AI'
},
'params': {
'skip_download': True, # Requires rtmpdump
}
},
]
def _parse_mp4(self, xml_description):
video_formats = []
mp4_video = xml_description.find('./metadata/mp4video')
if mp4_video is None:
return None
mobj = re.match(r'(?P<root>https?://.*?/).*', mp4_video.text)
video_root = mobj.group('root')
formats = xml_description.findall('./metadata/MBRVideos/MBRVideo')
for format in formats:
mobj = re.match(r'mp4\:(?P<path>.*)', format.find('streamName').text)
url = video_root + mobj.group('path')
vbr = format.find('bitrate').text
video_formats.append({
'url': url,
'vbr': int(vbr),
})
return video_formats
def _parse_flv(self, xml_description):
video_formats = []
akami_url = xml_description.find('./metadata/akamaiHost').text
slide_video_path = xml_description.find('./metadata/slideVideo').text
video_formats.append({
'url': 'rtmp://' + akami_url + '/' + slide_video_path,
'format_note': 'slide deck video',
'quality': -2,
'preference': -2,
'format_id': 'slides',
})
speaker_video_path = xml_description.find('./metadata/speakerVideo').text
video_formats.append({
'url': 'rtmp://' + akami_url + '/' + speaker_video_path,
'format_note': 'speaker video',
'quality': -1,
'preference': -1,
'format_id': 'speaker',
})
return video_formats
def _login(self, webpage_url, video_id):
(username, password) = self._get_login_info()
if username is None or password is None:
self.report_warning('It looks like ' + webpage_url + ' requires a login. Try specifying a username and password and try again.')
return None
mobj = re.match(r'(?P<root_url>https?://.*?/).*', webpage_url)
login_url = mobj.group('root_url') + 'api/login.php'
logout_url = mobj.group('root_url') + 'logout'
login_form = {
'email': username,
'password': password,
}
request = compat_urllib_request.Request(login_url, compat_urllib_parse.urlencode(login_form))
request.add_header('Content-Type', 'application/x-www-form-urlencoded')
self._download_webpage(request, video_id, 'Logging in')
start_page = self._download_webpage(webpage_url, video_id, 'Getting authenticated video page')
self._download_webpage(logout_url, video_id, 'Logging out')
return start_page
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
webpage_url = 'http://www.gdcvault.com/play/' + video_id
start_page = self._download_webpage(webpage_url, video_id)
xml_root = self._html_search_regex(r'<iframe src="(?P<xml_root>.*?)player.html.*?".*?</iframe>', start_page, 'xml root', None, False)
if xml_root is None:
# Probably need to authenticate
start_page = self._login(webpage_url, video_id)
if start_page is None:
self.report_warning('Could not login.')
else:
# Grab the url from the authenticated page
xml_root = self._html_search_regex(r'<iframe src="(?P<xml_root>.*?)player.html.*?".*?</iframe>', start_page, 'xml root')
xml_name = self._html_search_regex(r'<iframe src=".*?\?xml=(?P<xml_file>.+?\.xml).*?".*?</iframe>', start_page, 'xml filename', None, False)
if xml_name is None:
# Fallback to the older format
xml_name = self._html_search_regex(r'<iframe src=".*?\?xmlURL=xml/(?P<xml_file>.+?\.xml).*?".*?</iframe>', start_page, 'xml filename')
xml_decription_url = xml_root + 'xml/' + xml_name
xml_description = self._download_xml(xml_decription_url, video_id)
video_title = xml_description.find('./metadata/title').text
video_formats = self._parse_mp4(xml_description)
if video_formats is None:
video_formats = self._parse_flv(xml_description)
return {
'id': video_id,
'title': video_title,
'formats': video_formats,
}

View File

@@ -83,10 +83,10 @@ class GenericIE(InfoExtractor):
# Direct link to a video
{
'url': 'http://media.w3.org/2010/05/sintel/trailer.mp4',
'file': 'trailer.mp4',
'md5': '67d406c2bcb6af27fa886f31aa934bbe',
'info_dict': {
'id': 'trailer',
'ext': 'mp4',
'title': 'trailer',
'upload_date': '20100513',
}
@@ -94,7 +94,6 @@ class GenericIE(InfoExtractor):
# ooyala video
{
'url': 'http://www.rollingstone.com/music/videos/norwegian-dj-cashmere-cat-goes-spartan-on-with-me-premiere-20131219',
'file': 'BwY2RxaTrTkslxOfcan0UCf0YqyvWysJ.mp4',
'md5': '5644c6ca5d5782c1d0d350dad9bd840c',
'info_dict': {
'id': 'BwY2RxaTrTkslxOfcan0UCf0YqyvWysJ',
@@ -102,6 +101,22 @@ class GenericIE(InfoExtractor):
'title': '2cc213299525360.mov', # that's what we get
},
},
# google redirect
{
'url': 'http://www.google.com/url?sa=t&rct=j&q=&esrc=s&source=web&cd=1&cad=rja&ved=0CCUQtwIwAA&url=http%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DcmQHVoWB5FY&ei=F-sNU-LLCaXk4QT52ICQBQ&usg=AFQjCNEw4hL29zgOohLXvpJ-Bdh2bils1Q&bvm=bv.61965928,d.bGE',
'info_dict': {
'id': 'cmQHVoWB5FY',
'ext': 'mp4',
'upload_date': '20130224',
'uploader_id': 'TheVerge',
'description': 'Chris Ziegler takes a look at the Alcatel OneTouch Fire and the ZTE Open; two of the first Firefox OS handsets to be officially announced.',
'uploader': 'The Verge',
'title': 'First Firefox OS phones side-by-side',
},
'params': {
'skip_download': False,
}
}
]
def report_download_webpage(self, video_id):
@@ -381,6 +396,11 @@ class GenericIE(InfoExtractor):
if mobj is not None:
return self.url_result(mobj.group('url'), 'Facebook')
# Look for embedded VK player
mobj = re.search(r'<iframe[^>]+?src=(["\'])(?P<url>https?://vk\.com/video_ext\.php.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'VK')
# Look for embedded Huffington Post player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>https?://embed\.live\.huffingtonpost\.com/.+?)\1', webpage)
@@ -411,6 +431,18 @@ class GenericIE(InfoExtractor):
if mobj is None:
# HTML5 video
mobj = re.search(r'<video[^<]*(?:>.*?<source.*?)? src="([^"]+)"', webpage, flags=re.DOTALL)
if mobj is None:
mobj = re.search(
r'(?i)<meta\s+(?=(?:[a-z-]+="[^"]+"\s+)*http-equiv="refresh")'
r'(?:[a-z-]+="[^"]+"\s+)*?content="[0-9]{,2};url=\'([^\']+)\'"',
webpage)
if mobj:
new_url = mobj.group(1)
self.report_following_redirect(new_url)
return {
'_type': 'url',
'url': new_url,
}
if mobj is None:
raise ExtractorError('Unsupported URL: %s' % url)
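
A worked sketch (not part of the changeset) of the new <meta> redirect handling; the regex is copied from the hunk above and the example tag is hypothetical.

# Sketch: what the new meta-refresh fallback in the generic extractor matches.
import re

webpage = '<meta http-equiv="refresh" content="0;url=\'http://example.com/video\'">'
mobj = re.search(
    r'(?i)<meta\s+(?=(?:[a-z-]+="[^"]+"\s+)*http-equiv="refresh")'
    r'(?:[a-z-]+="[^"]+"\s+)*?content="[0-9]{,2};url=\'([^\']+)\'"',
    webpage)
print(mobj.group(1))  # -> http://example.com/video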

View File

@@ -6,7 +6,8 @@ import re
from .common import InfoExtractor
from ..utils import (
int_or_none,
unified_strdate
unified_strdate,
ExtractorError,
)
@@ -32,13 +33,11 @@ class LifeNewsIE(InfoExtractor):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
webpage = self._download_webpage('http://lifenews.ru/mobile/news/%s' % video_id, video_id, 'Downloading page')
webpage = self._download_webpage('http://lifenews.ru/news/%s' % video_id, video_id, 'Downloading page')
video_url = self._html_search_regex(
r'<video.*?src="([^"]+)".*?></video>', webpage, 'video URL')
thumbnail = self._html_search_regex(
r'<video.*?poster="([^"]+)".*?"></video>', webpage, 'video thumbnail')
videos = re.findall(r'<video.*?poster="(?P<poster>[^"]+)".*?src="(?P<video>[^"]+)".*?></video>', webpage)
if not videos:
raise ExtractorError('No media links available for %s' % video_id)
title = self._og_search_title(webpage)
TITLE_SUFFIX = ' - Первый по срочным новостям — LIFE | NEWS'
@@ -50,20 +49,26 @@ class LifeNewsIE(InfoExtractor):
view_count = self._html_search_regex(
r'<div class=\'views\'>(\d+)</div>', webpage, 'view count', fatal=False)
comment_count = self._html_search_regex(
r'<div class=\'comments\'>(\d+)</div>', webpage, 'comment count', fatal=False)
r'<div class=\'comments\'>\s*<span class=\'counter\'>(\d+)</span>', webpage, 'comment count', fatal=False)
upload_date = self._html_search_regex(
r'<time datetime=\'([^\']+)\'>', webpage, 'upload date',fatal=False)
if upload_date is not None:
upload_date = unified_strdate(upload_date)
return {
'id': video_id,
'url': video_url,
'thumbnail': thumbnail,
'title': title,
'description': description,
'view_count': int_or_none(view_count),
'comment_count': int_or_none(comment_count),
'upload_date': upload_date,
}
def make_entry(video_id, media, video_number=None):
return {
'id': video_id,
'url': media[1],
'thumbnail': media[0],
'title': title if video_number is None else '%s-video%s' % (title, video_number),
'description': description,
'view_count': int_or_none(view_count),
'comment_count': int_or_none(comment_count),
'upload_date': upload_date,
}
if len(videos) == 1:
return make_entry(video_id, videos[0])
else:
return [make_entry(video_id, media, video_number+1) for video_number, media in enumerate(videos)]

View File

@@ -0,0 +1,66 @@
# encoding: utf-8
from __future__ import unicode_literals
import re
import datetime
from .common import InfoExtractor
class MailRuIE(InfoExtractor):
IE_NAME = 'mailru'
IE_DESC = 'Видео@Mail.Ru'
_VALID_URL = r'http://(?:www\.)?my\.mail\.ru/video/.*#video=/?(?P<id>[^/]+/[^/]+/[^/]+/\d+)'
_TEST = {
'url': 'http://my.mail.ru/video/top#video=/mail/sonypicturesrus/75/76',
'md5': 'dea205f03120046894db4ebb6159879a',
'info_dict': {
'id': '46301138',
'ext': 'mp4',
'title': 'Новый Человек-Паук. Высокое напряжение. Восстание Электро',
'upload_date': '20140224',
'uploader': 'sonypicturesrus',
'uploader_id': 'sonypicturesrus@mail.ru',
'duration': 184,
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
video_data = self._download_json(
'http://videoapi.my.mail.ru/videos/%s.json?new=1' % video_id, video_id, 'Downloading video JSON')
author = video_data['author']
uploader = author['name']
uploader_id = author['id']
movie = video_data['movie']
content_id = str(movie['contentId'])
title = movie['title']
thumbnail = movie['poster']
duration = movie['duration']
upload_date = datetime.datetime.fromtimestamp(video_data['timestamp']).strftime('%Y%m%d')
view_count = video_data['views_count']
formats = [
{
'url': video['url'],
'format_id': video['name'],
} for video in video_data['videos']
]
return {
'id': content_id,
'title': title,
'thumbnail': thumbnail,
'upload_date': upload_date,
'uploader': uploader,
'uploader_id': uploader_id,
'duration': duration,
'view_count': view_count,
'formats': formats,
}

View File

@@ -8,6 +8,7 @@ from .youtube import YoutubeIE
from ..utils import (
compat_urlparse,
clean_html,
ExtractorError,
get_element_by_id,
)
@@ -72,7 +73,6 @@ class MITIE(TechTVMITIE):
_TEST = {
'url': 'http://video.mit.edu/watch/the-government-is-profiling-you-13222/',
'file': '.mp4',
'md5': '7db01d5ccc1895fc5010e9c9e13648da',
'info_dict': {
'id': '21783',

View File

@@ -5,6 +5,7 @@ import re
from .common import InfoExtractor
from ..utils import (
unified_strdate,
compat_urllib_parse,
ExtractorError,
)
@@ -15,8 +16,9 @@ class MixcloudIE(InfoExtractor):
_TEST = {
'url': 'http://www.mixcloud.com/dholbach/cryptkeeper/',
'file': 'dholbach-cryptkeeper.mp3',
'info_dict': {
'id': 'dholbach-cryptkeeper',
'ext': 'mp3',
'title': 'Cryptkeeper',
'description': 'After quite a long silence from myself, finally another Drum\'n\'Bass mix with my favourite current dance floor bangers.',
'uploader': 'Daniel Holbach',
@@ -45,7 +47,7 @@ class MixcloudIE(InfoExtractor):
mobj = re.match(self._VALID_URL, url)
uploader = mobj.group(1)
cloudcast_name = mobj.group(2)
track_id = '-'.join((uploader, cloudcast_name))
track_id = compat_urllib_parse.unquote('-'.join((uploader, cloudcast_name)))
webpage = self._download_webpage(url, track_id)

View File

@@ -8,6 +8,7 @@ from .common import InfoExtractor
from ..utils import (
HEADRequest,
unified_strdate,
ExtractorError,
)
@@ -35,7 +36,15 @@ class ORFIE(InfoExtractor):
data_json = self._search_regex(
r'initializeAdworx\((.+?)\);\n', webpage, 'video info')
all_data = json.loads(data_json)
sdata = all_data[0]['values']['segments']
def get_segments(all_data):
for data in all_data:
if data['name'] == 'Tracker::EPISODE_DETAIL_PAGE_OVER_PROGRAM':
return data['values']['segments']
sdata = get_segments(all_data)
if not sdata:
raise ExtractorError('Unable to extract segments')
def quality_to_int(s):
m = re.search('([0-9]+)', s)

View File

@@ -0,0 +1,297 @@
# encoding: utf-8
from __future__ import unicode_literals
import re
from hashlib import sha1
from .common import InfoExtractor
from ..utils import (
compat_urllib_parse,
unified_strdate,
clean_html,
RegexNotFoundError,
)
class ProSiebenSat1IE(InfoExtractor):
IE_NAME = 'prosiebensat1'
IE_DESC = 'ProSiebenSat.1 Digital'
_VALID_URL = r'https?://(?:www\.)?(?:(?:prosieben|prosiebenmaxx|sixx|sat1|kabeleins|ran|the-voice-of-germany)\.de|fem\.com)/(?P<id>.+)'
_TESTS = [
{
'url': 'http://www.prosieben.de/tv/circus-halligalli/videos/218-staffel-2-episode-18-jahresrueckblick-ganze-folge',
'info_dict': {
'id': '2104602',
'ext': 'mp4',
'title': 'Staffel 2, Episode 18 - Jahresrückblick',
'description': 'md5:8733c81b702ea472e069bc48bb658fc1',
'upload_date': '20131231',
'duration': 5845.04,
},
'params': {
# rtmp download
'skip_download': True,
},
},
{
'url': 'http://www.prosieben.de/videokatalog/Gesellschaft/Leben/Trends/video-Lady-Umstyling-f%C3%BCr-Audrina-Rebekka-Audrina-Fergen-billig-aussehen-Battal-Modica-700544.html',
'info_dict': {
'id': '2570327',
'ext': 'mp4',
'title': 'Lady-Umstyling für Audrina',
'description': 'md5:4c16d0c17a3461a0d43ea4084e96319d',
'upload_date': '20131014',
'duration': 606.76,
},
'params': {
# rtmp download
'skip_download': True,
},
'skip': 'Seems to be broken',
},
{
'url': 'http://www.prosiebenmaxx.de/yep/one-piece/video/148-folge-48-gold-rogers-heimat-ganze-folge',
'info_dict': {
'id': '2437108',
'ext': 'mp4',
'title': 'Folge 48: Gold Rogers Heimat',
'description': 'Ruffy erreicht die Insel, auf der der berühmte Gold Roger lebte und hingerichtet wurde.',
'upload_date': '20140226',
'duration': 1401.48,
},
'params': {
# rtmp download
'skip_download': True,
},
},
{
'url': 'http://www.sixx.de/stars-style/video/sexy-laufen-in-ugg-boots-clip',
'info_dict': {
'id': '2904997',
'ext': 'mp4',
'title': 'Sexy laufen in Ugg Boots',
'description': 'md5:edf42b8bd5bc4e5da4db4222c5acb7d6',
'upload_date': '20140122',
'duration': 245.32,
},
'params': {
# rtmp download
'skip_download': True,
},
},
{
'url': 'http://www.sat1.de/film/der-ruecktritt/video/im-interview-kai-wiesinger-clip',
'info_dict': {
'id': '2906572',
'ext': 'mp4',
'title': 'Im Interview: Kai Wiesinger',
'description': 'md5:e4e5370652ec63b95023e914190b4eb9',
'upload_date': '20140225',
'duration': 522.56,
},
'params': {
# rtmp download
'skip_download': True,
},
},
{
'url': 'http://www.kabeleins.de/tv/rosins-restaurants/videos/jagd-auf-fertigkost-im-elsthal-teil-2-ganze-folge',
'info_dict': {
'id': '2992323',
'ext': 'mp4',
'title': 'Jagd auf Fertigkost im Elsthal - Teil 2',
'description': 'md5:2669cde3febe9bce13904f701e774eb6',
'upload_date': '20140225',
'duration': 2410.44,
},
'params': {
# rtmp download
'skip_download': True,
},
},
{
'url': 'http://www.ran.de/fussball/bundesliga/video/schalke-toennies-moechte-raul-zurueck-ganze-folge',
'info_dict': {
'id': '3004256',
'ext': 'mp4',
'title': 'Schalke: Tönnies möchte Raul zurück',
'description': 'md5:4b5b271d9bcde223b54390754c8ece3f',
'upload_date': '20140226',
'duration': 228.96,
},
'params': {
# rtmp download
'skip_download': True,
},
},
{
'url': 'http://www.the-voice-of-germany.de/video/31-andreas-kuemmert-rocket-man-clip',
'info_dict': {
'id': '2572814',
'ext': 'mp4',
'title': 'Andreas Kümmert: Rocket Man',
'description': 'md5:6ddb02b0781c6adf778afea606652e38',
'upload_date': '20131017',
'duration': 469.88,
},
'params': {
# rtmp download
'skip_download': True,
},
},
{
'url': 'http://www.fem.com/wellness/videos/wellness-video-clip-kurztripps-zum-valentinstag.html',
'info_dict': {
'id': '2156342',
'ext': 'mp4',
'title': 'Kurztrips zum Valentinstag',
'description': 'md5:8ba6301e70351ae0bedf8da00f7ba528',
'upload_date': '20130206',
'duration': 307.24,
},
'params': {
# rtmp download
'skip_download': True,
},
},
]
_CLIPID_REGEXES = [
r'"clip_id"\s*:\s+"(\d+)"',
r'clipid: "(\d+)"',
]
_TITLE_REGEXES = [
r'<h2 class="subtitle" itemprop="name">\s*(.+?)</h2>',
r'<header class="clearfix">\s*<h3>(.+?)</h3>',
r'<!-- start video -->\s*<h1>(.+?)</h1>',
r'<div class="ep-femvideos-pi4-video-txt">\s*<h2>(.+?)</h2>',
]
_DESCRIPTION_REGEXES = [
r'<p itemprop="description">\s*(.+?)</p>',
r'<div class="videoDecription">\s*<p><strong>Beschreibung</strong>: (.+?)</p>',
r'<div class="g-plusone" data-size="medium"></div>\s*</div>\s*</header>\s*(.+?)\s*<footer>',
r'<p>(.+?)</p>\s*<div class="ep-femvideos-pi4-video-footer">',
]
_UPLOAD_DATE_REGEXES = [
r'<meta property="og:published_time" content="(.+?)">',
r'<span>\s*(\d{2}\.\d{2}\.\d{4} \d{2}:\d{2}) \|\s*<span itemprop="duration"',
r'<footer>\s*(\d{2}\.\d{2}\.\d{4}) \d{2}:\d{2} Uhr',
r'<span style="padding-left: 4px;line-height:20px; color:#404040">(\d{2}\.\d{2}\.\d{4})</span>',
r'(\d{2}\.\d{2}\.\d{4}) \| \d{2}:\d{2} Min<br/>',
]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
page = self._download_webpage(url, video_id, 'Downloading page')
def extract(patterns, name, page, fatal=False):
for pattern in patterns:
mobj = re.search(pattern, page)
if mobj:
return clean_html(mobj.group(1))
if fatal:
raise RegexNotFoundError(u'Unable to extract %s' % name)
return None
clip_id = extract(self._CLIPID_REGEXES, 'clip id', page, fatal=True)
access_token = 'testclient'
client_name = 'kolibri-1.2.5'
client_location = url
videos_api_url = 'http://vas.sim-technik.de/vas/live/v2/videos?%s' % compat_urllib_parse.urlencode({
'access_token': access_token,
'client_location': client_location,
'client_name': client_name,
'ids': clip_id,
})
videos = self._download_json(videos_api_url, clip_id, 'Downloading videos JSON')
duration = float(videos[0]['duration'])
source_ids = [source['id'] for source in videos[0]['sources']]
source_ids_str = ','.join(map(str, source_ids))
g = '01!8d8F_)r9]4s[qeuXfP%'
client_id = g[:2] + sha1(''.join([clip_id, g, access_token, client_location, g, client_name])
.encode('utf-8')).hexdigest()
sources_api_url = 'http://vas.sim-technik.de/vas/live/v2/videos/%s/sources?%s' % (clip_id, compat_urllib_parse.urlencode({
'access_token': access_token,
'client_id': client_id,
'client_location': client_location,
'client_name': client_name,
}))
sources = self._download_json(sources_api_url, clip_id, 'Downloading sources JSON')
server_id = sources['server_id']
client_id = g[:2] + sha1(''.join([g, clip_id, access_token, server_id,
client_location, source_ids_str, g, client_name])
.encode('utf-8')).hexdigest()
url_api_url = 'http://vas.sim-technik.de/vas/live/v2/videos/%s/sources/url?%s' % (clip_id, compat_urllib_parse.urlencode({
'access_token': access_token,
'client_id': client_id,
'client_location': client_location,
'client_name': client_name,
'server_id': server_id,
'source_ids': source_ids_str,
}))
urls = self._download_json(url_api_url, clip_id, 'Downloading urls JSON')
title = extract(self._TITLE_REGEXES, 'title', page, fatal=True)
description = extract(self._DESCRIPTION_REGEXES, 'description', page)
thumbnail = self._og_search_thumbnail(page)
upload_date = extract(self._UPLOAD_DATE_REGEXES, 'upload date', page)
if upload_date:
upload_date = unified_strdate(upload_date)
formats = []
urls_sources = urls['sources']
if isinstance(urls_sources, dict):
urls_sources = urls_sources.values()
def fix_bitrate(bitrate):
return bitrate / 1000 if bitrate % 1000 == 0 else bitrate
for source in urls_sources:
protocol = source['protocol']
if protocol == 'rtmp' or protocol == 'rtmpe':
mobj = re.search(r'^(?P<url>rtmpe?://[^/]+/(?P<app>[^/]+))/(?P<playpath>.+)$', source['url'])
if not mobj:
continue
formats.append({
'url': mobj.group('url'),
'app': mobj.group('app'),
'play_path': mobj.group('playpath'),
'player_url': 'http://livepassdl.conviva.com/hf/ver/2.79.0.17083/LivePassModuleMain.swf',
'page_url': 'http://www.prosieben.de',
'vbr': fix_bitrate(source['bitrate']),
'ext': 'mp4',
'format_id': '%s_%s' % (source['cdn'], source['bitrate']),
})
else:
formats.append({
'url': source['url'],
'vbr': fix_bitrate(source['bitrate']),
})
self._sort_formats(formats)
return {
'id': clip_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'upload_date': upload_date,
'duration': duration,
'formats': formats,
}
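
The API requests above are signed by hashing the salt g together with the request parameters. A minimal sketch (not part of the changeset) of the first client_id computation; clip_id and client_location are taken from the first test and stand in for the values scraped at runtime.

# Sketch: how the extractor derives client_id for the videos API call.
from hashlib import sha1

g = '01!8d8F_)r9]4s[qeuXfP%'
access_token = 'testclient'
client_name = 'kolibri-1.2.5'
client_location = 'http://www.prosieben.de/tv/circus-halligalli/videos/218-staffel-2-episode-18-jahresrueckblick-ganze-folge'
clip_id = '2104602'  # in the extractor this is scraped via _CLIPID_REGEXES

client_id = g[:2] + sha1(
    ''.join([clip_id, g, access_token, client_location, g, client_name])
    .encode('utf-8')).hexdigest()
print(client_id)  # the 2-character prefix of g plus the 40-character hex digest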

View File

@@ -1,148 +1,165 @@
# encoding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
clean_html,
ExtractorError,
clean_html,
unified_strdate,
int_or_none,
)
class RTLnowIE(InfoExtractor):
"""Information Extractor for RTL NOW, RTL2 NOW, RTL NITRO, SUPER RTL NOW, VOX NOW and n-tv NOW"""
_VALID_URL = r'(?:http://)?(?P<url>(?P<domain>rtl-now\.rtl\.de|rtl2now\.rtl2\.de|(?:www\.)?voxnow\.de|(?:www\.)?rtlnitronow\.de|(?:www\.)?superrtlnow\.de|(?:www\.)?n-tvnow\.de)/+[a-zA-Z0-9-]+/[a-zA-Z0-9-]+\.php\?(?:container_id|film_id)=(?P<video_id>[0-9]+)&player=1(?:&season=[0-9]+)?(?:&.*)?)'
_TESTS = [{
'url': 'http://rtl-now.rtl.de/ahornallee/folge-1.php?film_id=90419&player=1&season=1',
'file': '90419.flv',
'info_dict': {
'upload_date': '20070416',
'title': 'Ahornallee - Folge 1 - Der Einzug',
'description': 'Folge 1 - Der Einzug',
_VALID_URL = r'''(?x)
(?:https?://)?
(?P<url>
(?P<domain>
rtl-now\.rtl\.de|
rtl2now\.rtl2\.de|
(?:www\.)?voxnow\.de|
(?:www\.)?rtlnitronow\.de|
(?:www\.)?superrtlnow\.de|
(?:www\.)?n-tvnow\.de)
/+[a-zA-Z0-9-]+/[a-zA-Z0-9-]+\.php\?
(?:container_id|film_id)=(?P<video_id>[0-9]+)&
player=1(?:&season=[0-9]+)?(?:&.*)?
)'''
_TESTS = [
{
'url': 'http://rtl-now.rtl.de/ahornallee/folge-1.php?film_id=90419&player=1&season=1',
'info_dict': {
'id': '90419',
'ext': 'flv',
'title': 'Ahornallee - Folge 1 - Der Einzug',
'description': 'md5:ce843b6b5901d9a7f7d04d1bbcdb12de',
'upload_date': '20070416',
'duration': 1685,
},
'params': {
'skip_download': True,
},
'skip': 'Only works from Germany',
},
'params': {
'skip_download': True,
{
'url': 'http://rtl2now.rtl2.de/aerger-im-revier/episode-15-teil-1.php?film_id=69756&player=1&season=2&index=5',
'info_dict': {
'id': '69756',
'ext': 'flv',
'title': 'Ärger im Revier - Ein junger Ladendieb, ein handfester Streit u.a.',
'description': 'md5:3fb247005ed21a935ffc82b7dfa70cf0',
'thumbnail': 'http://autoimg.static-fra.de/rtl2now/219850/1500x1500/image2.jpg',
'upload_date': '20120519',
'duration': 1245,
},
'params': {
'skip_download': True,
},
'skip': 'Only works from Germany',
},
'skip': 'Only works from Germany',
},
{
'url': 'http://rtl2now.rtl2.de/aerger-im-revier/episode-15-teil-1.php?film_id=69756&player=1&season=2&index=5',
'file': '69756.flv',
'info_dict': {
'upload_date': '20120519',
'title': 'Ärger im Revier - Ein junger Ladendieb, ein handfester Streit...',
'description': 'Ärger im Revier - Ein junger Ladendieb, ein handfester Streit u.a.',
'thumbnail': 'http://autoimg.static-fra.de/rtl2now/219850/1500x1500/image2.jpg',
{
'url': 'http://www.voxnow.de/voxtours/suedafrika-reporter-ii.php?film_id=13883&player=1&season=17',
'info_dict': {
'id': '13883',
'ext': 'flv',
'title': 'Voxtours - Südafrika-Reporter II',
'description': 'md5:de7f8d56be6fd4fed10f10f57786db00',
'upload_date': '20090627',
'duration': 1800,
},
'params': {
'skip_download': True,
},
},
'params': {
'skip_download': True,
{
'url': 'http://superrtlnow.de/medicopter-117/angst.php?film_id=99205&player=1',
'info_dict': {
'id': '99205',
'ext': 'flv',
'title': 'Medicopter 117 - Angst!',
'description': 'md5:895b1df01639b5f61a04fc305a5cb94d',
'thumbnail': 'http://autoimg.static-fra.de/superrtlnow/287529/1500x1500/image2.jpg',
'upload_date': '20080928',
'duration': 2691,
},
'params': {
'skip_download': True,
},
},
'skip': 'Only works from Germany',
},
{
'url': 'http://www.voxnow.de/voxtours/suedafrika-reporter-ii.php?film_id=13883&player=1&season=17',
'file': '13883.flv',
'info_dict': {
'upload_date': '20090627',
'title': 'Voxtours - Südafrika-Reporter II',
'description': 'Südafrika-Reporter II',
{
'url': 'http://www.n-tvnow.de/deluxe-alles-was-spass-macht/thema-ua-luxushotel-fuer-vierbeiner.php?container_id=153819&player=1&season=0',
'info_dict': {
'id': '153819',
'ext': 'flv',
'title': 'Deluxe - Alles was Spaß macht - Thema u.a.: Luxushotel für Vierbeiner',
'description': 'md5:c3705e1bb32e1a5b2bcd634fc065c631',
'thumbnail': 'http://autoimg.static-fra.de/ntvnow/383157/1500x1500/image2.jpg',
'upload_date': '20140221',
'duration': 2429,
},
'skip': 'Only works from Germany',
},
'params': {
'skip_download': True,
},
},
{
'url': 'http://superrtlnow.de/medicopter-117/angst.php?film_id=99205&player=1',
'file': '99205.flv',
'info_dict': {
'upload_date': '20080928',
'title': 'Medicopter 117 - Angst!',
'description': 'Angst!',
'thumbnail': 'http://autoimg.static-fra.de/superrtlnow/287529/1500x1500/image2.jpg'
},
'params': {
'skip_download': True,
},
},
{
'url': 'http://www.n-tvnow.de/top-gear/episode-1-2013-01-01-00-00-00.php?film_id=124903&player=1&season=10',
'file': '124903.flv',
'info_dict': {
'upload_date': '20130101',
'title': 'Top Gear vom 01.01.2013',
'description': 'Episode 1',
},
'params': {
'skip_download': True,
},
'skip': 'Only works from Germany',
}]
]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
webpage_url = 'http://' + mobj.group('url')
video_page_url = 'http://' + mobj.group('domain') + '/'
video_page_url = 'http://%s/' % mobj.group('domain')
video_id = mobj.group('video_id')
webpage = self._download_webpage(webpage_url, video_id)
webpage = self._download_webpage('http://' + mobj.group('url'), video_id)
note_m = re.search(r'''(?sx)
<div[ ]style="margin-left:[ ]20px;[ ]font-size:[ ]13px;">(.*?)
<div[ ]id="playerteaser">''', webpage)
if note_m:
msg = clean_html(note_m.group(1))
raise ExtractorError(msg)
mobj = re.search(r'(?s)<div style="margin-left: 20px; font-size: 13px;">(.*?)<div id="playerteaser">', webpage)
if mobj:
raise ExtractorError(clean_html(mobj.group(1)), expected=True)
title = self._og_search_title(webpage)
description = self._og_search_description(webpage)
thumbnail = self._og_search_thumbnail(webpage, default=None)
upload_date = unified_strdate(self._html_search_meta('uploadDate', webpage, 'upload date'))
mobj = re.search(r'<meta itemprop="duration" content="PT(?P<seconds>\d+)S" />', webpage)
duration = int(mobj.group('seconds')) if mobj else None
video_title = self._html_search_regex(
r'<title>(?P<title>[^<]+?)( \| [^<]*)?</title>',
webpage, 'title')
playerdata_url = self._html_search_regex(
r'\'playerdata\': \'(?P<playerdata_url>[^\']+)\'',
webpage, 'playerdata_url')
r"'playerdata': '(?P<playerdata_url>[^']+)'", webpage, 'playerdata_url')
playerdata = self._download_webpage(playerdata_url, video_id)
mobj = re.search(r'<title><!\[CDATA\[(?P<description>.+?)(?:\s+- (?:Sendung )?vom (?P<upload_date_d>[0-9]{2})\.(?P<upload_date_m>[0-9]{2})\.(?:(?P<upload_date_Y>[0-9]{4})|(?P<upload_date_y>[0-9]{2})) [0-9]{2}:[0-9]{2} Uhr)?\]\]></title>', playerdata)
if mobj:
video_description = mobj.group('description')
if mobj.group('upload_date_Y'):
video_upload_date = mobj.group('upload_date_Y')
elif mobj.group('upload_date_y'):
video_upload_date = '20' + mobj.group('upload_date_y')
playerdata = self._download_xml(playerdata_url, video_id, 'Downloading player data XML')
videoinfo = playerdata.find('./playlist/videoinfo')
formats = []
for filename in videoinfo.findall('filename'):
mobj = re.search(r'(?P<url>rtmpe://(?:[^/]+/){2})(?P<play_path>.+)', filename.text)
if mobj:
fmt = {
'url': mobj.group('url'),
'play_path': 'mp4:' + mobj.group('play_path'),
'page_url': video_page_url,
'player_url': video_page_url + 'includes/vodplayer.swf',
}
else:
video_upload_date = None
if video_upload_date:
video_upload_date += mobj.group('upload_date_m') + mobj.group('upload_date_d')
else:
video_description = None
video_upload_date = None
self._downloader.report_warning('Unable to extract description and upload date')
# Thumbnail: not every video has an thumbnail
mobj = re.search(r'<meta property="og:image" content="(?P<thumbnail>[^"]+)">', webpage)
if mobj:
video_thumbnail = mobj.group('thumbnail')
else:
video_thumbnail = None
mobj = re.search(r'<filename [^>]+><!\[CDATA\[(?P<url>rtmpe://(?:[^/]+/){2})(?P<play_path>[^\]]+)\]\]></filename>', playerdata)
if mobj is None:
raise ExtractorError('Unable to extract media URL')
video_url = mobj.group('url')
video_play_path = 'mp4:' + mobj.group('play_path')
video_player_url = video_page_url + 'includes/vodplayer.swf'
fmt = {
'url': filename.text,
}
fmt.update({
'width': int_or_none(filename.get('width')),
'height': int_or_none(filename.get('height')),
'vbr': int_or_none(filename.get('bitrate')),
'ext': 'flv',
})
formats.append(fmt)
return {
'id': video_id,
'url': video_url,
'play_path': video_play_path,
'page_url': video_page_url,
'player_url': video_player_url,
'ext': 'flv',
'title': video_title,
'description': video_description,
'upload_date': video_upload_date,
'thumbnail': video_thumbnail,
}
'title': title,
'description': description,
'thumbnail': thumbnail,
'upload_date': upload_date,
'duration': duration,
'formats': formats,
}

View File

@@ -3,7 +3,7 @@ from __future__ import unicode_literals
import re
from .common import InfoExtractor
from youtube_dl.utils import ExtractorError
from ..utils import ExtractorError
class TinyPicIE(InfoExtractor):

View File

@@ -0,0 +1,71 @@
# encoding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
unified_strdate,
clean_html,
int_or_none,
)
class TvigleIE(InfoExtractor):
IE_NAME = 'tvigle'
IE_DESC = 'Интернет-телевидение Tvigle.ru'
_VALID_URL = r'http://(?:www\.)?tvigle\.ru/category/.+?video=(?P<id>\d+)'
_TEST = {
'url': 'http://www.tvigle.ru/category/cinema/1608/?video=503081',
'md5': '09afba4616666249f087efc6dcf83cb3',
'info_dict': {
'id': '503081',
'ext': 'flv',
'title': 'Брат 2 ',
'description': 'md5:f5a42970f50648cee3d7ad740f3ae769',
'upload_date': '20110919',
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
video_data = self._download_xml(
'http://www.tvigle.ru/xml/single.php?obj=%s' % video_id, video_id, 'Downloading video XML')
video = video_data.find('./video')
title = video.get('name')
description = video.get('anons')
if description:
description = clean_html(description)
thumbnail = video_data.get('img')
upload_date = unified_strdate(video.get('date'))
like_count = int_or_none(video.get('vtp'))
formats = []
for num, (format_id, format_note) in enumerate([['low_file', 'SQ'], ['file', 'HQ'], ['hd', 'HD 720']]):
video_url = video.get(format_id)
if not video_url:
continue
formats.append({
'url': video_url,
'format_id': format_id,
'format_note': format_note,
'quality': num,
})
self._sort_formats(formats)
return {
'id': video_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'upload_date': upload_date,
'like_count': like_count,
'age_limit': 18,
'formats': formats,
}

View File

@@ -221,7 +221,9 @@ class VimeoIE(SubtitlesInfoExtractor):
# Extract video thumbnail
video_thumbnail = config["video"].get("thumbnail")
if video_thumbnail is None:
_, video_thumbnail = sorted((int(width), t_url) for (width, t_url) in config["video"]["thumbs"].items())[-1]
video_thumbs = config["video"].get("thumbs")
if video_thumbs and isinstance(video_thumbs, dict):
_, video_thumbnail = sorted((int(width), t_url) for (width, t_url) in video_thumbs.items())[-1]
# Extract video description
video_description = None

View File

@@ -16,7 +16,7 @@ from ..utils import (
class VKIE(InfoExtractor):
IE_NAME = 'vk.com'
_VALID_URL = r'https?://vk\.com/(?:videos.*?\?.*?z=)?video(?P<id>.*?)(?:\?|%2F|$)'
_VALID_URL = r'https?://vk\.com/(?:video_ext\.php\?.*?\boid=(?P<oid>\d+).*?\bid=(?P<id>\d+)|(?:videos.*?\?.*?z=)?video(?P<videoid>.*?)(?:\?|%2F|$))'
_NETRC_MACHINE = 'vk'
_TESTS = [
@@ -42,6 +42,18 @@ class VKIE(InfoExtractor):
'duration': 558,
}
},
{
'note': 'Embedded video',
'url': 'http://vk.com/video_ext.php?oid=32194266&id=162925554&hash=7d8c2e0d5e05aeaa&hd=1',
'md5': 'c7ce8f1f87bec05b3de07fdeafe21a0a',
'info_dict': {
'id': '162925554',
'ext': 'mp4',
'uploader': 'Vladimir Gavrin',
'title': 'Lin Dan',
'duration': 101,
}
},
{
'url': 'http://vk.com/video-8871596_164049491',
'md5': 'a590bcaf3d543576c9bd162812387666',
@@ -54,7 +66,7 @@ class VKIE(InfoExtractor):
'duration': 8352,
},
'skip': 'Requires vk account credentials',
}
},
]
def _login(self):
@@ -82,7 +94,10 @@ class VKIE(InfoExtractor):
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
video_id = mobj.group('videoid')
if not video_id:
video_id = '%s_%s' % (mobj.group('oid'), mobj.group('id'))
info_url = 'http://vk.com/al_video.php?act=show&al=1&video=%s' % video_id
info_page = self._download_webpage(info_url, video_id)
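
A sketch (not part of the changeset) of how the widened _VALID_URL yields the composite id for embedded players; the regex and URL are taken from the diff and its new test.

# Sketch: composing the video id from an embed URL, as the new code path does.
import re

_VALID_URL = r'https?://vk\.com/(?:video_ext\.php\?.*?\boid=(?P<oid>\d+).*?\bid=(?P<id>\d+)|(?:videos.*?\?.*?z=)?video(?P<videoid>.*?)(?:\?|%2F|$))'
url = 'http://vk.com/video_ext.php?oid=32194266&id=162925554&hash=7d8c2e0d5e05aeaa&hd=1'
mobj = re.match(_VALID_URL, url)
video_id = mobj.group('videoid') or '%s_%s' % (mobj.group('oid'), mobj.group('id'))
print(video_id)  # -> 32194266_162925554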

View File

@@ -103,6 +103,7 @@ class XHamsterIE(InfoExtractor):
}]
if not hd:
mrss_url = self._search_regex(r'<link rel="canonical" href="([^"]+)', webpage, 'mrss_url')
webpage = self._download_webpage(mrss_url + '?hd', video_id, note='Downloading HD webpage')
if is_hd(webpage):
video_url = extract_video_url(webpage)

View File

@@ -1645,7 +1645,7 @@ class YoutubeChannelIE(InfoExtractor):
class YoutubeUserIE(InfoExtractor):
IE_DESC = u'YouTube.com user videos (URL or "ytuser" keyword)'
_VALID_URL = r'(?:(?:(?:https?://)?(?:\w+\.)?youtube\.com/(?:user/)?(?!(?:attribution_link|watch)(?:$|[^a-z_A-Z0-9-])))|ytuser:)(?!feed/)([A-Za-z0-9_-]+)'
_VALID_URL = r'(?:(?:(?:https?://)?(?:\w+\.)?youtube\.com/(?:user/)?(?!(?:attribution_link|watch|results)(?:$|[^a-z_A-Z0-9-])))|ytuser:)(?!feed/)([A-Za-z0-9_-]+)'
_TEMPLATE_URL = 'https://gdata.youtube.com/feeds/api/users/%s'
_GDATA_PAGE_SIZE = 50
_GDATA_URL = 'https://gdata.youtube.com/feeds/api/users/%s/uploads?max-results=%d&start-index=%d&alt=json'
@@ -1744,12 +1744,50 @@ class YoutubeSearchIE(SearchInfoExtractor):
for video_id in video_ids]
return self.playlist_result(videos, query)
class YoutubeSearchDateIE(YoutubeSearchIE):
IE_NAME = YoutubeSearchIE.IE_NAME + ':date'
_API_URL = 'https://gdata.youtube.com/feeds/api/videos?q=%s&start-index=%i&max-results=50&v=2&alt=jsonc&orderby=published'
_SEARCH_KEY = 'ytsearchdate'
IE_DESC = u'YouTube.com searches, newest videos first'
class YoutubeSearchURLIE(InfoExtractor):
IE_DESC = u'YouTube.com search URLs'
IE_NAME = u'youtube:search_url'
_VALID_URL = r'https?://(?:www\.)?youtube\.com/results\?(.*?&)?search_query=(?P<query>[^&]+)(?:[&]|$)'
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
query = compat_urllib_parse.unquote_plus(mobj.group('query'))
webpage = self._download_webpage(url, query)
result_code = self._search_regex(
r'(?s)<ol id="search-results"(.*?)</ol>', webpage, u'result HTML')
part_codes = re.findall(
r'(?s)<h3 class="yt-lockup-title">(.*?)</h3>', result_code)
entries = []
for part_code in part_codes:
part_title = self._html_search_regex(
r'(?s)title="([^"]+)"', part_code, 'item title', fatal=False)
part_url_snippet = self._html_search_regex(
r'(?s)href="([^"]+)"', part_code, 'item URL')
part_url = compat_urlparse.urljoin(
'https://www.youtube.com/', part_url_snippet)
entries.append({
'_type': 'url',
'url': part_url,
'title': part_title,
})
return {
'_type': 'playlist',
'entries': entries,
'title': query,
}
class YoutubeShowIE(InfoExtractor):
IE_DESC = u'YouTube.com (multi-season) shows'
_VALID_URL = r'https?://www\.youtube\.com/show/(.*)'
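
A small sketch (not part of the changeset) of what the new search-URL pattern accepts; the regex is copied from YoutubeSearchURLIE above and the two URLs are the ones used in the tests.

# Sketch: matching search-results URLs and pulling out the raw query.
import re

_VALID_URL = r'https?://(?:www\.)?youtube\.com/results\?(.*?&)?search_query=(?P<query>[^&]+)(?:[&]|$)'
for url in (
        'http://www.youtube.com/results?search_query=making+mustard',
        'https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video'):
    mobj = re.match(_VALID_URL, url)
    print(mobj.group('query'))  # still plus-quoted; the extractor unquotes it for the playlist title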

View File

@@ -772,6 +772,7 @@ def unified_strdate(date_str):
'%B %d %Y',
'%b %d %Y',
'%Y-%m-%d',
'%d.%m.%Y',
'%d/%m/%Y',
'%Y/%m/%d %H:%M:%S',
'%Y-%m-%d %H:%M:%S',
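
The added '%d.%m.%Y' entry covers the German-style dates used by the RTL NOW and ProSiebenSat.1 pages. A one-line sketch (not part of the changeset), assuming this tree is importable; the date matches the superrtlnow test's upload_date.

# Sketch: the newly supported date format.
from youtube_dl.utils import unified_strdate
print(unified_strdate('28.09.2008'))  # -> 20080928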

View File

@@ -1,2 +1,2 @@
__version__ = '2014.02.26'
__version__ = '2014.03.04'