# encoding: utf-8

from __future__ import unicode_literals

import os
import re

from .common import InfoExtractor
from .youtube import YoutubeIE
from ..utils import (
    compat_urllib_error,
    compat_urllib_parse,
    compat_urllib_request,
    compat_urlparse,
    compat_xml_parse_error,
    ExtractorError,
    HEADRequest,
    parse_xml,
    smuggle_url,
    unescapeHTML,
    unified_strdate,
    url_basename,
)
from .brightcove import BrightcoveIE
from .ooyala import OoyalaIE
from .rutv import RUTVIE


class GenericIE(InfoExtractor):
    IE_DESC = 'Generic downloader that works on some sites'
    _VALID_URL = r'.*'
    IE_NAME = 'generic'
    _TESTS = [
        {
            'url': 'http://www.hodiho.fr/2013/02/regis-plante-sa-jeep.html',
            'file': '13601338388002.mp4',
            'md5': '6e15c93721d7ec9e9ca3fdbf07982cfd',
            'info_dict': {
                'uploader': 'www.hodiho.fr',
                'title': 'R\u00e9gis plante sa Jeep',
            }
        },
        # bandcamp page with custom domain
        {
            'add_ie': ['Bandcamp'],
            'url': 'http://bronyrock.com/track/the-pony-mash',
            'file': '3235767654.mp3',
            'info_dict': {
                'title': 'The Pony Mash',
                'uploader': 'M_Pallante',
            },
            'skip': 'There is a limit of 200 free downloads / month for the test song',
        },
        # embedded brightcove video
        # it also tests brightcove videos that need to set the 'Referer' in the
        # http requests
        {
            'add_ie': ['Brightcove'],
            'url': 'http://www.bfmtv.com/video/bfmbusiness/cours-bourse/cours-bourse-l-analyse-technique-154522/',
            'info_dict': {
                'id': '2765128793001',
                'ext': 'mp4',
                'title': 'Le cours de bourse : l’analyse technique',
                'description': 'md5:7e9ad046e968cb2d1114004aba466fd9',
                'uploader': 'BFM BUSINESS',
            },
            'params': {
                'skip_download': True,
            },
        },
        {
            # https://github.com/rg3/youtube-dl/issues/2253
            'url': 'http://bcove.me/i6nfkrc3',
            'file': '3101154703001.mp4',
            'md5': '0ba9446db037002366bab3b3eb30c88c',
            'info_dict': {
                'title': 'Still no power',
                'uploader': 'thestar.com',
                'description': 'Mississauga resident David Farmer is still out of power as a result of the ice storm a month ago. To keep the house warm, Farmer cuts wood from his property for a wood burning stove downstairs.',
            },
            'add_ie': ['Brightcove'],
        },
        # Direct link to a video
        {
            'url': 'http://media.w3.org/2010/05/sintel/trailer.mp4',
            'md5': '67d406c2bcb6af27fa886f31aa934bbe',
            'info_dict': {
                'id': 'trailer',
                'ext': 'mp4',
                'title': 'trailer',
                'upload_date': '20100513',
            }
        },
        # ooyala video
        {
            'url': 'http://www.rollingstone.com/music/videos/norwegian-dj-cashmere-cat-goes-spartan-on-with-me-premiere-20131219',
            'md5': '5644c6ca5d5782c1d0d350dad9bd840c',
            'info_dict': {
                'id': 'BwY2RxaTrTkslxOfcan0UCf0YqyvWysJ',
                'ext': 'mp4',
                'title': '2cc213299525360.mov',  # that's what we get
            },
        },
        # google redirect
        {
            'url': 'http://www.google.com/url?sa=t&rct=j&q=&esrc=s&source=web&cd=1&cad=rja&ved=0CCUQtwIwAA&url=http%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DcmQHVoWB5FY&ei=F-sNU-LLCaXk4QT52ICQBQ&usg=AFQjCNEw4hL29zgOohLXvpJ-Bdh2bils1Q&bvm=bv.61965928,d.bGE',
            'info_dict': {
                'id': 'cmQHVoWB5FY',
                'ext': 'mp4',
                'upload_date': '20130224',
                'uploader_id': 'TheVerge',
                'description': 'Chris Ziegler takes a look at the Alcatel OneTouch Fire and the ZTE Open; two of the first Firefox OS handsets to be officially announced.',
                'uploader': 'The Verge',
                'title': 'First Firefox OS phones side-by-side',
            },
            'params': {
                'skip_download': False,
            }
        },
        # embed.ly video
        {
            'url': 'http://www.tested.com/science/weird/460206-tested-grinding-coffee-2000-frames-second/',
            'info_dict': {
                'id': '9ODmcdjQcHQ',
                'ext': 'mp4',
                'title': 'Tested: Grinding Coffee at 2000 Frames Per Second',
                'upload_date': '20140225',
                'description': 'md5:06a40fbf30b220468f1e0957c0f558ff',
                'uploader': 'Tested',
                'uploader_id': 'testedcom',
            },
            # No need to test YoutubeIE here
            'params': {
                'skip_download': True,
            },
        },
        # funnyordie embed
        {
            'url': 'http://www.theguardian.com/world/2014/mar/11/obama-zach-galifianakis-between-two-ferns',
            'md5': '7cf780be104d40fea7bae52eed4a470e',
            'info_dict': {
                'id': '18e820ec3f',
                'ext': 'mp4',
                'title': 'Between Two Ferns with Zach Galifianakis: President Barack Obama',
                'description': 'Episode 18: President Barack Obama sits down with Zach Galifianakis for his most memorable interview yet.',
            },
        },
        # RUTV embed
        {
            'url': 'http://www.rg.ru/2014/03/15/reg-dfo/anklav-anons.html',
            'info_dict': {
                'id': '776940',
                'ext': 'mp4',
                'title': 'Охотское море стало целиком российским',
                'description': 'md5:5ed62483b14663e2a95ebbe115eb8f43',
            },
            'params': {
                # m3u8 download
                'skip_download': True,
            },
        }
    ]

    def report_download_webpage(self, video_id):
        """Report webpage download."""
        if not self._downloader.params.get('test', False):
            self._downloader.report_warning('Falling back on generic information extractor.')
        super(GenericIE, self).report_download_webpage(video_id)

    def report_following_redirect(self, new_url):
        """Report information extraction."""
        self._downloader.to_screen('[redirect] Following redirect to %s' % new_url)

    def _send_head(self, url):
        """Check whether the URL is a redirect (e.g. a URL shortener) and, if so, return the response for the final URL."""

        class HEADRedirectHandler(compat_urllib_request.HTTPRedirectHandler):
            """
            Subclass the HTTPRedirectHandler to make it use our
            HEADRequest also on the redirected URL
            """
            def redirect_request(self, req, fp, code, msg, headers, newurl):
                if code in (301, 302, 303, 307):
                    newurl = newurl.replace(' ', '%20')
                    newheaders = dict((k, v) for k, v in req.headers.items()
                                      if k.lower() not in ("content-length", "content-type"))
                    return HEADRequest(newurl,
                                       headers=newheaders,
                                       origin_req_host=req.get_origin_req_host(),
                                       unverifiable=True)
                else:
                    raise compat_urllib_error.HTTPError(req.get_full_url(), code, msg, headers, fp)

        class HTTPMethodFallback(compat_urllib_request.BaseHandler):
            """
            Fallback to GET if HEAD is not allowed (405 HTTP error)
            """
            def http_error_405(self, req, fp, code, msg, headers):
                fp.read()
                fp.close()

                newheaders = dict((k, v) for k, v in req.headers.items()
                                  if k.lower() not in ("content-length", "content-type"))
                return self.parent.open(compat_urllib_request.Request(req.get_full_url(),
                                                                      headers=newheaders,
                                                                      origin_req_host=req.get_origin_req_host(),
                                                                      unverifiable=True))

        # Build our opener
        opener = compat_urllib_request.OpenerDirector()
        for handler in [compat_urllib_request.HTTPHandler, compat_urllib_request.HTTPDefaultErrorHandler,
                        HTTPMethodFallback, HEADRedirectHandler,
                        compat_urllib_request.HTTPErrorProcessor, compat_urllib_request.HTTPSHandler]:
            opener.add_handler(handler())

        response = opener.open(HEADRequest(url))
        if response is None:
            raise ExtractorError('Invalid URL protocol')
        return response
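
    # Rough illustration of how _send_head() behaves (hypothetical URLs, not
    # part of the extractor): a HEAD request to a link shortener such as
    # http://short.example/abc is re-issued as a HEAD request on every
    # 301/302/303/307 hop by HEADRedirectHandler, so response.geturl() ends up
    # being the expanded URL; servers that reject HEAD with 405 are retried
    # with GET by HTTPMethodFallback; and a direct media link answers with a
    # Content-Type such as 'video/mp4', which _real_extract() below turns into
    # a direct-download info dict.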

    def _extract_rss(self, url, video_id, doc):
        playlist_title = doc.find('./channel/title').text
        playlist_desc_el = doc.find('./channel/description')
        playlist_desc = None if playlist_desc_el is None else playlist_desc_el.text

        entries = [{
            '_type': 'url',
            'url': e.find('link').text,
            'title': e.find('title').text,
        } for e in doc.findall('./channel/item')]

        return {
            '_type': 'playlist',
            'id': url,
            'title': playlist_title,
            'description': playlist_desc,
            'entries': entries,
        }
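
    # For reference, a feed like
    #   <rss><channel><title>Example Cast</title>
    #     <item><title>Episode 1</title><link>http://example.com/ep1</link></item>
    #   </channel></rss>
    # is mapped by _extract_rss() to a playlist result along the lines of
    #   {'_type': 'playlist', 'id': url, 'title': 'Example Cast',
    #    'description': None,
    #    'entries': [{'_type': 'url', 'url': 'http://example.com/ep1',
    #                 'title': 'Episode 1'}]}
    # (illustrative values only).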

    def _real_extract(self, url):
        parsed_url = compat_urlparse.urlparse(url)
        if not parsed_url.scheme:
            default_search = self._downloader.params.get('default_search')
            if default_search is None:
                default_search = 'auto'

            if default_search == 'auto':
                if '/' in url:
                    self._downloader.report_warning('The url doesn\'t specify the protocol, trying with http')
                    return self.url_result('http://' + url)
                else:
                    return self.url_result('ytsearch:' + url)
            else:
                assert ':' in default_search
                return self.url_result(default_search + url)

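        # Illustration of the scheme-less handling above (hypothetical
        # inputs): 'example.com/video' is retried as 'http://example.com/video',
        # a bare 'some search words' becomes 'ytsearch:some search words', and
        # a configured default_search prefix (which must contain ':') is
        # simply prepended to the input.
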
        video_id = os.path.splitext(url.rstrip('/').split('/')[-1])[0]

        self.to_screen('%s: Requesting header' % video_id)

        try:
            response = self._send_head(url)

            # Check for redirect
            new_url = response.geturl()
            if url != new_url:
                self.report_following_redirect(new_url)
                return self.url_result(new_url)

            # Check for direct link to a video
            content_type = response.headers.get('Content-Type', '')
            m = re.match(r'^(?P<type>audio|video|application(?=/ogg$))/(?P<format_id>.+)$', content_type)
            if m:
                upload_date = response.headers.get('Last-Modified')
                if upload_date:
                    upload_date = unified_strdate(upload_date)
                return {
                    'id': video_id,
                    'title': os.path.splitext(url_basename(url))[0],
                    'formats': [{
                        'format_id': m.group('format_id'),
                        'url': url,
                        'vcodec': 'none' if m.group('type') == 'audio' else None
                    }],
                    'upload_date': upload_date,
                }

        except compat_urllib_error.HTTPError:
            # This may be a stupid server that doesn't like HEAD, our UA, or so
            pass
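
        # Example of the direct-link branch above, using the trailer.mp4 test
        # case from _TESTS: a HEAD response with 'Content-Type: video/mp4'
        # produces an info dict roughly like
        #   {'id': 'trailer', 'title': 'trailer', 'upload_date': '20100513',
        #    'formats': [{'format_id': 'mp4', 'url': url, 'vcodec': None}]}
        # (nothing is downloaded at this point; 'url' is the input URL itself).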

        try:
            webpage = self._download_webpage(url, video_id)
        except ValueError:
            # since this is the last-resort InfoExtractor, if
            # this error is thrown, it'll be thrown here
            raise ExtractorError('Failed to download URL: %s' % url)

        self.report_extraction(video_id)

        # Is it an RSS feed?
        try:
            doc = parse_xml(webpage)
            if doc.tag == 'rss':
                return self._extract_rss(url, video_id, doc)
        except compat_xml_parse_error:
            pass

        # it's tempting to parse this further, but you would
        # have to take into account all the variations like
        #   Video Title - Site Name
        #   Site Name | Video Title
        #   Video Title - Tagline | Site Name
        # and so on and so forth; it's just not practical
        video_title = self._html_search_regex(
            r'(?s)<title>(.*?)</title>', webpage, 'video title',
            default='video')

        # video uploader is domain name
        video_uploader = self._search_regex(
            r'^(?:https?://)?([^/]*)/.*', url, 'video uploader')

        # Look for BrightCove:
        bc_urls = BrightcoveIE._extract_brightcove_urls(webpage)
        if bc_urls:
            self.to_screen('Brightcove video detected.')
            entries = [{
                '_type': 'url',
                'url': smuggle_url(bc_url, {'Referer': url}),
                'ie_key': 'Brightcove'
            } for bc_url in bc_urls]

            return {
                '_type': 'playlist',
                'title': video_title,
                'id': video_id,
                'entries': entries,
            }

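        # Note on the smuggled 'Referer' above: some Brightcove players (see
        # the bfmtv.com entry in _TESTS) only work when the embedding page is
        # sent as the Referer, so the page URL is tucked into each entry URL
        # for the Brightcove extractor to pick up later.
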
        # Look for embedded (iframe) Vimeo player
        mobj = re.search(
            r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//player\.vimeo\.com/video/.+?)\1', webpage)
        if mobj:
            player_url = unescapeHTML(mobj.group('url'))
            surl = smuggle_url(player_url, {'Referer': url})
            return self.url_result(surl, 'Vimeo')

        # Look for embedded (swf embed) Vimeo player
        mobj = re.search(
            r'<embed[^>]+?src="(https?://(?:www\.)?vimeo\.com/moogaloop\.swf.+?)"', webpage)
        if mobj:
            return self.url_result(mobj.group(1), 'Vimeo')

        # Look for embedded YouTube player
        matches = re.findall(r'''(?x)
            (?:<iframe[^>]+?src=|embedSWF\(\s*)
            (["\'])(?P<url>(?:https?:)?//(?:www\.)?youtube\.com/
                (?:embed|v)/.+?)
            \1''', webpage)
        if matches:
            urlrs = [self.url_result(unescapeHTML(tuppl[1]), 'Youtube')
                     for tuppl in matches]
            return self.playlist_result(
                urlrs, playlist_id=video_id, playlist_title=video_title)

        # Look for embedded Dailymotion player
        matches = re.findall(
            r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?dailymotion\.com/embed/video/.+?)\1', webpage)
        if matches:
            urlrs = [self.url_result(unescapeHTML(tuppl[1]), 'Dailymotion')
                     for tuppl in matches]
            return self.playlist_result(
                urlrs, playlist_id=video_id, playlist_title=video_title)

        # Look for embedded Wistia player
        match = re.search(
            r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:fast\.)?wistia\.net/embed/iframe/.+?)\1', webpage)
        if match:
            return {
                '_type': 'url_transparent',
                'url': unescapeHTML(match.group('url')),
                'ie_key': 'Wistia',
                'uploader': video_uploader,
                'title': video_title,
                'id': video_id,
            }

        # Look for embedded blip.tv player
        mobj = re.search(r'<meta\s[^>]*https?://api\.blip\.tv/\w+/redirect/\w+/(\d+)', webpage)
        if mobj:
            return self.url_result('http://blip.tv/a/a-' + mobj.group(1), 'BlipTV')
        mobj = re.search(r'<(?:iframe|embed|object)\s[^>]*(https?://(?:\w+\.)?blip\.tv/(?:play/|api\.swf#)[a-zA-Z0-9]+)', webpage)
        if mobj:
            return self.url_result(mobj.group(1), 'BlipTV')

        # Look for Bandcamp pages with custom domain
        mobj = re.search(r'<meta property="og:url"[^>]*?content="(.*?bandcamp\.com.*?)"', webpage)
        if mobj is not None:
            burl = unescapeHTML(mobj.group(1))
            # Don't set the extractor because it can be a track url or an album
            return self.url_result(burl)

        # Look for embedded Vevo player
        mobj = re.search(
            r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:cache\.)?vevo\.com/.+?)\1', webpage)
        if mobj is not None:
            return self.url_result(mobj.group('url'))

        # Look for Ooyala videos
        mobj = re.search(r'player.ooyala.com/[^"?]+\?[^"]*?(?:embedCode|ec)=([^"&]+)', webpage)
        if mobj is not None:
            return OoyalaIE._build_url_result(mobj.group(1))

        # Look for Aparat videos
        mobj = re.search(r'<iframe src="(http://www\.aparat\.com/video/[^"]+)"', webpage)
        if mobj is not None:
            return self.url_result(mobj.group(1), 'Aparat')

        # Look for MPORA videos
        mobj = re.search(r'<iframe .*?src="(http://mpora\.(?:com|de)/videos/[^"]+)"', webpage)
        if mobj is not None:
            return self.url_result(mobj.group(1), 'Mpora')

        # Look for embedded NovaMov player
        mobj = re.search(
            r'<iframe[^>]+?src=(["\'])(?P<url>http://(?:(?:embed|www)\.)?novamov\.com/embed\.php.+?)\1', webpage)
        if mobj is not None:
            return self.url_result(mobj.group('url'), 'NovaMov')

        # Look for embedded NowVideo player
        mobj = re.search(
            r'<iframe[^>]+?src=(["\'])(?P<url>http://(?:(?:embed|www)\.)?nowvideo\.(?:ch|sx|eu)/embed\.php.+?)\1', webpage)
        if mobj is not None:
            return self.url_result(mobj.group('url'), 'NowVideo')

        # Look for embedded Facebook player
        mobj = re.search(
            r'<iframe[^>]+?src=(["\'])(?P<url>https://www\.facebook\.com/video/embed.+?)\1', webpage)
        if mobj is not None:
            return self.url_result(mobj.group('url'), 'Facebook')

        # Look for embedded VK player
        mobj = re.search(r'<iframe[^>]+?src=(["\'])(?P<url>https?://vk\.com/video_ext\.php.+?)\1', webpage)
        if mobj is not None:
            return self.url_result(mobj.group('url'), 'VK')

        # Look for embedded Huffington Post player
        mobj = re.search(
            r'<iframe[^>]+?src=(["\'])(?P<url>https?://embed\.live\.huffingtonpost\.com/.+?)\1', webpage)
        if mobj is not None:
            return self.url_result(mobj.group('url'), 'HuffPost')

        # Look for embed.ly
        mobj = re.search(r'class=["\']embedly-card["\'][^>]href=["\'](?P<url>[^"\']+)', webpage)
        if mobj is not None:
            return self.url_result(mobj.group('url'))
        mobj = re.search(r'class=["\']embedly-embed["\'][^>]src=["\'][^"\']*url=(?P<url>[^&]+)', webpage)
        if mobj is not None:
            return self.url_result(compat_urllib_parse.unquote(mobj.group('url')))

        # Look for funnyordie embed
        matches = re.findall(r'<iframe[^>]+?src="(https?://(?:www\.)?funnyordie\.com/embed/[^"]+)"', webpage)
        if matches:
            urlrs = [self.url_result(unescapeHTML(eurl), 'FunnyOrDie')
                     for eurl in matches]
            return self.playlist_result(
                urlrs, playlist_id=video_id, playlist_title=video_title)

        # Look for embedded RUTV player
        rutv_url = RUTVIE._extract_url(webpage)
        if rutv_url:
            return self.url_result(rutv_url, 'RUTV')

        # Start with something easy: JW Player in SWFObject
        mobj = re.search(r'flashvars: [\'"](?:.*&)?file=(http[^\'"&]*)', webpage)
        if mobj is None:
            # Look for gorilla-vid style embedding
            mobj = re.search(r'(?s)(?:jw_plugins|JWPlayerOptions).*?file\s*:\s*["\'](.*?)["\']', webpage)
        if mobj is None:
            # Broaden the search a little bit
            mobj = re.search(r'[^A-Za-z0-9]?(?:file|source)=(http[^\'"&]*)', webpage)
        if mobj is None:
            # Broaden the search a little bit: JWPlayer JS loader
            mobj = re.search(r'[^A-Za-z0-9]?file["\']?:\s*["\'](http(?![^\'"]+\.[0-9]+[\'"])[^\'"]+)["\']', webpage)
        if mobj is None:
            # Try to find twitter cards info
            mobj = re.search(r'<meta (?:property|name)="twitter:player:stream" (?:content|value)="(.+?)"', webpage)
        if mobj is None:
            # We look for Open Graph info:
            # We have to match any number spaces between elements, some sites try to align them (eg.: statigr.am)
            m_video_type = re.search(r'<meta.*?property="og:video:type".*?content="video/(.*?)"', webpage)
            # We only look in og:video if the MIME type is a video, don't try if it's a Flash player:
            if m_video_type is not None:
                mobj = re.search(r'<meta.*?property="og:video".*?content="(.*?)"', webpage)
        if mobj is None:
            # HTML5 video
            mobj = re.search(r'<video[^<]*(?:>.*?<source.*?)? src="([^"]+)"', webpage, flags=re.DOTALL)
        if mobj is None:
            # Finally, look for an HTML meta-refresh redirect
            mobj = re.search(
                r'(?i)<meta\s+(?=(?:[a-z-]+="[^"]+"\s+)*http-equiv="refresh")'
                r'(?:[a-z-]+="[^"]+"\s+)*?content="[0-9]{,2};url=\'([^\']+)\'"',
                webpage)
            if mobj:
                new_url = mobj.group(1)
                self.report_following_redirect(new_url)
                return {
                    '_type': 'url',
                    'url': new_url,
                }

        if mobj is None:
            raise ExtractorError('Unsupported URL: %s' % url)

        # It's possible that one of the regexes
        # matched, but returned an empty group:
        if mobj.group(1) is None:
            raise ExtractorError('Did not find a valid video URL at %s' % url)

        video_url = mobj.group(1)
        video_url = compat_urlparse.urljoin(url, video_url)
        video_id = compat_urllib_parse.unquote(os.path.basename(video_url))

        # Sometimes, jwplayer extraction will result in a YouTube URL
        if YoutubeIE.suitable(video_url):
            return self.url_result(video_url, 'Youtube')

        # here's a fun little line of code for you:
        video_id = os.path.splitext(video_id)[0]

        return {
            'id': video_id,
            'url': video_url,
            'uploader': video_uploader,
            'title': video_title,
        }
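

# Minimal usage sketch (assumptions: this module ships inside the youtube_dl
# package and GenericIE is reached, as usual, only after every specialised
# extractor has declined the URL):
#
#     import youtube_dl
#     ydl = youtube_dl.YoutubeDL({'quiet': True})
#     info = ydl.extract_info(
#         'http://www.hodiho.fr/2013/02/regis-plante-sa-jeep.html',
#         download=False)
#     print(info['title'])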