from __future__ import unicode_literals

import itertools
import json
import re

from .common import InfoExtractor, SearchInfoExtractor
from ..utils import (
    compat_urllib_parse,
    compat_urlparse,
    clean_html,
    int_or_none,
)


class YahooIE(InfoExtractor):
    """Extractor for videos hosted on screen.yahoo.com.

    Metadata and stream URLs are fetched through Yahoo's public YQL
    endpoint, since the 'meta' field is not always embedded in the
    video webpage itself.
    """
    IE_DESC = 'Yahoo screen'
    _VALID_URL = r'https?://screen\.yahoo\.com/.*?-(?P<id>[0-9]+)(?:-[a-z]+)?\.html'
    _TESTS = [
        {
            'url': 'http://screen.yahoo.com/julian-smith-travis-legg-watch-214727115.html',
            'md5': '4962b075c08be8690a922ee026d05e69',
            'info_dict': {
                'id': '214727115',
                'ext': 'mp4',
                'title': 'Julian Smith & Travis Legg Watch Julian Smith',
                'description': 'Julian and Travis watch Julian Smith',
            },
        },
        {
            'url': 'http://screen.yahoo.com/wired/codefellas-s1-ep12-cougar-lies-103000935.html',
            'md5': 'd6e6fc6e1313c608f316ddad7b82b306',
            'info_dict': {
                'id': '103000935',
                'ext': 'mp4',
                'title': 'Codefellas - The Cougar Lies with Spanish Moss',
                'description': 'Agent Topple\'s mustache does its dirty work, and Nicole brokers a deal for peace. But why is the NSA collecting millions of Instagram brunch photos? And if your waffles have nothing to hide, what are they so worried about?',
            },
        },
    ]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        webpage = self._download_webpage(url, video_id)

        # The page embeds a JSON blob after a 'mediaItems:' marker; grab it
        # up to the end of that line.
        items_json = self._search_regex(
            r'mediaItems: ({.*?})$', webpage, 'items', flags=re.MULTILINE)
        items = json.loads(items_json)
        info = items['mediaItems']['query']['results']['mediaObj'][0]
        # The 'meta' field is not always in the video webpage, we request it
        # from another page
        long_id = info['id']
        return self._get_info(long_id, video_id)

    def _get_info(self, long_id, video_id):
        """Query Yahoo's YQL endpoint for stream info and build the
        info dict for *video_id* (identified internally by *long_id*)."""
        query = ('SELECT * FROM yahoo.media.video.streams WHERE id="%s"'
                 ' AND plrs="86Gj0vCaSzV_Iuf6hNylf2" AND region="US"'
                 ' AND protocol="http"' % long_id)
        data = compat_urllib_parse.urlencode({
            'q': query,
            'env': 'prod',
            'format': 'json',
        })
        query_result = self._download_json(
            'http://video.query.yahoo.com/v1/public/yql?' + data,
            video_id, 'Downloading video info')
        info = query_result['query']['results']['mediaObj'][0]
        meta = info['meta']

        formats = []
        for s in info['streams']:
            format_info = {
                'width': int_or_none(s.get('width')),
                'height': int_or_none(s.get('height')),
                'tbr': int_or_none(s.get('bitrate')),
            }
            host = s['host']
            path = s['path']
            if host.startswith('rtmp'):
                # RTMP streams carry the play path separately from the host.
                format_info.update({
                    'url': host,
                    'play_path': path,
                    'ext': 'flv',
                })
            else:
                format_info['url'] = compat_urlparse.urljoin(host, path)
            formats.append(format_info)
        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': meta['title'],
            'formats': formats,
            'description': clean_html(meta['description']),
            'thumbnail': meta['thumbnail'],
        }


class YahooNewsIE(YahooIE):
    """Extractor for news.yahoo.com videos.

    Reuses YahooIE._get_info; only the page scraping that yields the
    internal long id differs.
    """
    IE_NAME = 'yahoo:news'
    _VALID_URL = r'http://news\.yahoo\.com/video/.*?-(?P<id>\d*?)\.html'
    _TEST = {
        'url': 'http://news.yahoo.com/video/china-moses-crazy-blues-104538833.html',
        'md5': '67010fdf3a08d290e060a4dd96baa07b',
        'info_dict': {
            'id': '104538833',
            'ext': 'mp4',
            'title': 'China Moses Is Crazy About the Blues',
            'description': 'md5:9900ab8cd5808175c7b3fe55b979bed0',
        },
    }

    # Overwrite YahooIE properties we don't want
    _TESTS = []

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        webpage = self._download_webpage(url, video_id)
        # The long (internal) id is embedded in the page's player config.
        long_id = self._search_regex(r'contentId: \'(.+?)\',', webpage, 'long id')
        return self._get_info(long_id, video_id)


class YahooSearchIE(SearchInfoExtractor):
    """Search extractor for Yahoo screen (yvsearch: prefix)."""
    IE_DESC = 'Yahoo screen search'
    _MAX_RESULTS = 1000
    IE_NAME = 'screen.yahoo:search'
    _SEARCH_KEY = 'yvsearch'

    def _get_n_results(self, query, n):
        """Get a specified number of results for a query.

        Pages through the search API 30 results at a time and returns a
        playlist dict with up to *n* url_result entries.
        """
        entries = []
        for pagenum in itertools.count(0):
            result_url = (
                'http://video.search.yahoo.com/search/?p=%s&fr=screen&o=js&gs=0&b=%d'
                % (compat_urllib_parse.quote_plus(query), pagenum * 30))
            info = self._download_json(
                result_url, query,
                note='Downloading results page ' + str(pagenum + 1))
            m = info['m']
            results = info['results']

            # 'done' replaces re-reading the inner loop variable after the
            # loop, which would raise NameError on an empty results page.
            done = False
            for i, r in enumerate(results):
                if pagenum * 30 + i >= n:
                    done = True
                    break
                mobj = re.search(r'(?P<url>screen\.yahoo\.com/.*?-\d*?\.html)"', r)
                entries.append(self.url_result('http://' + mobj.group('url'), 'Yahoo'))
            # Stop when we have enough entries or the API reports this is
            # the last page.
            if done or m['last'] >= m['total'] - 1:
                break

        return {
            '_type': 'playlist',
            'id': query,
            'entries': entries,
        }