Compare commits

82 commits: 2014.10.26 ... 2014.11.02

Commit SHA1s:
c30ae9594c, ffae28ae18, d9116714f2, 08965906a8, 5263cdfcf9, b2a68d14cf, 6e1cff9c33, 72975729c8,
d319948b6a, 9a4bf889f9, 2a834bdb21, 0d2c141865, 5ec39d8b96, 7b6de3728a, a51d3aa001, 2c8e03d937,
fbb21cf528, b8a618f898, feb74960eb, d65d628613, ac645ac7d0, 7d11297f3f, 6ad4013d40, dbd1283d31,
c451d4f553, 8abec2c8bb, a9bad429b3, 50c8266ef0, 00edd4f9be, ee966928af, e5193599ec, 01d663bca3,
e0c51cdadc, 9334f8f17a, 632256d9ec, 03df7baa6a, 3511266bc3, 9fdece5d34, bbf1092ad0, 9ef55c5bbc,
48a24ab746, 68acdbda9d, 27c542c06f, aaa399d2f6, b2e6a1c14c, 8cc3eba79a, b0fb6d4db1, 81515ad9f6,
8112d4b284, bf7aa6301b, aea856621f, f24a5a2faa, ecfe623422, 4a6c94288a, 10e3d73472, 15956b5aa1,
586f7082ef, d6d9186f0d, 2e9ff8f362, 6407432333, f744c0f398, 249efaf44b, 8d32abff9e, 94f052cbf4,
446a03bd96, 6009b69f81, 3d6047113c, 9dec99303d, 7706927370, 3adba6fa2a, f46a8702cc, 8d11b59bbb,
cf501a23d2, 2bcae58d46, c9f08154a3, 526b276fd7, 77ec444d9a, bfc2bedcfc, 83855f3a1f, 50b51830fb,
bfd91588f3, 3741302a10
Makefile (2 changes)

@@ -1,7 +1,7 @@
 all: youtube-dl README.md README.txt youtube-dl.1 youtube-dl.bash-completion youtube-dl.zsh youtube-dl.fish
 
 clean:
-	rm -rf youtube-dl.1.temp.md youtube-dl.1 youtube-dl.bash-completion README.txt MANIFEST build/ dist/ .coverage cover/ youtube-dl.tar.gz youtube-dl.zsh youtube-dl.fish
+	rm -rf youtube-dl.1.temp.md youtube-dl.1 youtube-dl.bash-completion README.txt MANIFEST build/ dist/ .coverage cover/ youtube-dl.tar.gz youtube-dl.zsh youtube-dl.fish *.dump *.part
 
 cleanall: clean
 	rm -f youtube-dl youtube-dl.exe
README.md (16 changes)

@@ -381,7 +381,7 @@ Again, from then on you'll be able to update with `sudo youtube-dl -U`.
 
 YouTube changed their playlist format in March 2014 and later on, so you'll need at least youtube-dl 2014.07.25 to download all YouTube videos.
 
-If you have installed youtube-dl with a package manager, pip, setup.py or a tarball, please use that to update. Note that Ubuntu packages do not seem to get updated anymore. Since we are not affiliated with Ubuntu, there is little we can do. Feel free to report bugs to the Ubuntu packaging guys - all they have to do is update the package to a somewhat recent version. See above for a way to update.
+If you have installed youtube-dl with a package manager, pip, setup.py or a tarball, please use that to update. Note that Ubuntu packages do not seem to get updated anymore. Since we are not affiliated with Ubuntu, there is little we can do. Feel free to [report bugs](https://bugs.launchpad.net/ubuntu/+source/youtube-dl/+filebug) to the [Ubuntu packaging guys](mailto:ubuntu-motu@lists.ubuntu.com?subject=outdated%20version%20of%20youtube-dl) - all they have to do is update the package to a somewhat recent version. See above for a way to update.
 
 ### Do I always have to pass in `--max-quality FORMAT`, or `-citw`?
 
@@ -511,6 +511,20 @@ If you want to add support for a new site, you can follow this quick list (assum
 
 In any case, thank you very much for your contributions!
 
+# EMBEDDING YOUTUBE-DL
+
+youtube-dl makes the best effort to be a good command-line program, and thus should be callable from any programming language. If you encounter any problems parsing its output, feel free to [create a report](https://github.com/rg3/youtube-dl/issues/new).
+
+From a Python program, you can embed youtube-dl in a more powerful fashion, like this:
+
+    import youtube_dl
+
+    ydl_opts = {}
+    with youtube_dl.YoutubeDL(ydl_opts) as ydl:
+        ydl.download(['http://www.youtube.com/watch?v=BaW_jenozKc'])
+
+Most likely, you'll want to use various options. For a list of what can be done, have a look at [youtube_dl/YoutubeDL.py](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/YoutubeDL.py#L69). For a start, if you want to intercept youtube-dl's output, set a `logger` object.
+
 # BUGS
 
 Bugs and suggestions should be reported at: <https://github.com/rg3/youtube-dl/issues> . Unless you were prompted so or there is another pertinent reason (e.g. GitHub fails to accept the bug report), please do not send bug reports via personal email.
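
A note on the embedding section added above: the README points at the `logger` param for intercepting youtube-dl's output. A minimal sketch of that, assuming the `logger` and `progress_hooks` params described in youtube_dl/YoutubeDL.py (the MyLogger/my_hook names are only illustrative):

    import youtube_dl

    class MyLogger(object):
        def debug(self, msg):
            pass  # ignore verbose messages

        def warning(self, msg):
            pass

        def error(self, msg):
            print(msg)

    def my_hook(d):
        # each progress hook receives a status dict
        if d.get('status') == 'finished':
            print('Done downloading, now post-processing ...')

    ydl_opts = {
        'logger': MyLogger(),
        'progress_hooks': [my_hook],
    }
    with youtube_dl.YoutubeDL(ydl_opts) as ydl:
        ydl.download(['http://www.youtube.com/watch?v=BaW_jenozKc'])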
@@ -44,8 +44,8 @@ copyright = u'2014, Ricardo Garcia Gonzalez'
 # built documents.
 #
 # The short X.Y version.
-import youtube_dl
-version = youtube_dl.__version__
+from youtube_dl.version import __version__
+version = __version__
 # The full version, including alpha/beta/rc tags.
 release = version
 
@@ -185,7 +185,9 @@ def generator(test_case):
                     md5_for_file = _file_md5(tc_filename)
                     self.assertEqual(md5_for_file, tc['md5'])
                 info_json_fn = os.path.splitext(tc_filename)[0] + '.info.json'
-                self.assertTrue(os.path.exists(info_json_fn))
+                self.assertTrue(
+                    os.path.exists(info_json_fn),
+                    'Missing info file %s' % info_json_fn)
                 with io.open(info_json_fn, encoding='utf-8') as infof:
                     info_dict = json.load(infof)
 
@@ -289,6 +289,7 @@ class TestUtil(unittest.TestCase):
         self.assertEqual(parse_iso8601('2014-03-23T23:04:26+0100'), 1395612266)
         self.assertEqual(parse_iso8601('2014-03-23T22:04:26+0000'), 1395612266)
         self.assertEqual(parse_iso8601('2014-03-23T22:04:26Z'), 1395612266)
+        self.assertEqual(parse_iso8601('2014-03-23T22:04:26.1234Z'), 1395612266)
 
     def test_strip_jsonp(self):
         stripped = strip_jsonp('cb ([ {"id":"532cb",\n\n\n"x":\n3}\n]\n);')
@@ -360,12 +361,14 @@ class TestUtil(unittest.TestCase):
 
     def test_compat_getenv(self):
         test_str = 'тест'
-        os.environ['YOUTUBE-DL-TEST'] = test_str.encode(get_filesystem_encoding())
+        os.environ['YOUTUBE-DL-TEST'] = (test_str if sys.version_info >= (3, 0)
+            else test_str.encode(get_filesystem_encoding()))
         self.assertEqual(compat_getenv('YOUTUBE-DL-TEST'), test_str)
 
     def test_compat_expanduser(self):
         test_str = 'C:\Documents and Settings\тест\Application Data'
-        os.environ['HOME'] = test_str.encode(get_filesystem_encoding())
+        os.environ['HOME'] = (test_str if sys.version_info >= (3, 0)
+            else test_str.encode(get_filesystem_encoding()))
         self.assertEqual(compat_expanduser('~'), test_str)
 
 if __name__ == '__main__':
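
The two test changes above hinge on the same Python 2/3 difference: os.environ holds text (str) on Python 3 but byte strings on Python 2, so the value only needs encoding on Python 2. A small standalone sketch of the pattern, using UTF-8 in place of get_filesystem_encoding() for brevity:

    # -*- coding: utf-8 -*-
    from __future__ import unicode_literals
    import os
    import sys

    test_str = 'тест'
    # text goes in directly on Python 3; Python 2 expects encoded bytes
    os.environ['YOUTUBE-DL-TEST'] = (
        test_str if sys.version_info >= (3, 0)
        else test_str.encode('utf-8'))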
@@ -189,7 +189,7 @@ class YoutubeDL(object):
     _num_downloads = None
     _screen_file = None
 
-    def __init__(self, params=None):
+    def __init__(self, params=None, auto_init=True):
         """Create a FileDownloader object with the given options."""
         if params is None:
             params = {}
@@ -246,6 +246,10 @@ class YoutubeDL(object):
 
         self._setup_opener()
 
+        if auto_init:
+            self.print_debug_header()
+            self.add_default_info_extractors()
+
     def add_info_extractor(self, ie):
         """Add an InfoExtractor object to the end of the list."""
         self._ies.append(ie)
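
For embedders, the new auto_init flag above means that construction only prints the debug header and registers the default extractors when asked to. A rough sketch of opting out and doing the setup by hand, using only the methods visible in this diff (the URL and params are placeholders):

    import youtube_dl

    ydl = youtube_dl.YoutubeDL({'quiet': True}, auto_init=False)
    # nothing was registered during __init__, so do it explicitly
    ydl.add_default_info_extractors()
    ydl.download(['http://www.youtube.com/watch?v=BaW_jenozKc'])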
@@ -1207,6 +1211,8 @@ class YoutubeDL(object):
             res += 'video@'
         if fdict.get('vbr') is not None:
             res += '%4dk' % fdict['vbr']
+        if fdict.get('fps') is not None:
+            res += ', %sfps' % fdict['fps']
         if fdict.get('acodec') is not None:
             if res:
                 res += ', '
@@ -293,9 +293,6 @@ def _real_main(argv=None):
     }
 
     with YoutubeDL(ydl_opts) as ydl:
-        ydl.print_debug_header()
-        ydl.add_default_info_extractors()
-
         # PostProcessors
         # Add the metadata pp first, the other pps will copy it
         if opts.addmetadata:
@@ -244,9 +244,16 @@ class F4mFD(FileDownloader):
                 lambda f: int(f[0]) == requested_bitrate, formats))[0]
 
         base_url = compat_urlparse.urljoin(man_url, media.attrib['url'])
-        bootstrap = base64.b64decode(doc.find(_add_ns('bootstrapInfo')).text)
+        bootstrap_node = doc.find(_add_ns('bootstrapInfo'))
+        if bootstrap_node.text is None:
+            bootstrap_url = compat_urlparse.urljoin(
+                base_url, bootstrap_node.attrib['url'])
+            bootstrap = self.ydl.urlopen(bootstrap_url).read()
+        else:
+            bootstrap = base64.b64decode(bootstrap_node.text)
         metadata = base64.b64decode(media.find(_add_ns('metadata')).text)
         boot_info = read_bootstrap_info(bootstrap)
 
         fragments_list = build_fragments_list(boot_info)
         if self.params.get('test', False):
             # We only download the first fragment
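
The branch added to the F4M downloader above covers the two ways a manifest can deliver its bootstrap information: inline as base64 text, or as a reference via a url attribute. A standalone sketch of that distinction (the manifest snippets below are made up for illustration, and the namespace handling of real manifests is omitted):

    import base64
    import xml.etree.ElementTree as etree

    inline = etree.fromstring('<bootstrapInfo profile="named">AAAAGGFic3Q=</bootstrapInfo>')
    external = etree.fromstring('<bootstrapInfo profile="named" url="http://example.com/bootstrap.bin"/>')

    for node in (inline, external):
        if node.text is None:
            # external: the binary data has to be fetched from node.attrib['url']
            print('would fetch', node.attrib['url'])
        else:
            print('decoded', base64.b64decode(node.text))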
@@ -67,7 +67,6 @@ from .crunchyroll import (
     CrunchyrollShowPlaylistIE
 )
 from .cspan import CSpanIE
-from .d8 import D8IE
 from .dailymotion import (
     DailymotionIE,
     DailymotionPlaylistIE,
@@ -189,6 +188,7 @@ from .kontrtube import KontrTubeIE
 from .krasview import KrasViewIE
 from .ku6 import Ku6IE
 from .la7 import LA7IE
+from .laola1tv import Laola1TvIE
 from .lifenews import LifeNewsIE
 from .liveleak import LiveLeakIE
 from .livestream import (
@@ -251,7 +251,7 @@ from .newstube import NewstubeIE
 from .nfb import NFBIE
 from .nfl import NFLIE
 from .nhl import NHLIE, NHLVideocenterIE
-from .niconico import NiconicoIE
+from .niconico import NiconicoIE, NiconicoPlaylistIE
 from .ninegag import NineGagIE
 from .noco import NocoIE
 from .normalboots import NormalbootsIE
@@ -280,6 +280,7 @@ from .orf import (
 from .parliamentliveuk import ParliamentLiveUKIE
 from .patreon import PatreonIE
 from .pbs import PBSIE
+from .phoenix import PhoenixIE
 from .photobucket import PhotobucketIE
 from .planetaplay import PlanetaPlayIE
 from .played import PlayedIE
@@ -293,6 +294,7 @@ from .pornoxo import PornoXOIE
 from .promptfile import PromptFileIE
 from .prosiebensat1 import ProSiebenSat1IE
 from .pyvideo import PyvideoIE
+from .quickvid import QuickVidIE
 from .radiofrance import RadioFranceIE
 from .rai import RaiIE
 from .rbmaradio import RBMARadioIE
@@ -355,6 +357,7 @@ from .spike import SpikeIE
 from .sport5 import Sport5IE
 from .sportbox import SportBoxIE
 from .sportdeutschland import SportDeutschlandIE
+from .srmediathek import SRMediathekIE
 from .stanfordoc import StanfordOpenClassroomIE
 from .steam import SteamIE
 from .streamcloud import StreamcloudIE
@@ -4,6 +4,7 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
+from .generic import GenericIE
 from ..utils import (
     determine_ext,
     ExtractorError,
@@ -12,6 +13,7 @@ from ..utils import (
     parse_duration,
     unified_strdate,
     xpath_text,
+    parse_xml,
 )
 
 
@@ -54,6 +56,11 @@ class ARDMediathekIE(InfoExtractor):
         if '>Der gewünschte Beitrag ist nicht mehr verfügbar.<' in webpage:
             raise ExtractorError('Video %s is no longer available' % video_id, expected=True)
 
+        if re.search(r'[\?&]rss($|[=&])', url):
+            doc = parse_xml(webpage)
+            if doc.tag == 'rss':
+                return GenericIE()._extract_rss(url, video_id, doc)
+
         title = self._html_search_regex(
             [r'<h1(?:\s+class="boxTopHeadline")?>(.*?)</h1>',
              r'<meta name="dcterms.title" content="(.*?)"/>',
@@ -4,7 +4,7 @@ from __future__ import unicode_literals
 from .common import InfoExtractor
 from .soundcloud import SoundcloudIE
 from ..utils import ExtractorError
-import datetime
+
 import time
 
 
@@ -24,8 +24,7 @@ class AUEngineIE(InfoExtractor):
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
+        video_id = self._match_id(url)
 
         webpage = self._download_webpage(url, video_id)
         title = self._html_search_regex(r'<title>(?P<title>.+?)</title>', webpage, 'title')
@@ -1,8 +1,6 @@
 # coding: utf-8
 from __future__ import unicode_literals
 
-import re
-
 from .common import InfoExtractor
 from ..utils import (
     ExtractorError,
@@ -7,15 +7,21 @@ from .common import InfoExtractor
 from ..utils import (
     unified_strdate,
     url_basename,
+    qualities,
 )
 
 
 class CanalplusIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.canalplus\.fr/.*?/(?P<path>.*)|player\.canalplus\.fr/#/(?P<id>[0-9]+))'
-    _VIDEO_INFO_TEMPLATE = 'http://service.canal-plus.com/video/rest/getVideosLiees/cplus/%s'
-    IE_NAME = 'canalplus.fr'
+    IE_DESC = 'canalplus.fr, piwiplus.fr and d8.tv'
+    _VALID_URL = r'https?://(?:www\.(?P<site>canalplus\.fr|piwiplus\.fr|d8\.tv)/.*?/(?P<path>.*)|player\.canalplus\.fr/#/(?P<id>[0-9]+))'
+    _VIDEO_INFO_TEMPLATE = 'http://service.canal-plus.com/video/rest/getVideosLiees/%s/%s'
+    _SITE_ID_MAP = {
+        'canalplus.fr': 'cplus',
+        'piwiplus.fr': 'teletoon',
+        'd8.tv': 'd8',
+    }
 
-    _TEST = {
+    _TESTS = [{
         'url': 'http://www.canalplus.fr/c-infos-documentaires/pid1830-c-zapping.html?vid=922470',
         'md5': '3db39fb48b9685438ecf33a1078023e4',
         'info_dict': {
@@ -25,36 +31,73 @@ class CanalplusIE(InfoExtractor):
             'description': 'Le meilleur de toutes les chaînes, tous les jours.\nEmission du 26 août 2013',
             'upload_date': '20130826',
         },
-    }
+    }, {
+        'url': 'http://www.piwiplus.fr/videos-piwi/pid1405-le-labyrinthe-boing-super-ranger.html?vid=1108190',
+        'info_dict': {
+            'id': '1108190',
+            'ext': 'flv',
+            'title': 'Le labyrinthe - Boing super ranger',
+            'description': 'md5:4cea7a37153be42c1ba2c1d3064376ff',
+            'upload_date': '20140724',
+        },
+        'skip': 'Only works from France',
+    }, {
+        'url': 'http://www.d8.tv/d8-docs-mags/pid6589-d8-campagne-intime.html',
+        'info_dict': {
+            'id': '966289',
+            'ext': 'flv',
+            'title': 'Campagne intime - Documentaire exceptionnel',
+            'description': 'md5:d2643b799fb190846ae09c61e59a859f',
+            'upload_date': '20131108',
+        },
+        'skip': 'videos get deleted after a while',
+    }]
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         video_id = mobj.groupdict().get('id')
 
+        site_id = self._SITE_ID_MAP[mobj.group('site') or 'canal']
+
         # Beware, some subclasses do not define an id group
         display_id = url_basename(mobj.group('path'))
 
         if video_id is None:
             webpage = self._download_webpage(url, display_id)
-            video_id = self._search_regex(r'<canal:player videoId="(\d+)"', webpage, 'video id')
+            video_id = self._search_regex(
+                r'<canal:player[^>]+?videoId="(\d+)"', webpage, 'video id')
 
-        info_url = self._VIDEO_INFO_TEMPLATE % video_id
+        info_url = self._VIDEO_INFO_TEMPLATE % (site_id, video_id)
         doc = self._download_xml(info_url, video_id, 'Downloading video XML')
 
         video_info = [video for video in doc if video.find('ID').text == video_id][0]
         media = video_info.find('MEDIA')
         infos = video_info.find('INFOS')
 
-        preferences = ['MOBILE', 'BAS_DEBIT', 'HAUT_DEBIT', 'HD', 'HLS', 'HDS']
+        preference = qualities(['MOBILE', 'BAS_DEBIT', 'HAUT_DEBIT', 'HD', 'HLS', 'HDS'])
 
-        formats = [
-            {
-                'url': fmt.text + '?hdcore=2.11.3' if fmt.tag == 'HDS' else fmt.text,
-                'format_id': fmt.tag,
-                'ext': 'mp4' if fmt.tag == 'HLS' else 'flv',
-                'preference': preferences.index(fmt.tag) if fmt.tag in preferences else -1,
-            } for fmt in media.find('VIDEOS') if fmt.text
-        ]
+        formats = []
+        for fmt in media.find('VIDEOS'):
+            format_url = fmt.text
+            if not format_url:
+                continue
+            format_id = fmt.tag
+            if format_id == 'HLS':
+                hls_formats = self._extract_m3u8_formats(format_url, video_id, 'flv')
+                for fmt in hls_formats:
+                    fmt['preference'] = preference(format_id)
+                formats.extend(hls_formats)
+            elif format_id == 'HDS':
+                hds_formats = self._extract_f4m_formats(format_url + '?hdcore=2.11.3', video_id)
+                for fmt in hds_formats:
+                    fmt['preference'] = preference(format_id)
+                formats.extend(hds_formats)
+            else:
+                formats.append({
+                    'url': format_url,
+                    'format_id': format_id,
+                    'preference': preference(format_id),
+                })
        self._sort_formats(formats)

        return {
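
On the qualities() helper that replaces the manual preferences.index() lookup above: it lives in youtube_dl/utils.py and returns a function that maps a format id to its index in the supplied ordering (ids not in the list map to -1), so a higher value means a better quality. A tiny usage sketch:

    from youtube_dl.utils import qualities

    preference = qualities(['MOBILE', 'BAS_DEBIT', 'HAUT_DEBIT', 'HD', 'HLS', 'HDS'])
    print(preference('HD'))       # 3
    print(preference('UNKNOWN'))  # -1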
@@ -59,12 +59,9 @@ class CinemassacreIE(InfoExtractor):
 
         vidurl = self._search_regex(
             r'\'vidurl\'\s*:\s*"([^\']+)"', playerdata, 'vidurl').replace('\\/', '/')
-        vidid = self._search_regex(
-            r'\'vidid\'\s*:\s*"([^\']+)"', playerdata, 'vidid')
-        videoserver = self._html_search_regex(
-            r"'videoserver'\s*:\s*'([^']+)'", playerdata, 'videoserver')
 
-        videolist_url = 'http://%s/vod/smil:%s.smil/jwplayer.smil' % (videoserver, vidid)
+        videolist_url = self._search_regex(
+            r"file\s*:\s*'(http.+?/jwplayer\.smil)'", playerdata, 'jwplayer.smil')
         videolist = self._download_xml(videolist_url, video_id, 'Downloading videolist XML')
 
         formats = []
@@ -4,7 +4,6 @@ import json
 import re
 
 from .common import InfoExtractor
-from ..utils import int_or_none
 
 
 _translation_table = {
@@ -39,9 +38,7 @@ class CliphunterIE(InfoExtractor):
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
-
+        video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
 
         video_title = self._search_regex(
@@ -72,6 +72,7 @@ class InfoExtractor(object):
                     * acodec    Name of the audio codec in use
                     * asr       Audio sampling rate in Hertz
                     * vbr       Average video bitrate in KBit/s
+                    * fps       Frame rate
                     * vcodec    Name of the video codec in use
                     * container Name of the container format
                     * filesize  The number of bytes, if known in advance
@@ -618,6 +619,7 @@ class InfoExtractor(object):
             f.get('vbr') if f.get('vbr') is not None else -1,
             f.get('abr') if f.get('abr') is not None else -1,
             audio_ext_preference,
+            f.get('fps') if f.get('fps') is not None else -1,
             f.get('filesize') if f.get('filesize') is not None else -1,
             f.get('filesize_approx') if f.get('filesize_approx') is not None else -1,
             f.get('source_preference') if f.get('source_preference') is not None else -1,
@@ -689,7 +691,10 @@ class InfoExtractor(object):
                 if re.match(r'^https?://', u)
                 else compat_urlparse.urljoin(m3u8_url, u))
 
-        m3u8_doc = self._download_webpage(m3u8_url, video_id)
+        m3u8_doc = self._download_webpage(
+            m3u8_url, video_id,
+            note='Downloading m3u8 information',
+            errnote='Failed to download m3u8 information')
         last_info = None
         kv_rex = re.compile(
             r'(?P<key>[a-zA-Z_-]+)=(?P<val>"[^"]+"|[^",]+)(?:,|$)')
@@ -109,19 +109,17 @@ class CrunchyrollIE(SubtitlesInfoExtractor):
         decrypted_data = intlist_to_bytes(aes_cbc_decrypt(data, key, iv))
         return zlib.decompress(decrypted_data)
 
-    def _convert_subtitles_to_srt(self, subtitles):
+    def _convert_subtitles_to_srt(self, sub_root):
         output = ''
-        for i, (start, end, text) in enumerate(re.findall(r'<event [^>]*?start="([^"]+)" [^>]*?end="([^"]+)" [^>]*?text="([^"]+)"[^>]*?>', subtitles), 1):
-            start = start.replace('.', ',')
-            end = end.replace('.', ',')
-            text = clean_html(text)
-            text = text.replace('\\N', '\n')
-            if not text:
-                continue
+
+        for i, event in enumerate(sub_root.findall('./events/event'), 1):
+            start = event.attrib['start'].replace('.', ',')
+            end = event.attrib['end'].replace('.', ',')
+            text = event.attrib['text'].replace('\\N', '\n')
             output += '%d\n%s --> %s\n%s\n\n' % (i, start, end, text)
         return output
 
-    def _convert_subtitles_to_ass(self, subtitles):
+    def _convert_subtitles_to_ass(self, sub_root):
         output = ''
 
         def ass_bool(strvalue):
@@ -130,10 +128,6 @@ class CrunchyrollIE(SubtitlesInfoExtractor):
                 assvalue = '-1'
             return assvalue
 
-        sub_root = xml.etree.ElementTree.fromstring(subtitles)
-        if not sub_root:
-            return output
-
         output = '[Script Info]\n'
         output += 'Title: %s\n' % sub_root.attrib["title"]
         output += 'ScriptType: v4.00+\n'
@@ -270,10 +264,13 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
             lang_code = self._search_regex(r'lang_code=["\']([^"\']+)', subtitle, 'subtitle_lang_code', fatal=False)
             if not lang_code:
                 continue
+            sub_root = xml.etree.ElementTree.fromstring(subtitle)
+            if not sub_root:
+                subtitles[lang_code] = ''
             if sub_format == 'ass':
-                subtitles[lang_code] = self._convert_subtitles_to_ass(subtitle)
+                subtitles[lang_code] = self._convert_subtitles_to_ass(sub_root)
             else:
-                subtitles[lang_code] = self._convert_subtitles_to_srt(subtitle)
+                subtitles[lang_code] = self._convert_subtitles_to_srt(sub_root)
 
         if self._downloader.params.get('listsubtitles', False):
             self._list_available_subtitles(video_id, subtitles)
@@ -1,25 +0,0 @@
-# encoding: utf-8
-from __future__ import unicode_literals
-
-from .canalplus import CanalplusIE
-
-
-class D8IE(CanalplusIE):
-    _VALID_URL = r'https?://www\.d8\.tv/.*?/(?P<path>.*)'
-    _VIDEO_INFO_TEMPLATE = 'http://service.canal-plus.com/video/rest/getVideosLiees/d8/%s'
-    IE_NAME = 'd8.tv'
-
-    _TEST = {
-        'url': 'http://www.d8.tv/d8-docs-mags/pid6589-d8-campagne-intime.html',
-        'file': '966289.flv',
-        'info_dict': {
-            'title': 'Campagne intime - Documentaire exceptionnel',
-            'description': 'md5:d2643b799fb190846ae09c61e59a859f',
-            'upload_date': '20131108',
-        },
-        'params': {
-            # rtmp
-            'skip_download': True,
-        },
-        'skip': 'videos get deleted after a while',
-    }
@@ -1,7 +1,5 @@
 from __future__ import unicode_literals
 
-import re
-
 from .subtitles import SubtitlesInfoExtractor
 from .common import ExtractorError
 from ..utils import parse_iso8601
@@ -25,8 +23,7 @@ class DRTVIE(SubtitlesInfoExtractor):
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
+        video_id = self._match_id(url)
 
         programcard = self._download_json(
             'http://www.dr.dk/mu/programcard/expanded/%s' % video_id, video_id, 'Downloading video JSON')
@@ -35,7 +32,7 @@ class DRTVIE(SubtitlesInfoExtractor):
 
         title = data['Title']
         description = data['Description']
-        timestamp = parse_iso8601(data['CreatedTime'][:-5])
+        timestamp = parse_iso8601(data['CreatedTime'])
 
         thumbnail = None
         duration = None
@@ -1,49 +1,48 @@
 # encoding: utf-8
-import re
+from __future__ import unicode_literals
 
 from .common import InfoExtractor
-from ..utils import (
-    determine_ext,
-)
 
 
 class FazIE(InfoExtractor):
-    IE_NAME = u'faz.net'
+    IE_NAME = 'faz.net'
     _VALID_URL = r'https?://www\.faz\.net/multimedia/videos/.*?-(?P<id>\d+)\.html'
 
     _TEST = {
-        u'url': u'http://www.faz.net/multimedia/videos/stockholm-chemie-nobelpreis-fuer-drei-amerikanische-forscher-12610585.html',
-        u'file': u'12610585.mp4',
-        u'info_dict': {
-            u'title': u'Stockholm: Chemie-Nobelpreis für drei amerikanische Forscher',
-            u'description': u'md5:1453fbf9a0d041d985a47306192ea253',
+        'url': 'http://www.faz.net/multimedia/videos/stockholm-chemie-nobelpreis-fuer-drei-amerikanische-forscher-12610585.html',
+        'info_dict': {
+            'id': '12610585',
+            'ext': 'mp4',
+            'title': 'Stockholm: Chemie-Nobelpreis für drei amerikanische Forscher',
+            'description': 'md5:1453fbf9a0d041d985a47306192ea253',
         },
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
-        self.to_screen(video_id)
+        video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
-        config_xml_url = self._search_regex(r'writeFLV\(\'(.+?)\',', webpage,
-            u'config xml url')
-        config = self._download_xml(config_xml_url, video_id,
-            u'Downloading config xml')
+        config_xml_url = self._search_regex(
+            r'writeFLV\(\'(.+?)\',', webpage, 'config xml url')
+        config = self._download_xml(
+            config_xml_url, video_id, 'Downloading config xml')
 
         encodings = config.find('ENCODINGS')
         formats = []
-        for code in ['LOW', 'HIGH', 'HQ']:
+        for pref, code in enumerate(['LOW', 'HIGH', 'HQ']):
             encoding = encodings.find(code)
             if encoding is None:
                 continue
             encoding_url = encoding.find('FILENAME').text
             formats.append({
                 'url': encoding_url,
-                'ext': determine_ext(encoding_url),
                 'format_id': code.lower(),
+                'quality': pref,
             })
+        self._sort_formats(formats)
 
-        descr = self._html_search_regex(r'<p class="Content Copy">(.*?)</p>', webpage, u'description')
+        descr = self._html_search_regex(
+            r'<p class="Content Copy">(.*?)</p>', webpage, 'description', fatal=False)
         return {
             'id': video_id,
             'title': self._og_search_title(webpage),
@@ -1,25 +1,27 @@
+from __future__ import unicode_literals
+
 import re
 import random
 import json
 
 from .common import InfoExtractor
 from ..utils import (
-    determine_ext,
     get_element_by_id,
     clean_html,
 )
 
 
 class FKTVIE(InfoExtractor):
-    IE_NAME = u'fernsehkritik.tv'
-    _VALID_URL = r'(?:http://)?(?:www\.)?fernsehkritik\.tv/folge-(?P<ep>[0-9]+)(?:/.*)?'
+    IE_NAME = 'fernsehkritik.tv'
+    _VALID_URL = r'http://(?:www\.)?fernsehkritik\.tv/folge-(?P<ep>[0-9]+)(?:/.*)?'
 
     _TEST = {
-        u'url': u'http://fernsehkritik.tv/folge-1',
-        u'file': u'00011.flv',
-        u'info_dict': {
-            u'title': u'Folge 1 vom 10. April 2007',
-            u'description': u'md5:fb4818139c7cfe6907d4b83412a6864f',
+        'url': 'http://fernsehkritik.tv/folge-1',
+        'info_dict': {
+            'id': '00011',
+            'ext': 'flv',
+            'title': 'Folge 1 vom 10. April 2007',
+            'description': 'md5:fb4818139c7cfe6907d4b83412a6864f',
         },
     }
 
@@ -32,7 +34,7 @@ class FKTVIE(InfoExtractor):
         start_webpage = self._download_webpage('http://fernsehkritik.tv/folge-%d/Start' % episode,
             episode)
         playlist = self._search_regex(r'playlist = (\[.*?\]);', start_webpage,
-            u'playlist', flags=re.DOTALL)
+            'playlist', flags=re.DOTALL)
         files = json.loads(re.sub('{[^{}]*?}', '{}', playlist))
         # TODO: return a single multipart video
         videos = []
@@ -42,7 +44,6 @@ class FKTVIE(InfoExtractor):
             videos.append({
                 'id': video_id,
                 'url': video_url,
-                'ext': determine_ext(video_url),
                 'title': clean_html(get_element_by_id('eptitle', start_webpage)),
                 'description': clean_html(get_element_by_id('contentlist', start_webpage)),
                 'thumbnail': video_thumbnail
@@ -51,14 +52,15 @@ class FKTVIE(InfoExtractor):
 
 
 class FKTVPosteckeIE(InfoExtractor):
-    IE_NAME = u'fernsehkritik.tv:postecke'
-    _VALID_URL = r'(?:http://)?(?:www\.)?fernsehkritik\.tv/inline-video/postecke\.php\?(.*&)?ep=(?P<ep>[0-9]+)(&|$)'
+    IE_NAME = 'fernsehkritik.tv:postecke'
+    _VALID_URL = r'http://(?:www\.)?fernsehkritik\.tv/inline-video/postecke\.php\?(.*&)?ep=(?P<ep>[0-9]+)(&|$)'
     _TEST = {
-        u'url': u'http://fernsehkritik.tv/inline-video/postecke.php?iframe=true&width=625&height=440&ep=120',
-        u'file': u'0120.flv',
-        u'md5': u'262f0adbac80317412f7e57b4808e5c4',
-        u'info_dict': {
-            u"title": u"Postecke 120"
+        'url': 'http://fernsehkritik.tv/inline-video/postecke.php?iframe=true&width=625&height=440&ep=120',
+        'md5': '262f0adbac80317412f7e57b4808e5c4',
+        'info_dict': {
+            'id': '0120',
+            'ext': 'flv',
+            'title': 'Postecke 120',
         }
     }
 
@@ -71,8 +73,7 @@ class FKTVPosteckeIE(InfoExtractor):
         video_url = 'http://dl%d.fernsehkritik.tv/postecke/postecke%d.flv' % (server, episode)
         video_title = 'Postecke %d' % episode
         return {
             'id': video_id,
             'url': video_url,
-            'ext': determine_ext(video_url),
-            'title': video_title,
+            'title': video_title,
         }
@@ -93,7 +93,6 @@ class FranceTvInfoIE(FranceTVBaseInfoExtractor):
 
     _TESTS = [{
         'url': 'http://www.francetvinfo.fr/replay-jt/france-3/soir-3/jt-grand-soir-3-lundi-26-aout-2013_393427.html',
-        'md5': '9cecf35f99c4079c199e9817882a9a1c',
         'info_dict': {
             'id': '84981923',
             'ext': 'flv',
@@ -8,7 +8,7 @@ from ..utils import ExtractorError
 
 
 class FunnyOrDieIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?funnyordie\.com/(?P<type>embed|videos)/(?P<id>[0-9a-f]+)(?:$|[?#/])'
+    _VALID_URL = r'https?://(?:www\.)?funnyordie\.com/(?P<type>embed|articles|videos)/(?P<id>[0-9a-f]+)(?:$|[?#/])'
     _TESTS = [{
         'url': 'http://www.funnyordie.com/videos/0732f586d7/heart-shaped-box-literal-video-version',
         'md5': 'bcd81e0c4f26189ee09be362ad6e6ba9',
@@ -29,6 +29,9 @@ class FunnyOrDieIE(InfoExtractor):
             'description': 'Please use this to sell something. www.jonlajoie.com',
             'thumbnail': 're:^http:.*\.jpg$',
         },
+    }, {
+        'url': 'http://www.funnyordie.com/articles/ebf5e34fc8/10-hours-of-walking-in-nyc-as-a-man',
+        'only_matching': True,
     }]
 
     def _real_extract(self, url):
@@ -325,7 +325,7 @@ class GenericIE(InfoExtractor):
                 'ext': 'mp4',
                 'age_limit': 18,
                 'uploader': 'www.handjobhub.com',
-                'title': 'Busty Blonde Siri Tit Fuck While Wank at Handjob Hub',
+                'title': 'Busty Blonde Siri Tit Fuck While Wank at HandjobHub.com',
             }
         },
         # RSS feed
@@ -405,6 +405,18 @@ class GenericIE(InfoExtractor):
             'expected_warnings': [
                 r'501.*Not Implemented'
             ],
+        },
+        # Soundcloud embed
+        {
+            'url': 'http://nakedsecurity.sophos.com/2014/10/29/sscc-171-are-you-sure-that-1234-is-a-bad-password-podcast/',
+            'info_dict': {
+                'id': '174391317',
+                'ext': 'mp3',
+                'description': 'md5:ff867d6b555488ad3c52572bb33d432c',
+                'uploader': 'Sophos Security',
+                'title': 'Chet Chat 171 - Oct 29, 2014',
+                'upload_date': '20141029',
+            }
         }
     ]
 
@@ -838,7 +850,7 @@ class GenericIE(InfoExtractor):
 
         # Look for embeded soundcloud player
         mobj = re.search(
-            r'<iframe src="(?P<url>https?://(?:w\.)?soundcloud\.com/player[^"]+)"',
+            r'<iframe\s+(?:[a-zA-Z0-9_-]+="[^"]+"\s+)*src="(?P<url>https?://(?:w\.)?soundcloud\.com/player[^"]+)"',
             webpage)
         if mobj is not None:
             url = unescapeHTML(mobj.group('url'))
@@ -875,7 +887,7 @@ class GenericIE(InfoExtractor):
             return self.url_result(mobj.group('url'), 'SBS')
 
         mobj = re.search(
-            r'<iframe[^>]+?src=(["\'])(?P<url>https?://m\.mlb\.com/shared/video/embed/embed\.html\?.+?)\1',
+            r'<iframe[^>]+?src=(["\'])(?P<url>https?://m(?:lb)?\.mlb\.com/shared/video/embed/embed\.html\?.+?)\1',
            webpage)
         if mobj is not None:
            return self.url_result(mobj.group('url'), 'MLB')
@@ -933,7 +945,7 @@ class GenericIE(InfoExtractor):
            found = filter_video(re.findall(r'<meta.*?property="og:video".*?content="(.*?)"', webpage))
        if not found:
            # HTML5 video
-            found = re.findall(r'(?s)<video[^<]*(?:>.*?<source[^>]+)? src="([^"]+)"', webpage)
+            found = re.findall(r'(?s)<video[^<]*(?:>.*?<source[^>]*)?\s+src="([^"]+)"', webpage)
        if not found:
            found = re.search(
                r'(?i)<meta\s+(?=(?:[a-z-]+="[^"]+"\s+)*http-equiv="refresh")'
@@ -46,9 +46,9 @@ class GorillaVidIE(InfoExtractor):
         'info_dict': {
             'id': '3rso4kdn6f9m',
             'ext': 'mp4',
-            'title': 'Micro Pig piglets ready on 16th July 2009',
+            'title': 'Micro Pig piglets ready on 16th July 2009-bG0PdrCdxUc',
             'thumbnail': 're:http://.*\.jpg',
-        },
+        }
     }, {
         'url': 'http://movpod.in/0wguyyxi1yca',
         'only_matching': True,
@@ -4,6 +4,7 @@ from __future__ import unicode_literals
 from .common import InfoExtractor
 from ..utils import (
     get_meta_content,
+    int_or_none,
     parse_iso8601,
 )
 
@@ -28,20 +29,26 @@ class HeiseIE(InfoExtractor):
             'timestamp': 1411812600,
             'upload_date': '20140927',
             'description': 'In uplink-Episode 3.3 geht es darum, wie man sich von Cloud-Anbietern emanzipieren kann, worauf man beim Kauf einer Tastatur achten sollte und was Smartphones über uns verraten.',
+            'thumbnail': 're:https?://.*\.jpg$',
         }
     }
 
     def _real_extract(self, url):
         video_id = self._match_id(url)
 
         webpage = self._download_webpage(url, video_id)
-        json_url = self._search_regex(
-            r'json_url:\s*"([^"]+)"', webpage, 'json URL')
-        config = self._download_json(json_url, video_id)
+        container_id = self._search_regex(
+            r'<div class="videoplayerjw".*?data-container="([0-9]+)"',
+            webpage, 'container ID')
+        sequenz_id = self._search_regex(
+            r'<div class="videoplayerjw".*?data-sequenz="([0-9]+)"',
+            webpage, 'sequenz ID')
+        data_url = 'http://www.heise.de/videout/feed?container=%s&sequenz=%s' % (container_id, sequenz_id)
+        doc = self._download_xml(data_url, video_id)
 
         info = {
             'id': video_id,
-            'thumbnail': config.get('poster'),
+            'thumbnail': self._og_search_thumbnail(webpage),
             'timestamp': parse_iso8601(get_meta_content('date', webpage)),
             'description': self._og_search_description(webpage),
         }
@@ -49,32 +56,19 @@ class HeiseIE(InfoExtractor):
         title = get_meta_content('fulltitle', webpage)
         if title:
             info['title'] = title
-        elif config.get('title'):
-            info['title'] = config['title']
         else:
             info['title'] = self._og_search_title(webpage)
 
         formats = []
-        for t, rs in config['formats'].items():
-            if not rs or not hasattr(rs, 'items'):
-                self._downloader.report_warning(
-                    'formats: {0}: no resolutions'.format(t))
-                continue
-
-            for height_str, obj in rs.items():
-                format_id = '{0}_{1}'.format(t, height_str)
-
-                if not obj or not obj.get('url'):
-                    self._downloader.report_warning(
-                        'formats: {0}: no url'.format(format_id))
-                    continue
-
-                formats.append({
-                    'url': obj['url'],
-                    'format_id': format_id,
-                    'height': self._int(height_str, 'height'),
-                })
-
+        for source_node in doc.findall('.//{http://rss.jwpcdn.com/}source'):
+            label = source_node.attrib['label']
+            height = int_or_none(self._search_regex(
+                r'^(.*?_)?([0-9]+)p$', label, 'height', default=None))
+            formats.append({
+                'url': source_node.attrib['file'],
+                'format_note': label,
+                'height': height,
+            })
         self._sort_formats(formats)
         info['formats'] = formats
 
@@ -1,7 +1,5 @@
 from __future__ import unicode_literals
 
-import re
-
 from .common import InfoExtractor
 
 
@@ -20,13 +18,11 @@ class IconosquareIE(InfoExtractor):
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
+        video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
-        html_title = self._html_search_regex(
-            r'<title>(.+?)</title>',
+        title = self._html_search_regex(
+            r'<title>(.+?)(?: *\(Videos?\))? \| (?:Iconosquare|Statigram)</title>',
             webpage, 'title')
-        title = re.sub(r'(?: *\(Videos?\))? \| (?:Iconosquare|Statigram)$', '', html_title)
         uploader_id = self._html_search_regex(
             r'@([^ ]+)', title, 'uploader name', fatal=False)
 
@@ -1,8 +1,6 @@
 # encoding: utf-8
 from __future__ import unicode_literals
 
-import re
-
 from .common import InfoExtractor
 
 
@@ -21,22 +19,17 @@ class KickStarterIE(InfoExtractor):
     }, {
         'note': 'Embedded video (not using the native kickstarter video service)',
         'url': 'https://www.kickstarter.com/projects/597507018/pebble-e-paper-watch-for-iphone-and-android/posts/659178',
-        'playlist': [
-            {
-                'info_dict': {
-                    'id': '78704821',
-                    'ext': 'mp4',
-                    'uploader_id': 'pebble',
-                    'uploader': 'Pebble Technology',
-                    'title': 'Pebble iOS Notifications',
-                }
-            }
-        ],
+        'info_dict': {
+            'id': '78704821',
+            'ext': 'mp4',
+            'uploader_id': 'pebble',
+            'uploader': 'Pebble Technology',
+            'title': 'Pebble iOS Notifications',
+        }
     }]
 
     def _real_extract(self, url):
-        m = re.match(self._VALID_URL, url)
-        video_id = m.group('id')
+        video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
 
         title = self._html_search_regex(
@@ -1,7 +1,5 @@
 from __future__ import unicode_literals
 
-import re
-
 from .common import InfoExtractor
 
 
@@ -18,11 +16,11 @@ class Ku6IE(InfoExtractor):
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
+        video_id = self._match_id(url)
 
         webpage = self._download_webpage(url, video_id)
-        title = self._search_regex(r'<h1 title=.*>(.*?)</h1>', webpage, 'title')
+        title = self._html_search_regex(
+            r'<h1 title=.*>(.*?)</h1>', webpage, 'title')
         dataUrl = 'http://v.ku6.com/fetchVideo4Player/%s.html' % video_id
         jsonData = self._download_json(dataUrl, video_id)
         downloadUrl = jsonData['data']['f']
youtube_dl/extractor/laola1tv.py (new file, 77 lines)
@@ -0,0 +1,77 @@
+from __future__ import unicode_literals
+
+import random
+import re
+
+from .common import InfoExtractor
+
+
+class Laola1TvIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?laola1\.tv/(?P<lang>[a-z]+)-(?P<portal>[a-z]+)/.*?/(?P<id>[0-9]+)\.html'
+    _TEST = {
+        'url': 'http://www.laola1.tv/de-de/live/bwf-bitburger-open-grand-prix-gold-court-1/250019.html',
+        'info_dict': {
+            'id': '250019',
+            'ext': 'mp4',
+            'title': 'Bitburger Open Grand Prix Gold - Court 1',
+            'categories': ['Badminton'],
+            'uploader': 'BWF - Badminton World Federation',
+            'is_live': True,
+        },
+        'params': {
+            'skip_download': True,
+        }
+    }
+
+    _BROKEN = True  # Not really - extractor works fine, but f4m downloader does not support live streams yet.
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+        lang = mobj.group('lang')
+        portal = mobj.group('portal')
+
+        webpage = self._download_webpage(url, video_id)
+        iframe_url = self._search_regex(
+            r'<iframe[^>]*?class="main_tv_player"[^>]*?src="([^"]+)"',
+            webpage, 'iframe URL')
+
+        iframe = self._download_webpage(
+            iframe_url, video_id, note='Downloading iframe')
+        flashvars_m = re.findall(
+            r'flashvars\.([_a-zA-Z0-9]+)\s*=\s*"([^"]*)";', iframe)
+        flashvars = dict((m[0], m[1]) for m in flashvars_m)
+
+        xml_url = ('http://www.laola1.tv/server/hd_video.php?' +
+                   'play=%s&partner=1&portal=%s&v5ident=&lang=%s' % (
+                       video_id, portal, lang))
+        hd_doc = self._download_xml(xml_url, video_id)
+
+        title = hd_doc.find('.//video/title').text
+        flash_url = hd_doc.find('.//video/url').text
+        categories = hd_doc.find('.//video/meta_sports').text.split(',')
+        uploader = hd_doc.find('.//video/meta_organistation').text
+
+        ident = random.randint(10000000, 99999999)
+        token_url = '%s&ident=%s&klub=0&unikey=0&timestamp=%s&auth=%s' % (
+            flash_url, ident, flashvars['timestamp'], flashvars['auth'])
+
+        token_doc = self._download_xml(
+            token_url, video_id, note='Downloading token')
+        token_attrib = token_doc.find('.//token').attrib
+        if token_attrib.get('auth') == 'blocked':
+            raise ExtractorError('Token error: %s' % token_attrib.get('comment'))
+
+        video_url = '%s?hdnea=%s&hdcore=3.2.0' % (
+            token_attrib['url'], token_attrib['auth'])
+
+        return {
+            'id': video_id,
+            'is_live': True,
+            'title': title,
+            'url': video_url,
+            'uploader': uploader,
+            'categories': categories,
+            'ext': 'mp4',
+        }
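Not part of the diff -- a usage sketch for the new extractor, assuming the embedding API of this youtube-dl release (plain YoutubeDL constructor plus extract_info). The URL is the one from the _TEST block; downloading is skipped since the f4m downloader cannot handle the live stream yet:

```python
import youtube_dl

# Hypothetical smoke test for Laola1TvIE; metadata only, no download.
ydl = youtube_dl.YoutubeDL({'skip_download': True})
info = ydl.extract_info(
    'http://www.laola1.tv/de-de/live/bwf-bitburger-open-grand-prix-gold-court-1/250019.html',
    download=False)
print(info['id'], info['title'], info.get('is_live'))
```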
@@ -190,7 +190,8 @@ class LivestreamOriginalIE(InfoExtractor):
             'id': video_id,
             'title': item.find('title').text,
             'url': 'rtmp://extondemand.livestream.com/ondemand',
-            'play_path': 'mp4:trans/dv15/mogulus-{0}.mp4'.format(path),
+            'play_path': 'trans/dv15/mogulus-{0}'.format(path),
+            'player_url': 'http://static.livestream.com/chromelessPlayer/v21/playerapi.swf?hash=5uetk&v=0803&classid=D27CDB6E-AE6D-11cf-96B8-444553540000&jsEnabled=false&wmode=opaque',
             'ext': 'flv',
             'thumbnail': thumbnail_url,
         }
@@ -32,9 +32,7 @@ class LRTIE(InfoExtractor):
     }

     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
-
+        video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)

         title = remove_end(self._og_search_title(webpage), ' - LRT')
@@ -10,7 +10,7 @@ from ..utils import (


 class MLBIE(InfoExtractor):
-    _VALID_URL = r'https?://m\.mlb\.com/(?:(?:.*?/)?video/(?:topic/[\da-z_-]+/)?v|shared/video/embed/embed\.html\?.*?\bcontent_id=)(?P<id>n?\d+)'
+    _VALID_URL = r'https?://m(?:lb)?\.mlb\.com/(?:(?:.*?/)?video/(?:topic/[\da-z_-]+/)?v|(?:shared/video/embed/embed\.html|[^/]+/video/play\.jsp)\?.*?\bcontent_id=)(?P<id>n?\d+)'
     _TESTS = [
         {
             'url': 'http://m.mlb.com/sea/video/topic/51231442/v34698933/nymsea-ackley-robs-a-home-run-with-an-amazing-catch/?c_id=sea',
@@ -72,6 +72,14 @@ class MLBIE(InfoExtractor):
             'url': 'http://m.mlb.com/shared/video/embed/embed.html?content_id=35692085&topic_id=6479266&width=400&height=224&property=mlb',
             'only_matching': True,
         },
+        {
+            'url': 'http://mlb.mlb.com/shared/video/embed/embed.html?content_id=36599553',
+            'only_matching': True,
+        },
+        {
+            'url': 'http://mlb.mlb.com/es/video/play.jsp?content_id=36599553',
+            'only_matching': True,
+        },
     ]

     def _real_extract(self, url):
@@ -7,6 +7,7 @@ from .common import InfoExtractor
 from ..utils import (
     compat_urllib_parse,
     ExtractorError,
+    clean_html,
 )


@@ -31,6 +32,11 @@ class NaverIE(InfoExtractor):
         m_id = re.search(r'var rmcPlayer = new nhn.rmcnmv.RMCVideoPlayer\("(.+?)", "(.+?)"',
                          webpage)
         if m_id is None:
+            m_error = re.search(
+                r'(?s)<div class="nation_error">\s*(?:<!--.*?-->)?\s*<p class="[^"]+">(?P<msg>.+?)</p>\s*</div>',
+                webpage)
+            if m_error:
+                raise ExtractorError(clean_html(m_error.group('msg')), expected=True)
             raise ExtractorError('couldn\'t extract vid and key')
         vid = m_id.group(1)
         key = m_id.group(2)
@@ -26,8 +26,7 @@ class NBCIE(InfoExtractor):
     }

     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
+        video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
         theplatform_url = self._search_regex('class="video-player video-player-full" data-mpx-url="(.*?)"', webpage, 'theplatform url')
         if theplatform_url.startswith('//'):
@@ -57,7 +56,7 @@ class NBCNewsIE(InfoExtractor):
         'md5': 'b2421750c9f260783721d898f4c42063',
         'info_dict': {
             'id': 'I1wpAI_zmhsQ',
-            'ext': 'flv',
+            'ext': 'mp4',
             'title': 'How Twitter Reacted To The Snowden Interview',
             'description': 'md5:65a0bd5d76fe114f3c2727aa3a81fe64',
         },
@@ -97,6 +96,8 @@ class NBCNewsIE(InfoExtractor):
         ]

         for base_url in base_urls:
+            if not base_url:
+                continue
             playlist_url = base_url + '?form=MPXNBCNewsAPI'
             all_videos = self._download_json(playlist_url, title)['videos']

@@ -7,7 +7,6 @@ from .common import InfoExtractor
 from ..utils import (
     compat_urlparse,
     compat_urllib_parse,
-    determine_ext,
     unified_strdate,
 )

@@ -2,6 +2,7 @@
 from __future__ import unicode_literals

 import re
+import json

 from .common import InfoExtractor
 from ..utils import (
@@ -146,3 +147,36 @@ class NiconicoIE(InfoExtractor):
             'duration': duration,
             'webpage_url': webpage_url,
         }
+
+
+class NiconicoPlaylistIE(InfoExtractor):
+    _VALID_URL = r'https?://www\.nicovideo\.jp/mylist/(?P<id>\d+)'
+
+    _TEST = {
+        'url': 'http://www.nicovideo.jp/mylist/27411728',
+        'info_dict': {
+            'id': '27411728',
+            'title': 'AKB48のオールナイトニッポン',
+        },
+        'playlist_mincount': 225,
+    }
+
+    def _real_extract(self, url):
+        list_id = self._match_id(url)
+        webpage = self._download_webpage(url, list_id)
+
+        entries_json = self._search_regex(r'Mylist\.preload\(\d+, (\[.*\])\);',
+                                          webpage, 'entries')
+        entries = json.loads(entries_json)
+        entries = [{
+            '_type': 'url',
+            'ie_key': NiconicoIE.ie_key(),
+            'url': 'http://www.nicovideo.jp/watch/%s' % entry['item_id'],
+        } for entry in entries]
+
+        return {
+            '_type': 'playlist',
+            'title': self._search_regex(r'\s+name: "(.*?)"', webpage, 'title'),
+            'id': list_id,
+            'entries': entries,
+        }
youtube_dl/extractor/phoenix.py (new file, 31 lines)
@@ -0,0 +1,31 @@
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from .zdf import extract_from_xml_url
+
+
+class PhoenixIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?phoenix\.de/content/(?P<id>[0-9]+)'
+    _TEST = {
+        'url': 'http://www.phoenix.de/content/884301',
+        'md5': 'ed249f045256150c92e72dbb70eadec6',
+        'info_dict': {
+            'id': '884301',
+            'ext': 'mp4',
+            'title': 'Michael Krons mit Hans-Werner Sinn',
+            'description': 'Im Dialog - Sa. 25.10.14, 00.00 - 00.35 Uhr',
+            'upload_date': '20141025',
+            'uploader': 'Im Dialog',
+        }
+    }
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        webpage = self._download_webpage(url, video_id)
+
+        internal_id = self._search_regex(
+            r'<div class="phx_vod" id="phx_vod_([0-9]+)"',
+            webpage, 'internal video ID')
+
+        api_url = 'http://www.phoenix.de/php/zdfplayer-v1.3/data/beitragsDetails.php?ak=web&id=%s' % internal_id
+        return extract_from_xml_url(self, video_id, api_url)
@@ -16,13 +16,14 @@ from ..aes import (


 class PornHubIE(InfoExtractor):
-    _VALID_URL = r'^(?:https?://)?(?:www\.)?(?P<url>pornhub\.com/view_video\.php\?viewkey=(?P<videoid>[0-9a-f]+))'
+    _VALID_URL = r'^https?://(?:www\.)?pornhub\.com/view_video\.php\?viewkey=(?P<id>[0-9a-f]+)'
     _TEST = {
         'url': 'http://www.pornhub.com/view_video.php?viewkey=648719015',
-        'file': '648719015.mp4',
         'md5': '882f488fa1f0026f023f33576004a2ed',
         'info_dict': {
-            "uploader": "BABES-COM",
+            'id': '648719015',
+            'ext': 'mp4',
+            "uploader": "Babes",
             "title": "Seductive Indian beauty strips down and fingers her pink pussy",
             "age_limit": 18
         }
@@ -35,9 +36,7 @@ class PornHubIE(InfoExtractor):
         return count

     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('videoid')
-        url = 'http://www.' + mobj.group('url')
+        video_id = self._match_id(url)

         req = compat_urllib_request.Request(url)
         req.add_header('Cookie', 'age_verified=1')
@@ -45,7 +44,7 @@ class PornHubIE(InfoExtractor):

         video_title = self._html_search_regex(r'<h1 [^>]+>([^<]+)', webpage, 'title')
         video_uploader = self._html_search_regex(
-            r'(?s)From:&nbsp;.+?<(?:a href="/users/|<span class="username)[^>]+>(.+?)<',
+            r'(?s)From:&nbsp;.+?<(?:a href="/users/|a href="/channels/|<span class="username)[^>]+>(.+?)<',
             webpage, 'uploader', fatal=False)
         thumbnail = self._html_search_regex(r'"image_url":"([^"]+)', webpage, 'thumbnail', fatal=False)
         if thumbnail:
@@ -14,7 +14,6 @@ from ..utils import (

 class PromptFileIE(InfoExtractor):
     _VALID_URL = r'https?://(?:www\.)?promptfile\.com/l/(?P<id>[0-9A-Z\-]+)'
-    _FILE_NOT_FOUND_REGEX = r'<div.+id="not_found_msg".+>.+</div>[^-]'
     _TEST = {
         'url': 'http://www.promptfile.com/l/D21B4746E9-F01462F0FF',
         'md5': 'd1451b6302da7215485837aaea882c4c',
@@ -27,11 +26,10 @@ class PromptFileIE(InfoExtractor):
     }

     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
+        video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)

-        if re.search(self._FILE_NOT_FOUND_REGEX, webpage) is not None:
+        if re.search(r'<div.+id="not_found_msg".+>(?!We are).+</div>[^-]', webpage) is not None:
             raise ExtractorError('Video %s does not exist' % video_id,
                                  expected=True)

youtube_dl/extractor/quickvid.py (new file, 51 lines)
@@ -0,0 +1,51 @@
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    compat_urlparse,
+    determine_ext,
+    int_or_none,
+)
+
+
+class QuickVidIE(InfoExtractor):
+    _VALID_URL = r'https?://(www\.)?quickvid\.org/watch\.php\?v=(?P<id>[a-zA-Z_0-9-]+)'
+    _TEST = {
+        'url': 'http://quickvid.org/watch.php?v=sUQT3RCG8dx',
+        'md5': 'c0c72dd473f260c06c808a05d19acdc5',
+        'info_dict': {
+            'id': 'sUQT3RCG8dx',
+            'ext': 'mp4',
+            'title': 'Nick Offerman\'s Summer Reading Recap',
+            'thumbnail': 're:^https?://.*\.(?:png|jpg|gif)$',
+            'view_count': int,
+        },
+    }
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        webpage = self._download_webpage(url, video_id)
+
+        title = self._html_search_regex(r'<h2>(.*?)</h2>', webpage, 'title')
+        view_count = int_or_none(self._html_search_regex(
+            r'(?s)<div id="views">(.*?)</div>',
+            webpage, 'view count', fatal=False))
+        video_code = self._search_regex(
+            r'(?s)<video id="video"[^>]*>(.*?)</video>', webpage, 'video code')
+        formats = [
+            {
+                'url': compat_urlparse.urljoin(url, src),
+                'format_id': determine_ext(src, None),
+            } for src in re.findall('<source\s+src="([^"]+)"', video_code)
+        ]
+        self._sort_formats(formats)
+
+        return {
+            'id': video_id,
+            'title': title,
+            'formats': formats,
+            'thumbnail': self._og_search_thumbnail(webpage),
+            'view_count': view_count,
+        }
@@ -1,43 +1,43 @@
 from __future__ import unicode_literals

-import re
-
 from .common import InfoExtractor
-from ..utils import (
-    clean_html,
-    compat_parse_qs,
-)
+from ..utils import compat_urllib_parse_unquote


 class Ro220IE(InfoExtractor):
     IE_NAME = '220.ro'
-    _VALID_URL = r'(?x)(?:https?://)?(?:www\.)?220\.ro/(?P<category>[^/]+)/(?P<shorttitle>[^/]+)/(?P<video_id>[^/]+)'
+    _VALID_URL = r'(?x)(?:https?://)?(?:www\.)?220\.ro/(?P<category>[^/]+)/(?P<shorttitle>[^/]+)/(?P<id>[^/]+)'
     _TEST = {
-        "url": "http://www.220.ro/sport/Luati-Le-Banii-Sez-4-Ep-1/LYV6doKo7f/",
-        'file': 'LYV6doKo7f.mp4',
+        'url': 'http://www.220.ro/sport/Luati-Le-Banii-Sez-4-Ep-1/LYV6doKo7f/',
         'md5': '03af18b73a07b4088753930db7a34add',
         'info_dict': {
-            "title": "Luati-le Banii sez 4 ep 1",
-            "description": "re:^Iata-ne reveniti dupa o binemeritata vacanta\. +Va astept si pe Facebook cu pareri si comentarii.$",
+            'id': 'LYV6doKo7f',
+            'ext': 'mp4',
+            'title': 'Luati-le Banii sez 4 ep 1',
+            'description': 're:^Iata-ne reveniti dupa o binemeritata vacanta\. +Va astept si pe Facebook cu pareri si comentarii.$',
         }
     }

     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('video_id')
+        video_id = self._match_id(url)

         webpage = self._download_webpage(url, video_id)
-        flashVars_str = self._search_regex(
-            r'<param name="flashVars" value="([^"]+)"',
-            webpage, 'flashVars')
-        flashVars = compat_parse_qs(flashVars_str)
+        url = compat_urllib_parse_unquote(self._search_regex(
+            r'(?s)clip\s*:\s*{.*?url\s*:\s*\'([^\']+)\'', webpage, 'url'))
+        title = self._og_search_title(webpage)
+        description = self._og_search_description(webpage)
+        thumbnail = self._og_search_thumbnail(webpage)
+
+        formats = [{
+            'format_id': 'sd',
+            'url': url,
+            'ext': 'mp4',
+        }]

         return {
-            '_type': 'video',
             'id': video_id,
-            'ext': 'mp4',
-            'url': flashVars['videoURL'][0],
-            'title': flashVars['title'][0],
-            'description': clean_html(flashVars['desc'][0]),
-            'thumbnail': flashVars['preview'][0],
+            'formats': formats,
+            'title': title,
+            'description': description,
+            'thumbnail': thumbnail,
         }
@@ -81,7 +81,7 @@ class RTLnowIE(InfoExtractor):
             'id': '99205',
             'ext': 'flv',
             'title': 'Medicopter 117 - Angst!',
-            'description': 'md5:895b1df01639b5f61a04fc305a5cb94d',
+            'description': 're:^Im Therapiezentrum \'Sonnalm\' kommen durch eine Unachtsamkeit die für die B.handlung mit Phobikern gehaltenen Voglespinnen frei\. Eine Ausreißerin',
             'thumbnail': 'http://autoimg.static-fra.de/superrtlnow/287529/1500x1500/image2.jpg',
             'upload_date': '20080928',
             'duration': 2691,
@@ -1,8 +1,6 @@
 # -*- coding: utf-8 -*-
 from __future__ import unicode_literals

-import re
-
 from .common import InfoExtractor


@@ -21,19 +19,20 @@ class RUHDIE(InfoExtractor):
     }

     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
+        video_id = self._match_id(url)

         webpage = self._download_webpage(url, video_id)

         video_url = self._html_search_regex(
             r'<param name="src" value="([^"]+)"', webpage, 'video url')
         title = self._html_search_regex(
-            r'<title>([^<]+)&nbsp;&nbsp; RUHD.ru - Видео Высокого качества №1 в России!</title>', webpage, 'title')
+            r'<title>([^<]+)&nbsp;&nbsp; RUHD.ru - Видео Высокого качества №1 в России!</title>',
+            webpage, 'title')
         description = self._html_search_regex(
-            r'(?s)<div id="longdesc">(.+?)<span id="showlink">', webpage, 'description', fatal=False)
+            r'(?s)<div id="longdesc">(.+?)<span id="showlink">',
+            webpage, 'description', fatal=False)
         thumbnail = self._html_search_regex(
-            r'<param name="previewImage" value="([^"]+)"', webpage, 'thumbnail', fatal=False)
+            r'<param name="previewImage" value="([^"]+)"',
+            webpage, 'thumbnail', fatal=False)
         if thumbnail:
             thumbnail = 'http://www.ruhd.ru' + thumbnail

@@ -7,7 +7,6 @@ from .common import InfoExtractor
 from ..utils import (
     parse_duration,
     parse_iso8601,
-    int_or_none,
 )

youtube_dl/extractor/srmediathek.py (new file, 43 lines)
@@ -0,0 +1,43 @@
+# encoding: utf-8
+from __future__ import unicode_literals
+
+import json
+
+from .common import InfoExtractor
+from ..utils import js_to_json
+
+
+class SRMediathekIE(InfoExtractor):
+    IE_DESC = 'Süddeutscher Rundfunk'
+    _VALID_URL = r'https?://sr-mediathek\.sr-online\.de/index\.php\?.*?&id=(?P<id>[0-9]+)'
+
+    _TEST = {
+        'url': 'http://sr-mediathek.sr-online.de/index.php?seite=7&id=28455',
+        'info_dict': {
+            'id': '28455',
+            'ext': 'mp4',
+            'title': 'sportarena (26.10.2014)',
+            'description': 'Ringen: KSV Köllerbach gegen Aachen-Walheim; Frauen-Fußball: 1. FC Saarbrücken gegen Sindelfingen; Motorsport: Rallye in Losheim; dazu: Interview mit Timo Bernhard; Turnen: TG Saar; Reitsport: Deutscher Voltigier-Pokal; Badminton: Interview mit Michael Fuchs ',
+            'thumbnail': 're:^https?://.*\.jpg$',
+        },
+    }
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        webpage = self._download_webpage(url, video_id)
+
+        murls = json.loads(js_to_json(self._search_regex(
+            r'var mediaURLs\s*=\s*(.*?);\n', webpage, 'video URLs')))
+        formats = [{'url': murl} for murl in murls]
+        self._sort_formats(formats)
+
+        title = json.loads(js_to_json(self._search_regex(
+            r'var mediaTitles\s*=\s*(.*?);\n', webpage, 'title')))[0]
+
+        return {
+            'id': video_id,
+            'title': title,
+            'formats': formats,
+            'description': self._og_search_description(webpage),
+            'thumbnail': self._og_search_thumbnail(webpage),
+        }
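Aside (not from the patch): the new extractor leans on the js_to_json helper from youtube_dl.utils to turn the page's JavaScript array literal into strict JSON before json.loads. A small illustration with made-up URLs:

```python
import json

from youtube_dl.utils import js_to_json

# The page embeds something like: var mediaURLs = ['...', '...'];
# js_to_json rewrites the single-quoted JS literal into valid JSON.
media_urls_js = "['http://example.invalid/video_hi.mp4', 'http://example.invalid/video_lo.mp4']"
media_urls = json.loads(js_to_json(media_urls_js))
print(media_urls[0])
```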
@@ -10,7 +10,6 @@ class SyfyIE(InfoExtractor):

     _TESTS = [{
         'url': 'http://www.syfy.com/videos/Robot%20Combat%20League/Behind%20the%20Scenes/vid:2631458',
-        'md5': 'e07de1d52c7278adbb9b9b1c93a66849',
         'info_dict': {
             'id': 'NmqMrGnXvmO1',
             'ext': 'flv',
@@ -6,6 +6,7 @@ import json
 from .common import InfoExtractor
 from ..utils import (
     compat_str,
+    determine_ext,
     ExtractorError,
     xpath_with_ns,
 )
@@ -34,10 +35,21 @@ class ThePlatformIE(InfoExtractor):
             'skip_download': True,
         },
     }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+        if mobj.group('config'):
+            config_url = url+ '&form=json'
+            config_url = config_url.replace('swf/', 'config/')
+            config_url = config_url.replace('onsite/', 'onsite/config/')
+            config = self._download_json(config_url, video_id, 'Downloading config')
+            smil_url = config['releaseUrl'] + '&format=SMIL&formats=MPEG4&manifest=f4m'
+        else:
+            smil_url = ('http://link.theplatform.com/s/dJ5BDC/{0}/meta.smil?'
+                        'format=smil&mbr=true'.format(video_id))
+

-    def _get_info(self, video_id, smil_url):
         meta = self._download_xml(smil_url, video_id)

         try:
             error_msg = next(
                 n.attrib['abstract']
@@ -89,10 +101,14 @@ class ThePlatformIE(InfoExtractor):
             for f in switch.findall(_x('smil:video')):
                 attr = f.attrib
                 vbr = int(attr['system-bitrate']) // 1000
+                ext = determine_ext(attr['src'])
+                if ext == 'once':
+                    ext = 'mp4'
                 formats.append({
                     'format_id': compat_str(vbr),
                     'url': attr['src'],
                     'vbr': vbr,
+                    'ext': ext,
                 })
             self._sort_formats(formats)

@@ -104,17 +120,3 @@ class ThePlatformIE(InfoExtractor):
             'thumbnail': info['defaultThumbnailUrl'],
             'duration': info['duration']//1000,
         }
-
-    def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
-        if mobj.group('config'):
-            config_url = url+ '&form=json'
-            config_url = config_url.replace('swf/', 'config/')
-            config_url = config_url.replace('onsite/', 'onsite/config/')
-            config = self._download_json(config_url, video_id, 'Downloading config')
-            smil_url = config['releaseUrl'] + '&format=SMIL&formats=MPEG4&manifest=f4m'
-        else:
-            smil_url = ('http://link.theplatform.com/s/dJ5BDC/{0}/meta.smil?'
-                        'format=smil&mbr=true'.format(video_id))
-        return self._get_info(video_id, smil_url)
@@ -1,13 +1,12 @@
 from __future__ import unicode_literals

-import re
-
 from .common import InfoExtractor
+from ..utils import xpath_text


 class TruTubeIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?trutube\.tv/video/(?P<id>[0-9]+)/.*'
-    _TEST = {
+    _VALID_URL = r'https?://(?:www\.)?trutube\.tv/(?:video/|nuevo/player/embed\.php\?v=)(?P<id>[0-9]+)'
+    _TESTS = [{
         'url': 'http://trutube.tv/video/14880/Ramses-II-Proven-To-Be-A-Red-Headed-Caucasoid-',
         'md5': 'c5b6e301b0a2040b074746cbeaa26ca1',
         'info_dict': {
@@ -16,29 +15,26 @@ class TruTubeIE(InfoExtractor):
             'title': 'Ramses II - Proven To Be A Red Headed Caucasoid',
             'thumbnail': 're:^http:.*\.jpg$',
         }
-    }
+    }, {
+        'url': 'https://trutube.tv/nuevo/player/embed.php?v=14880',
+        'only_matching': True,
+    }]

     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
+        video_id = self._match_id(url)

-        webpage = self._download_webpage(url, video_id)
-        video_title = self._og_search_title(webpage).strip()
-        thumbnail = self._search_regex(
-            r"var splash_img = '([^']+)';", webpage, 'thumbnail', fatal=False)
+        config = self._download_xml(
+            'https://trutube.tv/nuevo/player/config.php?v=%s' % video_id,
+            video_id, transform_source=lambda s: s.strip())

-        all_formats = re.finditer(
-            r"var (?P<key>[a-z]+)_video_file\s*=\s*'(?P<url>[^']+)';", webpage)
-        formats = [{
-            'format_id': m.group('key'),
-            'quality': -i,
-            'url': m.group('url'),
-        } for i, m in enumerate(all_formats)]
-        self._sort_formats(formats)
+        # filehd is always 404
+        video_url = xpath_text(config, './file', 'video URL', fatal=True)
+        title = xpath_text(config, './title', 'title')
+        thumbnail = xpath_text(config, './image', ' thumbnail')

         return {
             'id': video_id,
-            'title': video_title,
-            'formats': formats,
+            'url': video_url,
+            'title': title,
             'thumbnail': thumbnail,
         }
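Aside (not from the patch): the rewritten extractor relies on xpath_text from youtube_dl.utils, which returns the text of the first matching element, None when it is missing, and raises when fatal=True. The XML below is a made-up stand-in for the nuevo player config:

```python
import xml.etree.ElementTree as ET

from youtube_dl.utils import xpath_text

# Fabricated config document, shaped like the nuevo player response.
config = ET.fromstring(
    '<config><file>http://example.invalid/14880.mp4</file>'
    '<title>Ramses II</title></config>')

print(xpath_text(config, './file', 'video URL', fatal=True))  # the <file> text
print(xpath_text(config, './title', 'title'))                 # 'Ramses II'
print(xpath_text(config, './image', 'thumbnail'))             # None: no <image> element
```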
@@ -72,7 +72,7 @@ class UstreamChannelIE(InfoExtractor):
         'info_dict': {
             'id': '10874166',
         },
-        'playlist_mincount': 54,
+        'playlist_mincount': 17,
     }

     def _real_extract(self, url):
@@ -17,7 +17,7 @@ class VGTVIE(InfoExtractor):
         'info_dict': {
             'id': '84196',
             'ext': 'mp4',
-            'title': 'Hevnen er søt episode 10: Abu',
+            'title': 'Hevnen er søt episode 1:10 - Abu',
             'description': 'md5:e25e4badb5f544b04341e14abdc72234',
             'thumbnail': 're:^https?://.*\.jpg',
             'duration': 648.000,
@@ -67,9 +67,7 @@ class VGTVIE(InfoExtractor):
     ]

     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
-
+        video_id = self._match_id(url)
         data = self._download_json(
             'http://svp.vg.no/svp/api/v1/vgtv/assets/%s?appName=vgtv-website' % video_id,
             video_id, 'Downloading media JSON')
@@ -8,13 +8,11 @@ import itertools
 from .common import InfoExtractor
 from .subtitles import SubtitlesInfoExtractor
 from ..utils import (
-    clean_html,
     compat_HTTPError,
     compat_urllib_parse,
     compat_urllib_request,
     compat_urlparse,
     ExtractorError,
-    get_element_by_attribute,
     InAdvancePagedList,
     int_or_none,
     RegexNotFoundError,
@@ -514,7 +512,7 @@ class VimeoReviewIE(InfoExtractor):
         'info_dict': {
             'id': '91613211',
             'ext': 'mp4',
-            'title': 'Death by dogma versus assembling agile - Sander Hoogendoorn',
+            'title': 're:(?i)^Death by dogma versus assembling agile . Sander Hoogendoorn',
             'uploader': 'DevWeek Events',
             'duration': 2773,
             'thumbnail': 're:^https?://.*\.jpg$',
@@ -70,7 +70,7 @@ class VineUserIE(InfoExtractor):
         'info_dict': {
             'id': 'Visa',
         },
-        'playlist_mincount': 47,
+        'playlist_mincount': 46,
     }

     def _real_extract(self, url):
@@ -138,9 +138,19 @@ class VKIE(InfoExtractor):
         info_url = 'http://vk.com/al_video.php?act=show&al=1&video=%s' % video_id
         info_page = self._download_webpage(info_url, video_id)

-        if re.search(r'<!>Please log in or <', info_page):
-            raise ExtractorError('This video is only available for registered users, '
-                'use --username and --password options to provide account credentials.', expected=True)
+        ERRORS = {
+            r'>Видеозапись .*? была изъята из публичного доступа в связи с обращением правообладателя.<':
+            'Video %s has been removed from public access due to rightholder complaint.',
+            r'<!>Please log in or <':
+            'Video %s is only available for registered users, '
+            'use --username and --password options to provide account credentials.',
+            '<!>Unknown error':
+            'Video %s does not exist.'
+        }
+
+        for error_re, error_msg in ERRORS.items():
+            if re.search(error_re, info_page):
+                raise ExtractorError(error_msg % video_id, expected=True)

         m_yt = re.search(r'src="(http://www.youtube.com/.*?)"', info_page)
         if m_yt is not None:
@@ -37,7 +37,7 @@ class WimpIE(InfoExtractor):
         video_id = mobj.group(1)
         webpage = self._download_webpage(url, video_id)
         video_url = self._search_regex(
-            r's1\.addVariable\("file",\s*"([^"]+)"\);', webpage, 'video URL')
+            r"'file'\s*:\s*'([^']+)'", webpage, 'video URL')
         if YoutubeIE.suitable(video_url):
             self.to_screen('Found YouTube video')
             return {
@@ -20,7 +20,7 @@ class XTubeIE(InfoExtractor):
             'id': 'kVTUy_G222_',
             'ext': 'mp4',
             'title': 'strange erotica',
-            'description': 'surreal gay themed erotica...almost an ET kind of thing',
+            'description': 'http://www.xtube.com an ET kind of thing',
             'uploader': 'greenshowers',
             'duration': 450,
             'age_limit': 18,
@@ -13,7 +13,6 @@ class YnetIE(InfoExtractor):
     _TESTS = [
         {
             'url': 'http://hot.ynet.co.il/home/0,7340,L-11659-99244,00.html',
-            'md5': '4b29cb57c3dddd57642b3f051f535b07',
             'info_dict': {
                 'id': 'L-11659-99244',
                 'ext': 'flv',
@@ -22,7 +21,6 @@ class YnetIE(InfoExtractor):
             }
         }, {
             'url': 'http://hot.ynet.co.il/home/0,7340,L-8859-84418,00.html',
-            'md5': '8194c2ea221e9a639cac96b6b0753dc5',
             'info_dict': {
                 'id': 'L-8859-84418',
                 'ext': 'flv',
@@ -185,8 +185,8 @@ class YoutubeBaseInfoExtractor(InfoExtractor):

         self._download_webpage(
             req, None,
-            note='Confirming age', errnote='Unable to confirm age')
-        return True
+            note='Confirming age', errnote='Unable to confirm age',
+            fatal=False)

     def _real_initialize(self):
         if self._downloader is None:
@@ -274,6 +274,9 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
         '138': {'ext': 'mp4', 'height': 2160, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
         '160': {'ext': 'mp4', 'height': 144, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
         '264': {'ext': 'mp4', 'height': 1440, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
+        '298': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'h264'},
+        '299': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'h264'},
+        '266': {'ext': 'mp4', 'height': 2160, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'vcodec': 'h264'},

         # Dash mp4 audio
         '139': {'ext': 'm4a', 'format_note': 'DASH audio', 'vcodec': 'none', 'abr': 48, 'preference': -50},
@@ -297,6 +300,8 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
         '248': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
         '271': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
         '272': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
+        '302': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'VP9'},
+        '303': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'VP9'},

         # Dash webm audio
         '171': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'abr': 128, 'preference': -50},
@@ -1057,7 +1062,7 @@ class YoutubePlaylistIE(YoutubeBaseInfoExtractor):
         'note': 'issue #673',
         'url': 'PLBB231211A4F62143',
         'info_dict': {
-            'title': 'Team Fortress 2 (Class-based LP)',
+            'title': '[OLD]Team Fortress 2 (Class-based LP)',
         },
         'playlist_mincount': 26,
     }, {
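Aside (not from the patch): a quick way to confirm the newly mapped 60 fps DASH itags show up for a video, assuming YoutubeDL registers its default extractors on construction as it does in this release. The watch URL is a placeholder:

```python
import youtube_dl

WATCH_URL = 'https://www.youtube.com/watch?v=PLACEHOLDER'  # any video with 60 fps streams

ydl = youtube_dl.YoutubeDL({'quiet': True})
info = ydl.extract_info(WATCH_URL, download=False)
for f in info['formats']:
    # 298/299 are the new 60 fps H.264 itags, 302/303 the VP9 ones.
    if f['format_id'] in ('298', '299', '302', '303'):
        print(f['format_id'], f.get('height'), f.get('fps'))
```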
@@ -10,8 +10,84 @@ from ..utils import (
 )


+def extract_from_xml_url(ie, video_id, xml_url):
+    doc = ie._download_xml(
+        xml_url, video_id,
+        note='Downloading video info',
+        errnote='Failed to download video info')
+
+    title = doc.find('.//information/title').text
+    description = doc.find('.//information/detail').text
+    duration = int(doc.find('.//details/lengthSec').text)
+    uploader_node = doc.find('.//details/originChannelTitle')
+    uploader = None if uploader_node is None else uploader_node.text
+    uploader_id_node = doc.find('.//details/originChannelId')
+    uploader_id = None if uploader_id_node is None else uploader_id_node.text
+    upload_date = unified_strdate(doc.find('.//details/airtime').text)
+
+    def xml_to_format(fnode):
+        video_url = fnode.find('url').text
+        is_available = 'http://www.metafilegenerator' not in video_url
+
+        format_id = fnode.attrib['basetype']
+        format_m = re.match(r'''(?x)
+            (?P<vcodec>[^_]+)_(?P<acodec>[^_]+)_(?P<container>[^_]+)_
+            (?P<proto>[^_]+)_(?P<index>[^_]+)_(?P<indexproto>[^_]+)
+        ''', format_id)
+
+        ext = format_m.group('container')
+        proto = format_m.group('proto').lower()
+
+        quality = fnode.find('./quality').text
+        abr = int(fnode.find('./audioBitrate').text) // 1000
+        vbr_node = fnode.find('./videoBitrate')
+        vbr = None if vbr_node is None else int(vbr_node.text) // 1000
+
+        width_node = fnode.find('./width')
+        width = None if width_node is None else int_or_none(width_node.text)
+        height_node = fnode.find('./height')
+        height = None if height_node is None else int_or_none(height_node.text)
+
+        format_note = ''
+        if not format_note:
+            format_note = None
+
+        return {
+            'format_id': format_id + '-' + quality,
+            'url': video_url,
+            'ext': ext,
+            'acodec': format_m.group('acodec'),
+            'vcodec': format_m.group('vcodec'),
+            'abr': abr,
+            'vbr': vbr,
+            'width': width,
+            'height': height,
+            'filesize': int_or_none(fnode.find('./filesize').text),
+            'format_note': format_note,
+            'protocol': proto,
+            '_available': is_available,
+        }
+
+    format_nodes = doc.findall('.//formitaeten/formitaet')
+    formats = list(filter(
+        lambda f: f['_available'],
+        map(xml_to_format, format_nodes)))
+    ie._sort_formats(formats)
+
+    return {
+        'id': video_id,
+        'title': title,
+        'description': description,
+        'duration': duration,
+        'uploader': uploader,
+        'uploader_id': uploader_id,
+        'upload_date': upload_date,
+        'formats': formats,
+    }
+
+
 class ZDFIE(InfoExtractor):
-    _VALID_URL = r'^https?://www\.zdf\.de/ZDFmediathek(?P<hash>#)?/(.*beitrag/(?:video/)?)(?P<video_id>[0-9]+)(?:/[^/?]+)?(?:\?.*)?'
+    _VALID_URL = r'^https?://www\.zdf\.de/ZDFmediathek(?P<hash>#)?/(.*beitrag/(?:video/)?)(?P<id>[0-9]+)(?:/[^/?]+)?(?:\?.*)?'

     _TEST = {
         'url': 'http://www.zdf.de/ZDFmediathek/beitrag/video/2037704/ZDFspezial---Ende-des-Machtpokers--?bc=sts;stt',
@@ -29,81 +105,7 @@ class ZDFIE(InfoExtractor):
     }

     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('video_id')
+        video_id = self._match_id(url)

         xml_url = 'http://www.zdf.de/ZDFmediathek/xmlservice/web/beitragsDetails?ak=web&id=%s' % video_id
-        doc = self._download_xml(
-            xml_url, video_id,
-            note='Downloading video info',
-            errnote='Failed to download video info')
-
-        title = doc.find('.//information/title').text
-        description = doc.find('.//information/detail').text
-        duration = int(doc.find('.//details/lengthSec').text)
-        uploader_node = doc.find('.//details/originChannelTitle')
-        uploader = None if uploader_node is None else uploader_node.text
-        uploader_id_node = doc.find('.//details/originChannelId')
-        uploader_id = None if uploader_id_node is None else uploader_id_node.text
-        upload_date = unified_strdate(doc.find('.//details/airtime').text)
-
-        def xml_to_format(fnode):
-            video_url = fnode.find('url').text
-            is_available = 'http://www.metafilegenerator' not in video_url
-
-            format_id = fnode.attrib['basetype']
-            format_m = re.match(r'''(?x)
-                (?P<vcodec>[^_]+)_(?P<acodec>[^_]+)_(?P<container>[^_]+)_
-                (?P<proto>[^_]+)_(?P<index>[^_]+)_(?P<indexproto>[^_]+)
-            ''', format_id)
-
-            ext = format_m.group('container')
-            proto = format_m.group('proto').lower()
-
-            quality = fnode.find('./quality').text
-            abr = int(fnode.find('./audioBitrate').text) // 1000
-            vbr_node = fnode.find('./videoBitrate')
-            vbr = None if vbr_node is None else int(vbr_node.text) // 1000
-
-            width_node = fnode.find('./width')
-            width = None if width_node is None else int_or_none(width_node.text)
-            height_node = fnode.find('./height')
-            height = None if height_node is None else int_or_none(height_node.text)
-
-            format_note = ''
-            if not format_note:
-                format_note = None
-
-            return {
-                'format_id': format_id + '-' + quality,
-                'url': video_url,
-                'ext': ext,
-                'acodec': format_m.group('acodec'),
-                'vcodec': format_m.group('vcodec'),
-                'abr': abr,
-                'vbr': vbr,
-                'width': width,
-                'height': height,
-                'filesize': int_or_none(fnode.find('./filesize').text),
-                'format_note': format_note,
-                'protocol': proto,
-                '_available': is_available,
-            }
-
-        format_nodes = doc.findall('.//formitaeten/formitaet')
-        formats = list(filter(
-            lambda f: f['_available'],
-            map(xml_to_format, format_nodes)))
-
-        self._sort_formats(formats)
-
-        return {
-            'id': video_id,
-            'title': title,
-            'description': description,
-            'duration': duration,
-            'uploader': uploader,
-            'uploader_id': uploader_id,
-            'upload_date': upload_date,
-            'formats': formats,
-        }
+        return extract_from_xml_url(self, video_id, xml_url)
@@ -8,7 +8,6 @@ import time
 from .common import AudioConversionError, PostProcessor

 from ..utils import (
-    check_executable,
     compat_subprocess_get_DEVNULL,
     encodeArgument,
     encodeFilename,
@@ -78,7 +77,7 @@ class FFmpegPostProcessor(PostProcessor):
     @property
     def _probe_executable(self):
         if self._downloader.params.get('prefer_ffmpeg', False):
-            prefs = ('ffproe', 'avprobe')
+            prefs = ('ffprobe', 'avprobe')
         else:
             prefs = ('avprobe', 'ffprobe')
         for p in prefs:
@@ -925,7 +925,7 @@ def parse_iso8601(date_str, delimiter='T'):
         return None

     m = re.search(
-        r'Z$| ?(?P<sign>\+|-)(?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})$',
+        r'(\.[0-9]+)?(?:Z$| ?(?P<sign>\+|-)(?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})$)',
         date_str)
     if not m:
         timezone = datetime.timedelta()
@@ -938,7 +938,7 @@ def parse_iso8601(date_str, delimiter='T'):
             timezone = datetime.timedelta(
                 hours=sign * int(m.group('hours')),
                 minutes=sign * int(m.group('minutes')))
     date_format = '%Y-%m-%d{0}%H:%M:%S'.format(delimiter)
     dt = datetime.datetime.strptime(date_str, date_format) - timezone
     return calendar.timegm(dt.timetuple())
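Illustration (not from the patch): the widened pattern also consumes an optional fractional-second part before the timezone marker, so timestamps such as 2014-11-02T01:02:03.527Z can have the whole suffix handled in one match (which, judging from the surrounding function, is what gets stripped before strptime). A standalone check of just the regex:

```python
import re

OLD_RE = r'Z$| ?(?P<sign>\+|-)(?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})$'
NEW_RE = r'(\.[0-9]+)?(?:Z$| ?(?P<sign>\+|-)(?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})$)'

ts = '2014-11-02T01:02:03.527Z'
# The old pattern only covers the trailing 'Z'; the new one also swallows '.527',
# so the fractional seconds are part of the matched suffix.
print(re.search(OLD_RE, ts).group(0))  # 'Z'
print(re.search(NEW_RE, ts).group(0))  # '.527Z'
```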
@@ -1,2 +1,2 @@

-__version__ = '2014.10.26.1'
+__version__ = '2014.11.02.1'