Compare commits
59 Commits
2013.01.06 ... 2013.01.13

| SHA1 |
| --- |
| bbc3e2753a |
| 67353612ba |
| bffbd5f038 |
| d8bbf2018e |
| 187f491ad2 |
| 335959e778 |
| 3b83bf8f6a |
| 51719893bf |
| 1841f65e64 |
| bb28998920 |
| fbc5f99db9 |
| ca0a0bbeec |
| 6119f78cb9 |
| 539679c7f9 |
| b642cd44c1 |
| fffec3b9d9 |
| 3446dfb7cb |
| db16276b7c |
| 629fcdd135 |
| 64ce2aada8 |
| 565f751967 |
| 6017964580 |
| 1d16b0c3fe |
| 7851b37993 |
| d81edc573e |
| ef0c8d5f9f |
| db30f02b50 |
| 4ba7262467 |
| 67d0c25eab |
| 09f9552b40 |
| 142d38f776 |
| 6dd3471900 |
| 280d67896a |
| 510e6f6dc1 |
| 712e86b999 |
| 74fdba620d |
| dc1c479a6f |
| 119d536e07 |
| fa1bf9c653 |
| 814eed0ea1 |
| 0aa3068e9e |
| db2d6124b1 |
| 039dc61bd2 |
| 9450bfa26e |
| 18be482a6f |
| ca6710ee41 |
| caec7618a1 |
| 7e7ab2815c |
| d7744f2219 |
| 7161829de5 |
| 991ba7fae3 |
| a7539296ce |
| 258d5850c9 |
| 187da2c093 |
| 9a2cf56d51 |
| 5f7ad21633 |
| 089d47f8d5 |
| fdef722fa1 |
| 110d4f4c91 |
.tarignore (17 changed lines)

@@ -1,17 +0,0 @@
updates_key.pem
*.pyc
*.pyo
youtube-dl.exe
wine-py2exe/
py2exe.log
*.kate-swp
build/
dist/
MANIFEST
*.DS_Store
youtube-dl.tar.gz
.coverage
cover/
__pycache__/
.git/
*~

@@ -1 +1 @@
9999.99.99
2012.12.99
Makefile (23 changed lines)

@@ -1,7 +1,7 @@
all: youtube-dl README.md README.txt youtube-dl.1 youtube-dl.bash-completion

clean:
    rm -rf youtube-dl youtube-dl.exe youtube-dl.1 youtube-dl.bash-completion README.txt MANIFEST build/ dist/ .coverage cover/
    rm -rf youtube-dl youtube-dl.exe youtube-dl.1 youtube-dl.bash-completion README.txt MANIFEST build/ dist/ .coverage cover/ youtube-dl.tar.gz

PREFIX=/usr/local
BINDIR=$(PREFIX)/bin

@@ -20,7 +20,9 @@ test:
    #nosetests --with-coverage --cover-package=youtube_dl --cover-html --verbose --processes 4 test
    nosetests --verbose test

.PHONY: all clean install test
tar: youtube-dl.tar.gz

.PHONY: all clean install test tar

youtube-dl: youtube_dl/*.py
    zip --quiet youtube-dl youtube_dl/*.py

@@ -42,6 +44,17 @@ youtube-dl.1: README.md
youtube-dl.bash-completion: youtube_dl/*.py devscripts/bash-completion.in
    python devscripts/bash-completion.py

youtube-dl.tar.gz: all
    tar -cvzf youtube-dl.tar.gz -s "|^./|./youtube-dl/|" \
        --exclude-from=".tarignore" -- .
youtube-dl.tar.gz: youtube-dl README.md README.txt youtube-dl.1 youtube-dl.bash-completion
    @tar -czf youtube-dl.tar.gz --transform "s|^|youtube-dl/|" --owner 0 --group 0 \
        --exclude '*.DS_Store' \
        --exclude '*.kate-swp' \
        --exclude '*.pyc' \
        --exclude '*.pyo' \
        --exclude '*~' \
        --exclude '__pycache' \
        --exclude '.git' \
        -- \
        bin devscripts test youtube_dl \
        CHANGELOG LICENSE README.md README.txt \
        Makefile MANIFEST.in youtube-dl.1 youtube-dl.bash-completion setup.py \
        youtube-dl
README.md (12 changed lines)

@@ -9,8 +9,8 @@ youtube-dl
# DESCRIPTION
**youtube-dl** is a small command-line program to download videos from
YouTube.com and a few more sites. It requires the Python interpreter, version
2.x (x being at least 6), and it is not platform specific. It should work in
your Unix box, in Windows or in Mac OS X. It is released to the public domain,
2.6, 2.7, or 3.3+, and it is not platform specific. It should work on
your Unix box, on Windows or on Mac OS X. It is released to the public domain,
which means you can modify it, redistribute it or use it however you like.

# OPTIONS

@@ -105,11 +105,13 @@ which means you can modify it, redistribute it or use it however you like.
## Post-processing Options:
    -x, --extract-audio        convert video files to audio-only files (requires
                               ffmpeg or avconv and ffprobe or avprobe)
    --audio-format FORMAT      "best", "aac", "vorbis", "mp3", "m4a", or "wav";
                               best by default
    --audio-format FORMAT      "best", "aac", "vorbis", "mp3", "m4a", "opus", or
                               "wav"; best by default
    --audio-quality QUALITY    ffmpeg/avconv audio quality specification, insert a
                               value between 0 (better) and 9 (worse) for VBR or a
                               specific bitrate like 128K (default 5)
    --recode-video FORMAT      Encode the video to another format if necessary
                               (currently supported: mp4|flv|ogg|webm)
    -k, --keep-video           keeps the video file on disk after the post-
                               processing; the video is erased by default
    --no-post-overwrites       do not overwrite post-processed files; the post-

@@ -117,7 +119,7 @@ which means you can modify it, redistribute it or use it however you like.

# CONFIGURATION

You can configure youtube-dl by placing default arguments (such as `--extract-audio --no-mtime` to always extract the audio and not copy the mtime) into `/etc/youtube-dl.conf` and/or `~/.local/config/youtube-dl.conf`.
You can configure youtube-dl by placing default arguments (such as `--extract-audio --no-mtime` to always extract the audio and not copy the mtime) into `/etc/youtube-dl.conf` and/or `~/.config/youtube-dl.conf`.

# OUTPUT TEMPLATE
@@ -4,12 +4,17 @@ import rsa
import json
from binascii import hexlify

try:
    input = raw_input
except NameError:
    pass

versions_info = json.load(open('update/versions.json'))
if 'signature' in versions_info:
    del versions_info['signature']

print('Enter the PKCS1 private key, followed by a blank line:')
privkey = ''
privkey = b''
while True:
    try:
        line = input()
@@ -17,8 +22,7 @@ while True:
        break
    if line == '':
        break
    privkey += line + '\n'
privkey = bytes(privkey, 'ascii')
    privkey += line.encode('ascii') + b'\n'
privkey = rsa.PrivateKey.load_pkcs1(privkey)

signature = hexlify(rsa.pkcs1.sign(json.dumps(versions_info, sort_keys=True).encode('utf-8'), privkey, 'SHA-256')).decode()
@@ -47,39 +47,40 @@ REV=$(git rev-parse HEAD)
make youtube-dl youtube-dl.tar.gz
wget "http://jeromelaheurte.net:8142/download/rg3/youtube-dl/youtube-dl.exe?rev=$REV" -O youtube-dl.exe || \
wget "http://jeromelaheurte.net:8142/build/rg3/youtube-dl/youtube-dl.exe?rev=$REV" -O youtube-dl.exe
mkdir -p "update_staging/$version"
mv youtube-dl youtube-dl.exe "update_staging/$version"
mv youtube-dl.tar.gz "update_staging/$version/youtube-dl-$version.tar.gz"
RELEASE_FILES=youtube-dl youtube-dl.exe youtube-dl-$version.tar.gz
(cd update_staging/$version/ && md5sum $RELEASE_FILES > MD5SUMS)
(cd update_staging/$version/ && sha1sum $RELEASE_FILES > SHA1SUMS)
(cd update_staging/$version/ && sha256sum $RELEASE_FILES > SHA2-256SUMS)
(cd update_staging/$version/ && sha512sum $RELEASE_FILES > SHA2-512SUMS)
mkdir -p "build/$version"
mv youtube-dl youtube-dl.exe "build/$version"
mv youtube-dl.tar.gz "build/$version/youtube-dl-$version.tar.gz"
RELEASE_FILES="youtube-dl youtube-dl.exe youtube-dl-$version.tar.gz"
(cd build/$version/ && md5sum $RELEASE_FILES > MD5SUMS)
(cd build/$version/ && sha1sum $RELEASE_FILES > SHA1SUMS)
(cd build/$version/ && sha256sum $RELEASE_FILES > SHA2-256SUMS)
(cd build/$version/ && sha512sum $RELEASE_FILES > SHA2-512SUMS)
git checkout HEAD -- youtube-dl youtube-dl.exe

echo "\n### Signing and uploading the new binaries to youtube-dl.org..."
for f in $RELEASE_FILES; do gpg --detach-sig "update_staging/$version/$f"; done
scp -r "update_staging/$version" ytdl@youtube-dl.org:html/downloads/
rm -r update_staging
for f in $RELEASE_FILES; do gpg --detach-sig "build/$version/$f"; done
scp -r "build/$version" ytdl@youtube-dl.org:html/downloads/

echo "\n### Now switching to gh-pages..."
git checkout gh-pages
git checkout "$MASTER" -- devscripts/gh-pages/
git reset devscripts/gh-pages/
devscripts/gh-pages/add-version.py $version
devscripts/gh-pages/sign-versions.py < updates_key.pem
devscripts/gh-pages/generate-download.py
devscripts/gh-pages/update-copyright.py
rm -r test_coverage
mv cover test_coverage
git add *.html *.html.in update test_coverage
git commit -m "release $version"
git show HEAD
read -p "Is it good, can I push? (y/n) " -n 1
if [[ ! $REPLY =~ ^[Yy]$ ]]; then exit 1; fi
echo
git push origin gh-pages
git clone --branch gh-pages --single-branch . build/gh-pages
ROOT=$(pwd)
(
    set -e
    ORIGIN_URL=$(git config --get remote.origin.url)
    cd build/gh-pages
    "$ROOT/devscripts/gh-pages/add-version.py" $version
    "$ROOT/devscripts/gh-pages/sign-versions.py" < "$ROOT/updates_key.pem"
    "$ROOT/devscripts/gh-pages/generate-download.py"
    "$ROOT/devscripts/gh-pages/update-copyright.py"
    git add *.html *.html.in update
    git commit -m "release $version"
    git show HEAD
    read -p "Is it good, can I push? (y/n) " -n 1
    if [[ ! $REPLY =~ ^[Yy]$ ]]; then exit 1; fi
    echo
    git push "$ROOT" gh-pages
    git push "$ORIGIN_URL" gh-pages
)
rm -r build

echo "\n### DONE!"
rm -r devscripts
git checkout $MASTER
@@ -26,6 +26,7 @@ cookie_processor = compat_urllib_request.HTTPCookieProcessor(jar)
proxy_handler = compat_urllib_request.ProxyHandler()
opener = compat_urllib_request.build_opener(proxy_handler, cookie_processor, YoutubeDLHandler())
compat_urllib_request.install_opener(opener)
socket.setdefaulttimeout(10)

def _try_rm(filename):
    """ Remove a file if it exists """

@@ -81,6 +82,11 @@ def generator(test_case):
        fd.add_info_extractor(ie())
        for ien in test_case.get('add_ie', []):
            fd.add_info_extractor(getattr(youtube_dl.InfoExtractors, ien + 'IE')())
        finished_hook_called = set()
        def _hook(status):
            if status['status'] == 'finished':
                finished_hook_called.add(status['filename'])
        fd.add_progress_hook(_hook)

        test_cases = test_case.get('playlist', [test_case])
        for tc in test_cases:
@@ -93,6 +99,7 @@ def generator(test_case):
            for tc in test_cases:
                if not test_case.get('params', {}).get('skip_download', False):
                    self.assertTrue(os.path.exists(tc['file']))
                    self.assertTrue(tc['file'] in finished_hook_called)
                self.assertTrue(os.path.exists(tc['file'] + '.info.json'))
                if 'md5' in tc:
                    md5_for_file = _file_md5(tc['file'])
@@ -35,6 +35,24 @@
    "url": "http://www.xvideos.com/video939581/funny_porns_by_s_-1",
    "file": "939581.flv"
  },
  {
    "name": "YouPorn",
    "md5": "c37ddbaaa39058c76a7e86c6813423c1",
    "url": "http://www.youporn.com/watch/505835/sex-ed-is-it-safe-to-masturbate-daily/",
    "file": "505835.mp4"
  },
  {
    "name": "Pornotube",
    "md5": "374dd6dcedd24234453b295209aa69b6",
    "url": "http://pornotube.com/c/173/m/1689755/Marilyn-Monroe-Bathing",
    "file": "1689755.flv"
  },
  {
    "name": "YouJizz",
    "md5": "07e15fa469ba384c7693fd246905547c",
    "url": "http://www.youjizz.com/videos/zeichentrick-1-2189178.html",
    "file": "2189178.flv"
  },
  {
    "name": "Vimeo",
    "md5": "8879b6cc097e987f02484baf890129e5",
@@ -194,5 +212,18 @@
      }
    }
  ]
  },
  {
    "name": "RBMARadio",
    "url": "http://www.rbmaradio.com/shows/ford-lopatin-live-at-primavera-sound-2011",
    "file": "ford-lopatin-live-at-primavera-sound-2011.mp3",
    "md5": "6bc6f9bcb18994b4c983bc3bf4384d95",
    "info_dict": {
      "title": "Live at Primavera Sound 2011",
      "description": "Joel Ford and Daniel \u2019Oneohtrix Point Never\u2019 Lopatin fly their midified pop extravaganza to Spain. Live at Primavera Sound 2011.",
      "uploader": "Ford & Lopatin",
      "uploader_id": "ford-lopatin",
      "location": "Spain"
    }
  }
]
@@ -81,6 +81,7 @@ class FileDownloader(object):
    writesubtitles:  Write the video subtitles to a .srt file
    subtitleslang:   Language of the subtitles to download
    test:            Download only first bytes to test the downloader.
    keepvideo:       Keep the video file after post-processing
    """

    params = None
@@ -94,6 +95,7 @@ class FileDownloader(object):
        """Create a FileDownloader object with the given options."""
        self._ies = []
        self._pps = []
        self._progress_hooks = []
        self._download_retcode = 0
        self._num_downloads = 0
        self._screen_file = [sys.stdout, sys.stderr][params.get('logtostderr', False)]
@@ -529,13 +531,27 @@ class FileDownloader(object):
        return self._download_retcode

    def post_process(self, filename, ie_info):
        """Run the postprocessing chain on the given file."""
        """Run all the postprocessors on the given file."""
        info = dict(ie_info)
        info['filepath'] = filename
        keep_video = None
        for pp in self._pps:
            info = pp.run(info)
            if info is None:
                break
            try:
                keep_video_wish,new_info = pp.run(info)
                if keep_video_wish is not None:
                    if keep_video_wish:
                        keep_video = keep_video_wish
                    elif keep_video is None:
                        # No clear decision yet, let IE decide
                        keep_video = keep_video_wish
            except PostProcessingError as e:
                self.to_stderr(u'ERROR: ' + e.msg)
        if keep_video is False and not self.params.get('keepvideo', False):
            try:
                self.to_stderr(u'Deleting original file %s (pass -k to keep)' % filename)
                os.remove(encodeFilename(filename))
            except (IOError, OSError):
                self.to_stderr(u'WARNING: Unable to remove downloaded video file')

    def _download_with_rtmpdump(self, filename, url, player_url, page_url):
        self.report_destination(filename)
@@ -579,8 +595,15 @@ class FileDownloader(object):
                retval = 0
                break
        if retval == 0:
            self.to_screen(u'\r[rtmpdump] %s bytes' % os.path.getsize(encodeFilename(tmpfilename)))
            fsize = os.path.getsize(encodeFilename(tmpfilename))
            self.to_screen(u'\r[rtmpdump] %s bytes' % fsize)
            self.try_rename(tmpfilename, filename)
            self._hook_progress({
                'downloaded_bytes': fsize,
                'total_bytes': fsize,
                'filename': filename,
                'status': 'finished',
            })
            return True
        else:
            self.trouble(u'\nERROR: rtmpdump exited with code %d' % retval)
@@ -592,6 +615,10 @@ class FileDownloader(object):
        # Check file already present
        if self.params.get('continuedl', False) and os.path.isfile(encodeFilename(filename)) and not self.params.get('nopart', False):
            self.report_file_already_downloaded(filename)
            self._hook_progress({
                'filename': filename,
                'status': 'finished',
            })
            return True

        # Attempt to download using rtmpdump
@@ -605,6 +632,8 @@ class FileDownloader(object):

        # Do not include the Accept-Encoding header
        headers = {'Youtubedl-no-compression': 'True'}
        if 'user_agent' in info_dict:
            headers['Youtubedl-user-agent'] = info_dict['user_agent']
        basic_request = compat_urllib_request.Request(url, None, headers)
        request = compat_urllib_request.Request(url, None, headers)

@@ -661,6 +690,10 @@ class FileDownloader(object):
                # the one in the hard drive.
                self.report_file_already_downloaded(filename)
                self.try_rename(tmpfilename, filename)
                self._hook_progress({
                    'filename': filename,
                    'status': 'finished',
                })
                return True
            else:
                # The length does not match, we start the download over
@@ -719,6 +752,14 @@ class FileDownloader(object):
                eta_str = self.calc_eta(start, time.time(), data_len - resume_len, byte_counter - resume_len)
                self.report_progress(percent_str, data_len_str, speed_str, eta_str)

            self._hook_progress({
                'downloaded_bytes': byte_counter,
                'total_bytes': data_len,
                'tmpfilename': tmpfilename,
                'filename': filename,
                'status': 'downloading',
            })

            # Apply rate limit
            self.slow_down(start, byte_counter - resume_len)

@@ -735,4 +776,31 @@ class FileDownloader(object):
        if self.params.get('updatetime', True):
            info_dict['filetime'] = self.try_utime(filename, data.info().get('last-modified', None))

        self._hook_progress({
            'downloaded_bytes': byte_counter,
            'total_bytes': byte_counter,
            'filename': filename,
            'status': 'finished',
        })

        return True

    def _hook_progress(self, status):
        for ph in self._progress_hooks:
            ph(status)

    def add_progress_hook(self, ph):
        """ ph gets called on download progress, with a dictionary with the entries
        * filename: The final filename
        * status: One of "downloading" and "finished"

        It can also have some of the following entries:

        * downloaded_bytes: Bytes on disks
        * total_bytes: Total bytes, None if unknown
        * tmpfilename: The filename we're currently writing to

        Hooks are guaranteed to be called at least once (with status "finished")
        if the download is successful.
        """
        self._progress_hooks.append(ph)
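The progress-hook API documented just above is the core FileDownloader addition in this range. A minimal caller-side sketch, assuming the usual `params` options dict used elsewhere in the codebase; the hook function itself is hypothetical and not part of the diff:

```python
# Hypothetical usage sketch of the new progress-hook API.
def print_progress(status):
    # 'filename' and 'status' are always present; 'downloaded_bytes',
    # 'total_bytes' and 'tmpfilename' may or may not be.
    if status['status'] == 'finished':
        print(u'Finished %s (%s bytes)' % (status['filename'], status.get('total_bytes')))

fd = FileDownloader(params)            # params: the usual options dict
fd.add_progress_hook(print_progress)   # called at least once with status "finished"
```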
@@ -35,15 +35,16 @@ class InfoExtractor(object):
    url:            Final video URL.
    title:          Video title, unescaped.
    ext:            Video filename extension.
    uploader:       Full name of the video uploader.
    upload_date:    Video upload date (YYYYMMDD).

    The following fields are optional:

    format:         The video format, defaults to ext (used for --get-format)
    thumbnail:      Full URL to a video thumbnail image.
    description:    One-line video description.
    uploader:       Full name of the video uploader.
    upload_date:    Video upload date (YYYYMMDD).
    uploader_id:    Nickname or id of the video uploader.
    location:       Physical location of the video.
    player_url:     SWF Player URL (used for rtmpdump).
    subtitles:      The .srt file contents.
    urlhandle:      [internal] The urlHandle to be used to download the file,
@@ -106,19 +107,24 @@ class InfoExtractor(object):
    def IE_NAME(self):
        return type(self).__name__[:-2]

    def _download_webpage(self, url_or_request, video_id, note=None, errnote=None):
    def _request_webpage(self, url_or_request, video_id, note=None, errnote=None):
        """ Returns the response handle """
        if note is None:
            note = u'Downloading video webpage'
        self._downloader.to_screen(u'[%s] %s: %s' % (self.IE_NAME, video_id, note))
        try:
            urlh = compat_urllib_request.urlopen(url_or_request)
            webpage_bytes = urlh.read()
            return webpage_bytes.decode('utf-8', 'replace')
            return compat_urllib_request.urlopen(url_or_request)
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            if errnote is None:
                errnote = u'Unable to download webpage'
            raise ExtractorError(u'%s: %s' % (errnote, compat_str(err)), sys.exc_info()[2])

    def _download_webpage(self, url_or_request, video_id, note=None, errnote=None):
        """ Returns the data of the page as a string """
        urlh = self._request_webpage(url_or_request, video_id, note, errnote)
        webpage_bytes = urlh.read()
        return webpage_bytes.decode('utf-8', 'replace')


class YoutubeIE(InfoExtractor):
    """Information extractor for youtube.com."""
@@ -2204,6 +2210,7 @@ class BlipTVIE(InfoExtractor):
            cchar = '?'
        json_url = url + cchar + 'skin=json&version=2&no_wrap=1'
        request = compat_urllib_request.Request(json_url)
        request.add_header('User-Agent', 'iTunes/10.6.1')
        self.report_extraction(mobj.group(1))
        info = None
        try:
@@ -2224,8 +2231,7 @@ class BlipTVIE(InfoExtractor):
                    'urlhandle': urlh
                }
            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                self._downloader.trouble(u'ERROR: unable to download video info webpage: %s' % compat_str(err))
                return
                raise ExtractorError(u'ERROR: unable to download video info webpage: %s' % compat_str(err))
        if info is None: # Regular URL
            try:
                json_code_bytes = urlh.read()
@@ -2258,13 +2264,13 @@ class BlipTVIE(InfoExtractor):
                    'format': data['media']['mimeType'],
                    'thumbnail': data['thumbnailUrl'],
                    'description': data['description'],
                    'player_url': data['embedUrl']
                    'player_url': data['embedUrl'],
                    'user_agent': 'iTunes/10.6.1',
                }
            except (ValueError,KeyError) as err:
                self._downloader.trouble(u'ERROR: unable to parse video information: %s' % repr(err))
                return

        std_headers['User-Agent'] = 'iTunes/10.6.1'
        return [info]


@@ -3272,7 +3278,7 @@ class YoukuIE(InfoExtractor):
class XNXXIE(InfoExtractor):
    """Information extractor for xnxx.com"""

    _VALID_URL = r'^http://video\.xnxx\.com/video([0-9]+)/(.*)'
    _VALID_URL = r'^(?:https?://)?video\.xnxx\.com/video([0-9]+)/(.*)'
    IE_NAME = u'xnxx'
    VIDEO_URL_RE = r'flv_url=(.*?)&'
    VIDEO_TITLE_RE = r'<title>(.*?)\s+-\s+XNXX.COM'
@@ -3524,17 +3530,23 @@ class JustinTVIE(InfoExtractor):
            return

        response = json.loads(webpage)
        if type(response) != list:
            error_text = response.get('error', 'unknown error')
            self._downloader.trouble(u'ERROR: Justin.tv API: %s' % error_text)
            return
        info = []
        for clip in response:
            video_url = clip['video_file_url']
            if video_url:
                video_extension = os.path.splitext(video_url)[1][1:]
                video_date = re.sub('-', '', clip['created_on'][:10])
                video_date = re.sub('-', '', clip['start_time'][:10])
                video_uploader_id = clip.get('user_id', clip.get('channel_id'))
                info.append({
                    'id': clip['id'],
                    'url': video_url,
                    'title': clip['title'],
                    'uploader': clip.get('user_id', clip.get('channel_id')),
                    'uploader': clip.get('channel_name', video_uploader_id),
                    'uploader_id': video_uploader_id,
                    'upload_date': video_date,
                    'ext': video_extension,
                })
@@ -3553,7 +3565,7 @@ class JustinTVIE(InfoExtractor):
            paged = True
            api += '/channel/archives/%s.json'
        else:
            api += '/clip/show/%s.json'
            api += '/broadcast/by_archive/%s.json'
        api = api % (video_id,)

        self.report_extraction(video_id)
@@ -3693,11 +3705,11 @@ class SteamIE(InfoExtractor):
                }
            videos.append(info)
        return videos


class UstreamIE(InfoExtractor):
    _VALID_URL = r'http://www.ustream.tv/recorded/(?P<videoID>\d+)'
    _VALID_URL = r'https?://www\.ustream\.tv/recorded/(?P<videoID>\d+)'
    IE_NAME = u'ustream'

    def _real_extract(self, url):
        m = re.match(self._VALID_URL, url)
        video_id = m.group('videoID')
@@ -3716,6 +3728,251 @@ class UstreamIE(InfoExtractor):
            }
        return [info]

class RBMARadioIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?rbmaradio\.com/shows/(?P<videoID>[^/]+)$'

    def _real_extract(self, url):
        m = re.match(self._VALID_URL, url)
        video_id = m.group('videoID')

        webpage = self._download_webpage(url, video_id)
        m = re.search(r'<script>window.gon = {.*?};gon\.show=(.+?);</script>', webpage)
        if not m:
            raise ExtractorError(u'Cannot find metadata')
        json_data = m.group(1)

        try:
            data = json.loads(json_data)
        except ValueError as e:
            raise ExtractorError(u'Invalid JSON: ' + str(e))

        video_url = data['akamai_url'] + '&cbr=256'
        url_parts = compat_urllib_parse_urlparse(video_url)
        video_ext = url_parts.path.rpartition('.')[2]
        info = {
            'id': video_id,
            'url': video_url,
            'ext': video_ext,
            'title': data['title'],
            'description': data.get('teaser_text'),
            'location': data.get('country_of_origin'),
            'uploader': data.get('host', {}).get('name'),
            'uploader_id': data.get('host', {}).get('slug'),
            'thumbnail': data.get('image', {}).get('large_url_2x'),
            'duration': data.get('duration'),
        }
        return [info]


class YouPornIE(InfoExtractor):
    """Information extractor for youporn.com."""
    _VALID_URL = r'^(?:https?://)?(?:\w+\.)?youporn\.com/watch/(?P<videoid>[0-9]+)/(?P<title>[^/]+)'

    def _print_formats(self, formats):
        """Print all available formats"""
        print(u'Available formats:')
        print(u'ext\t\tformat')
        print(u'---------------------------------')
        for format in formats:
            print(u'%s\t\t%s' % (format['ext'], format['format']))

    def _specific(self, req_format, formats):
        for x in formats:
            if(x["format"]==req_format):
                return x
        return None

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
            return

        video_id = mobj.group('videoid')

        req = compat_urllib_request.Request(url)
        req.add_header('Cookie', 'age_verified=1')
        webpage = self._download_webpage(req, video_id)

        # Get the video title
        result = re.search(r'videoTitleArea">(?P<title>.*)</h1>', webpage)
        if result is None:
            raise ExtractorError(u'ERROR: unable to extract video title')
        video_title = result.group('title').strip()

        # Get the video date
        result = re.search(r'Date:</b>(?P<date>.*)</li>', webpage)
        if result is None:
            self._downloader.to_stderr(u'WARNING: unable to extract video date')
            upload_date = None
        else:
            upload_date = result.group('date').strip()

        # Get the video uploader
        result = re.search(r'Submitted:</b>(?P<uploader>.*)</li>', webpage)
        if result is None:
            self._downloader.to_stderr(u'ERROR: unable to extract uploader')
            video_uploader = None
        else:
            video_uploader = result.group('uploader').strip()
            video_uploader = clean_html( video_uploader )

        # Get all of the formats available
        DOWNLOAD_LIST_RE = r'(?s)<ul class="downloadList">(?P<download_list>.*?)</ul>'
        result = re.search(DOWNLOAD_LIST_RE, webpage)
        if result is None:
            raise ExtractorError(u'Unable to extract download list')
        download_list_html = result.group('download_list').strip()

        # Get all of the links from the page
        LINK_RE = r'(?s)<a href="(?P<url>[^"]+)">'
        links = re.findall(LINK_RE, download_list_html)
        if(len(links) == 0):
            raise ExtractorError(u'ERROR: no known formats available for video')

        self._downloader.to_screen(u'[youporn] Links found: %d' % len(links))

        formats = []
        for link in links:

            # A link looks like this:
            # http://cdn1.download.youporn.phncdn.com/201210/31/8004515/480p_370k_8004515/YouPorn%20-%20Nubile%20Films%20The%20Pillow%20Fight.mp4?nvb=20121113051249&nva=20121114051249&ir=1200&sr=1200&hash=014b882080310e95fb6a0
            # A path looks like this:
            # /201210/31/8004515/480p_370k_8004515/YouPorn%20-%20Nubile%20Films%20The%20Pillow%20Fight.mp4
            video_url = unescapeHTML( link )
            path = compat_urllib_parse_urlparse( video_url ).path
            extension = os.path.splitext( path )[1][1:]
            format = path.split('/')[4].split('_')[:2]
            size = format[0]
            bitrate = format[1]
            format = "-".join( format )
            title = u'%s-%s-%s' % (video_title, size, bitrate)

            formats.append({
                'id': video_id,
                'url': video_url,
                'uploader': video_uploader,
                'upload_date': upload_date,
                'title': title,
                'ext': extension,
                'format': format,
                'thumbnail': None,
                'description': None,
                'player_url': None
            })

        if self._downloader.params.get('listformats', None):
            self._print_formats(formats)
            return

        req_format = self._downloader.params.get('format', None)
        self._downloader.to_screen(u'[youporn] Format: %s' % req_format)

        if req_format is None or req_format == 'best':
            return [formats[0]]
        elif req_format == 'worst':
            return [formats[-1]]
        elif req_format in ('-1', 'all'):
            return formats
        else:
            format = self._specific( req_format, formats )
            if result is None:
                self._downloader.trouble(u'ERROR: requested format not available')
                return
            return [format]


class PornotubeIE(InfoExtractor):
    """Information extractor for pornotube.com."""
    _VALID_URL = r'^(?:https?://)?(?:\w+\.)?pornotube\.com(/c/(?P<channel>[0-9]+))?(/m/(?P<videoid>[0-9]+))(/(?P<title>.+))$'

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
            return

        video_id = mobj.group('videoid')
        video_title = mobj.group('title')

        # Get webpage content
        webpage = self._download_webpage(url, video_id)

        # Get the video URL
        VIDEO_URL_RE = r'url: "(?P<url>http://video[0-9].pornotube.com/.+\.flv)",'
        result = re.search(VIDEO_URL_RE, webpage)
        if result is None:
            self._downloader.trouble(u'ERROR: unable to extract video url')
            return
        video_url = compat_urllib_parse.unquote(result.group('url'))

        #Get the uploaded date
        VIDEO_UPLOADED_RE = r'<div class="video_added_by">Added (?P<date>[0-9\/]+) by'
        result = re.search(VIDEO_UPLOADED_RE, webpage)
        if result is None:
            self._downloader.trouble(u'ERROR: unable to extract video title')
            return
        upload_date = result.group('date')

        info = {'id': video_id,
                'url': video_url,
                'uploader': None,
                'upload_date': upload_date,
                'title': video_title,
                'ext': 'flv',
                'format': 'flv'}

        return [info]


class YouJizzIE(InfoExtractor):
    """Information extractor for youjizz.com."""
    _VALID_URL = r'^(?:https?://)?(?:\w+\.)?youjizz\.com/videos/(?P<videoid>[^.]+).html$'

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
            return

        video_id = mobj.group('videoid')

        # Get webpage content
        webpage = self._download_webpage(url, video_id)

        # Get the video title
        result = re.search(r'<title>(?P<title>.*)</title>', webpage)
        if result is None:
            raise ExtractorError(u'ERROR: unable to extract video title')
        video_title = result.group('title').strip()

        # Get the embed page
        result = re.search(r'https?://www.youjizz.com/videos/embed/(?P<videoid>[0-9]+)', webpage)
        if result is None:
            raise ExtractorError(u'ERROR: unable to extract embed page')

        embed_page_url = result.group(0).strip()
        video_id = result.group('videoid')

        webpage = self._download_webpage(embed_page_url, video_id)

        # Get the video URL
        result = re.search(r'so.addVariable\("file",encodeURIComponent\("(?P<source>[^"]+)"\)\);', webpage)
        if result is None:
            raise ExtractorError(u'ERROR: unable to extract video url')
        video_url = result.group('source')

        info = {'id': video_id,
                'url': video_url,
                'title': video_title,
                'ext': 'flv',
                'format': 'flv',
                'player_url': embed_page_url}

        return [info]


def gen_extractors():
    """ Return a list of an instance of every supported extractor.
@@ -3750,6 +4007,9 @@ def gen_extractors():
        MTVIE(),
        YoukuIE(),
        XNXXIE(),
        YouJizzIE(),
        PornotubeIE(),
        YouPornIE(),
        GooglePlusIE(),
        ArteTvIE(),
        NBAIE(),
@@ -3758,6 +4018,7 @@ def gen_extractors():
        TweetReelIE(),
        SteamIE(),
        UstreamIE(),
        RBMARadioIE(),
        GenericIE()
    ]
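The `_request_webpage`/`_download_webpage` split introduced above is the helper the new extractors (RBMARadio, YouPorn, Pornotube, YouJizz) are written against. A minimal sketch of an extractor built on it; `ExampleIE`, its URL pattern, the regex and the media URL are hypothetical placeholders, not part of this changeset:

```python
# Hypothetical extractor sketch; the class, URL pattern and regex are placeholders.
class ExampleIE(InfoExtractor):
    _VALID_URL = r'^(?:https?://)?(?:www\.)?example\.com/video/(?P<videoid>[0-9]+)'

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('videoid')
        # _download_webpage reports progress, wraps network errors in
        # ExtractorError, and returns the decoded page body as a string.
        webpage = self._download_webpage(url, video_id)
        result = re.search(r'<title>(?P<title>.*?)</title>', webpage)
        if result is None:
            raise ExtractorError(u'Unable to extract title')
        return [{
            'id': video_id,
            'url': u'http://example.com/media/%s.flv' % video_id,
            'title': result.group('title').strip(),
            'ext': u'flv',
        }]
```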
@@ -45,31 +45,24 @@ class PostProcessor(object):
        one has an extra field called "filepath" that points to the
        downloaded file.

        When this method returns None, the postprocessing chain is
        stopped. However, this method may return an information
        dictionary that will be passed to the next postprocessing
        object in the chain. It can be the one it received after
        changing some fields.
        This method returns a tuple, the first element of which describes
        whether the original file should be kept (i.e. not deleted - None for
        no preference), and the second of which is the updated information.

        In addition, this method may raise a PostProcessingError
        exception that will be taken into account by the downloader
        it was called from.
        exception if post processing fails.
        """
        return information # by default, do nothing
        return None, information # by default, keep file and do nothing

class AudioConversionError(BaseException):
    def __init__(self, message):
        self.message = message
class FFmpegPostProcessorError(PostProcessingError):
    pass

class FFmpegExtractAudioPP(PostProcessor):
    def __init__(self, downloader=None, preferredcodec=None, preferredquality=None, keepvideo=False, nopostoverwrites=False):
class AudioConversionError(PostProcessingError):
    pass

class FFmpegPostProcessor(PostProcessor):
    def __init__(self,downloader=None):
        PostProcessor.__init__(self, downloader)
        if preferredcodec is None:
            preferredcodec = 'best'
        self._preferredcodec = preferredcodec
        self._preferredquality = preferredquality
        self._keepvideo = keepvideo
        self._nopostoverwrites = nopostoverwrites
        self._exes = self.detect_executables()

    @staticmethod
@@ -83,10 +76,37 @@ class FFmpegExtractAudioPP(PostProcessor):
        programs = ['avprobe', 'avconv', 'ffmpeg', 'ffprobe']
        return dict((program, executable(program)) for program in programs)

    def run_ffmpeg(self, path, out_path, opts):
        if not self._exes['ffmpeg'] and not self._exes['avconv']:
            raise FFmpegPostProcessorError(u'ffmpeg or avconv not found. Please install one.')
        cmd = ([self._exes['avconv'] or self._exes['ffmpeg'], '-y', '-i', encodeFilename(path)]
               + opts +
               [encodeFilename(self._ffmpeg_filename_argument(out_path))])
        p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout,stderr = p.communicate()
        if p.returncode != 0:
            msg = stderr.strip().split('\n')[-1]
            raise FFmpegPostProcessorError(msg.decode('utf-8', 'replace'))

    def _ffmpeg_filename_argument(self, fn):
        # ffmpeg broke --, see https://ffmpeg.org/trac/ffmpeg/ticket/2127 for details
        if fn.startswith(u'-'):
            return u'./' + fn
        return fn

class FFmpegExtractAudioPP(FFmpegPostProcessor):
    def __init__(self, downloader=None, preferredcodec=None, preferredquality=None, nopostoverwrites=False):
        FFmpegPostProcessor.__init__(self, downloader)
        if preferredcodec is None:
            preferredcodec = 'best'
        self._preferredcodec = preferredcodec
        self._preferredquality = preferredquality
        self._nopostoverwrites = nopostoverwrites

    def get_audio_codec(self, path):
        if not self._exes['ffprobe'] and not self._exes['avprobe']: return None
        try:
            cmd = [self._exes['avprobe'] or self._exes['ffprobe'], '-show_streams', '--', encodeFilename(path)]
            cmd = [self._exes['avprobe'] or self._exes['ffprobe'], '-show_streams', encodeFilename(self._ffmpeg_filename_argument(path))]
            handle = subprocess.Popen(cmd, stderr=compat_subprocess_get_DEVNULL(), stdout=subprocess.PIPE)
            output = handle.communicate()[0]
            if handle.wait() != 0:
@@ -108,22 +128,18 @@ class FFmpegExtractAudioPP(PostProcessor):
            acodec_opts = []
        else:
            acodec_opts = ['-acodec', codec]
        cmd = ([self._exes['avconv'] or self._exes['ffmpeg'], '-y', '-i', encodeFilename(path), '-vn']
               + acodec_opts + more_opts +
               ['--', encodeFilename(out_path)])
        p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout,stderr = p.communicate()
        if p.returncode != 0:
            msg = stderr.strip().split('\n')[-1]
            raise AudioConversionError(msg)
        opts = ['-vn'] + acodec_opts + more_opts
        try:
            FFmpegPostProcessor.run_ffmpeg(self, path, out_path, opts)
        except FFmpegPostProcessorError as err:
            raise AudioConversionError(err.message)

    def run(self, information):
        path = information['filepath']

        filecodec = self.get_audio_codec(path)
        if filecodec is None:
            self._downloader.to_stderr(u'WARNING: unable to obtain file audio codec with ffprobe')
            return None
            raise PostProcessingError(u'WARNING: unable to obtain file audio codec with ffprobe')

        more_opts = []
        if self._preferredcodec == 'best' or self._preferredcodec == filecodec or (self._preferredcodec == 'm4a' and filecodec == 'aac'):
@@ -132,7 +148,7 @@ class FFmpegExtractAudioPP(PostProcessor):
                acodec = 'copy'
                extension = self._preferredcodec
                more_opts = [self._exes['avconv'] and '-bsf:a' or '-absf', 'aac_adtstoasc']
            elif filecodec in ['aac', 'mp3', 'vorbis']:
            elif filecodec in ['aac', 'mp3', 'vorbis', 'opus']:
                # Lossless if possible
                acodec = 'copy'
                extension = filecodec
@@ -152,7 +168,7 @@ class FFmpegExtractAudioPP(PostProcessor):
                    more_opts += [self._exes['avconv'] and '-b:a' or '-ab', self._preferredquality + 'k']
        else:
            # We convert the audio (lossy)
            acodec = {'mp3': 'libmp3lame', 'aac': 'aac', 'm4a': 'aac', 'vorbis': 'libvorbis', 'wav': None}[self._preferredcodec]
            acodec = {'mp3': 'libmp3lame', 'aac': 'aac', 'm4a': 'aac', 'opus': 'opus', 'vorbis': 'libvorbis', 'wav': None}[self._preferredcodec]
            extension = self._preferredcodec
            more_opts = []
            if self._preferredquality is not None:
@@ -181,10 +197,10 @@ class FFmpegExtractAudioPP(PostProcessor):
        except:
            etype,e,tb = sys.exc_info()
            if isinstance(e, AudioConversionError):
                self._downloader.to_stderr(u'ERROR: audio conversion failed: ' + e.message)
                msg = u'audio conversion failed: ' + e.message
            else:
                self._downloader.to_stderr(u'ERROR: error running ' + (self._exes['avconv'] and 'avconv' or 'ffmpeg'))
            return None
                msg = u'error running ' + (self._exes['avconv'] and 'avconv' or 'ffmpeg')
            raise PostProcessingError(msg)

        # Try to update the date time for extracted audio file.
        if information.get('filetime') is not None:
@@ -193,12 +209,24 @@ class FFmpegExtractAudioPP(PostProcessor):
            except:
                self._downloader.to_stderr(u'WARNING: Cannot update utime of audio file')

        if not self._keepvideo:
            try:
                os.remove(encodeFilename(path))
            except (IOError, OSError):
                self._downloader.to_stderr(u'WARNING: Unable to remove downloaded video file')
                return None

        information['filepath'] = new_path
        return information
        return False,information

class FFmpegVideoConvertor(FFmpegPostProcessor):
    def __init__(self, downloader=None,preferedformat=None):
        super(FFmpegVideoConvertor, self).__init__(downloader)
        self._preferedformat=preferedformat

    def run(self, information):
        path = information['filepath']
        prefix, sep, ext = path.rpartition(u'.')
        outpath = prefix + sep + self._preferedformat
        if information['ext'] == self._preferedformat:
            self._downloader.to_screen(u'[ffmpeg] Not converting video file %s - already is in target format %s' % (path, self._preferedformat))
            return True,information
        self._downloader.to_screen(u'['+'ffmpeg'+'] Converting video from %s to %s, Destination: ' % (information['ext'], self._preferedformat) +outpath)
        self.run_ffmpeg(path, outpath, [])
        information['filepath'] = outpath
        information['format'] = self._preferedformat
        information['ext'] = self._preferedformat
        return False,information
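The reworked PostProcessor contract above (run() returns a (keep_video, info) tuple and reports failures by raising PostProcessingError) is easiest to see in a minimal subclass. A hypothetical sketch, assuming nothing beyond what this diff defines:

```python
# Hypothetical post processor illustrating the new run() contract;
# the class name and behaviour are made up for illustration.
class NoOpPP(PostProcessor):
    def run(self, information):
        if 'filepath' not in information:
            # Failures are now signalled by raising, not by returning None.
            raise PostProcessingError(u'nothing to post-process')
        # First element: keep-video wish (True/False, or None for no preference).
        # Second element: the (possibly updated) info dict for the next PP in the chain.
        return None, information
```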
@@ -22,6 +22,7 @@ __authors__ = (
    'Christian Albrecht',
    'Dave Vasilevsky',
    'Jaime Marquínez Ferrándiz',
    'Jeff Crouse',
    )

__license__ = 'Public Domain'
@@ -175,7 +176,6 @@ def parseOpts():
            action='store', dest='subtitleslang', metavar='LANG',
            help='language of the closed captions to download (optional) use IETF language tags like \'en\'')


    verbosity.add_option('-q', '--quiet',
            action='store_true', dest='quiet', help='activates quiet mode', default=False)
    verbosity.add_option('-s', '--simulate',
@@ -248,9 +248,11 @@ def parseOpts():
    postproc.add_option('-x', '--extract-audio', action='store_true', dest='extractaudio', default=False,
            help='convert video files to audio-only files (requires ffmpeg or avconv and ffprobe or avprobe)')
    postproc.add_option('--audio-format', metavar='FORMAT', dest='audioformat', default='best',
            help='"best", "aac", "vorbis", "mp3", "m4a", or "wav"; best by default')
            help='"best", "aac", "vorbis", "mp3", "m4a", "opus", or "wav"; best by default')
    postproc.add_option('--audio-quality', metavar='QUALITY', dest='audioquality', default='5',
            help='ffmpeg/avconv audio quality specification, insert a value between 0 (better) and 9 (worse) for VBR or a specific bitrate like 128K (default 5)')
    postproc.add_option('--recode-video', metavar='FORMAT', dest='recodevideo', default=None,
            help='Encode the video to another format if necessary (currently supported: mp4|flv|ogg|webm)')
    postproc.add_option('-k', '--keep-video', action='store_true', dest='keepvideo', default=False,
            help='keeps the video file on disk after the post-processing; the video is erased by default')
    postproc.add_option('--no-post-overwrites', action='store_true', dest='nopostoverwrites', default=False,
@@ -370,12 +372,15 @@ def _real_main():
        except (TypeError, ValueError) as err:
            parser.error(u'invalid playlist end number specified')
    if opts.extractaudio:
        if opts.audioformat not in ['best', 'aac', 'mp3', 'vorbis', 'm4a', 'wav']:
        if opts.audioformat not in ['best', 'aac', 'mp3', 'm4a', 'opus', 'vorbis', 'wav']:
            parser.error(u'invalid audio format specified')
    if opts.audioquality:
        opts.audioquality = opts.audioquality.strip('k').strip('K')
        if not opts.audioquality.isdigit():
            parser.error(u'invalid audio quality specified')
    if opts.recodevideo is not None:
        if opts.recodevideo not in ['mp4', 'flv', 'webm', 'ogg']:
            parser.error(u'invalid video recode format specified')

    if sys.version_info < (3,):
        # In Python 2, sys.argv is a bytestring (also note http://bugs.python.org/issue2128 for Windows systems)
@@ -432,6 +437,7 @@ def _real_main():
        'prefer_free_formats': opts.prefer_free_formats,
        'verbose': opts.verbose,
        'test': opts.test,
        'keepvideo': opts.keepvideo,
        })

    if opts.verbose:
@@ -453,7 +459,9 @@ def _real_main():

    # PostProcessors
    if opts.extractaudio:
        fd.add_post_processor(FFmpegExtractAudioPP(preferredcodec=opts.audioformat, preferredquality=opts.audioquality, keepvideo=opts.keepvideo, nopostoverwrites=opts.nopostoverwrites))
        fd.add_post_processor(FFmpegExtractAudioPP(preferredcodec=opts.audioformat, preferredquality=opts.audioquality, nopostoverwrites=opts.nopostoverwrites))
    if opts.recodevideo:
        fd.add_post_processor(FFmpegVideoConvertor(preferedformat=opts.recodevideo))

    # Update version
    if opts.update_self:
@@ -450,7 +450,8 @@ class PostProcessingError(Exception):
    This exception may be raised by PostProcessor's .run() method to
    indicate an error in the postprocessing task.
    """
    pass
    def __init__(self, msg):
        self.msg = msg

class MaxDownloadsReached(Exception):
    """ --max-downloads limit has been reached. """
@@ -515,14 +516,19 @@ class YoutubeDLHandler(compat_urllib_request.HTTPHandler):
        return ret

    def http_request(self, req):
        for h in std_headers:
        for h,v in std_headers.items():
            if h in req.headers:
                del req.headers[h]
            req.add_header(h, std_headers[h])
            req.add_header(h, v)
        if 'Youtubedl-no-compression' in req.headers:
            if 'Accept-encoding' in req.headers:
                del req.headers['Accept-encoding']
            del req.headers['Youtubedl-no-compression']
        if 'Youtubedl-user-agent' in req.headers:
            if 'User-agent' in req.headers:
                del req.headers['User-agent']
            req.headers['User-agent'] = req.headers['Youtubedl-user-agent']
            del req.headers['Youtubedl-user-agent']
        return req

    def http_response(self, req, resp):
@@ -1,2 +1,2 @@
__version__ = '2013.01.06'
__version__ = '2013.01.13'