Diffstat (limited to 'youtube_dl')
-rw-r--r--  youtube_dl/FileDownloader.py |   1
-rw-r--r--  youtube_dl/InfoExtractors.py | 213
-rw-r--r--  youtube_dl/PostProcessor.py  |  10
-rw-r--r--  youtube_dl/__init__.py       |  23
-rw-r--r--  youtube_dl/utils.py          |   2
5 files changed, 229 insertions, 20 deletions
diff --git a/youtube_dl/FileDownloader.py b/youtube_dl/FileDownloader.py
index 14e872a98..38c6a519a 100644
--- a/youtube_dl/FileDownloader.py
+++ b/youtube_dl/FileDownloader.py
@@ -474,6 +474,7 @@ class FileDownloader(object):
                 # Extract information from URL and process it
                 videos = ie.extract(url)
                 for video in videos or []:
+                    video['extractor'] = ie.IE_NAME
                     try:
                         self.increment_downloads()
                         self.process_info(video)
diff --git a/youtube_dl/InfoExtractors.py b/youtube_dl/InfoExtractors.py
index 3875e7fd8..bdb2ec311 100644
--- a/youtube_dl/InfoExtractors.py
+++ b/youtube_dl/InfoExtractors.py
@@ -13,6 +13,8 @@ import urllib
import urllib2
import email.utils
import xml.etree.ElementTree
+import random
+import math
from urlparse import parse_qs
try:
@@ -98,7 +100,8 @@ class YoutubeIE(InfoExtractor):
     _VALID_URL = r"""^
                      (
                          (?:https?://)?                                       # http(s):// (optional)
-                         (?:youtu\.be/|(?:\w+\.)?youtube(?:-nocookie)?\.com/) # the various hostnames, with wildcard subdomains
+                         (?:youtu\.be/|(?:\w+\.)?youtube(?:-nocookie)?\.com/|
+                            tube\.majestyc\.net/)                             # the various hostnames, with wildcard subdomains
                          (?!view_play_list|my_playlists|artist|playlist)      # ignore playlist URLs
                          (?:                                                  # the various things that can precede the ID:
                              (?:(?:v|embed|e)/)                               # v/ or embed/ or e/
@@ -423,7 +426,7 @@ class YoutubeIE(InfoExtractor):
             url_data_strs = video_info['url_encoded_fmt_stream_map'][0].split(',')
             url_data = [parse_qs(uds) for uds in url_data_strs]
             url_data = filter(lambda ud: 'itag' in ud and 'url' in ud, url_data)
-            url_map = dict((ud['itag'][0], ud['url'][0]) for ud in url_data)
+            url_map = dict((ud['itag'][0], ud['url'][0] + '&signature=' + ud['sig'][0]) for ud in url_data)
 
             format_limit = self._downloader.params.get('format_limit', None)
             available_formats = self._available_formats_prefer_free if self._downloader.params.get('prefer_free_formats', False) else self._available_formats
@@ -636,7 +639,7 @@ class DailymotionIE(InfoExtractor):
         video_id = mobj.group(1)
 
-        video_extension = 'flv'
+        video_extension = 'mp4'
 
         # Retrieve video webpage to extract further information
         request = urllib2.Request(url)
@@ -650,20 +653,21 @@ class DailymotionIE(InfoExtractor):
 
         # Extract URL, uploader and title from webpage
         self.report_extraction(video_id)
-        mobj = re.search(r'(?i)addVariable\(\"sequence\"\s*,\s*\"([^\"]+?)\"\)', webpage)
+        mobj = re.search(r'\s*var flashvars = (.*)', webpage)
         if mobj is None:
             self._downloader.trouble(u'ERROR: unable to extract media URL')
             return
-        sequence = urllib.unquote(mobj.group(1))
-        mobj = re.search(r',\"sdURL\"\:\"([^\"]+?)\",', sequence)
+        flashvars = urllib.unquote(mobj.group(1))
+        if 'hqURL' in flashvars: max_quality = 'hqURL'
+        elif 'sdURL' in flashvars: max_quality = 'sdURL'
+        else: max_quality = 'ldURL'
+        mobj = re.search(r'"' + max_quality + r'":"(.+?)"', flashvars)
         if mobj is None:
             self._downloader.trouble(u'ERROR: unable to extract media URL')
             return
-        mediaURL = urllib.unquote(mobj.group(1)).replace('\\', '')
-
-        # if needed add http://www.dailymotion.com/ if relative URL
+        video_url = mobj.group(1).replace('\\/', '/')
 
-        video_url = mediaURL
+        # TODO: support choosing qualities
 
         mobj = re.search(r'<meta property="og:title" content="(?P<title>[^"]*)" />', webpage)
         if mobj is None:
@@ -1492,7 +1496,7 @@ class YoutubePlaylistIE(InfoExtractor):
     _VALID_URL = r'(?:https?://)?(?:\w+\.)?youtube\.com/(?:(?:course|view_play_list|my_playlists|artist|playlist)\?.*?(p|a|list)=|user/.*?/user/|p/|user/.*?#[pg]/c/)(?:PL)?([0-9A-Za-z-_]+)(?:/.*?/([0-9A-Za-z_-]+))?.*'
     _TEMPLATE_URL = 'http://www.youtube.com/%s?%s=%s&page=%s&gl=US&hl=en'
-    _VIDEO_INDICATOR_TEMPLATE = r'/watch\?v=(.+?)&amp;list=(PL)?%s&'
+    _VIDEO_INDICATOR_TEMPLATE = r'/watch\?v=(.+?)&amp;list=.*?%s'
     _MORE_PAGES_INDICATOR = r'yt-uix-pager-next'
     IE_NAME = u'youtube:playlist'
@@ -2976,3 +2980,190 @@ class MTVIE(InfoExtractor):
         }
 
         return [info]
+
+
+class YoukuIE(InfoExtractor):
+
+    _VALID_URL = r'(?:http://)?v\.youku\.com/v_show/id_(?P<ID>[A-Za-z0-9]+)\.html'
+    IE_NAME = u'Youku'
+
+    def __init__(self, downloader=None):
+        InfoExtractor.__init__(self, downloader)
+
+    def report_download_webpage(self, file_id):
+        """Report webpage download."""
+        self._downloader.to_screen(u'[Youku] %s: Downloading webpage' % file_id)
+
+    def report_extraction(self, file_id):
+        """Report information extraction."""
+        self._downloader.to_screen(u'[Youku] %s: Extracting information' % file_id)
+
+    def _gen_sid(self):
+        nowTime = int(time.time() * 1000)
+        random1 = random.randint(1000,1998)
+        random2 = random.randint(1000,9999)
+
+        return "%d%d%d" %(nowTime,random1,random2)
+
+    def _get_file_ID_mix_string(self, seed):
+        mixed = []
+        source = list("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ/\:._-1234567890")
+        seed = float(seed)
+        for i in range(len(source)):
+            seed = (seed * 211 + 30031 ) % 65536
+            index = math.floor(seed / 65536 * len(source) )
+            mixed.append(source[int(index)])
+            source.remove(source[int(index)])
+        #return ''.join(mixed)
+        return mixed
+
+    def _get_file_id(self, fileId, seed):
+        mixed = self._get_file_ID_mix_string(seed)
+        ids = fileId.split('*')
+        realId = []
+        for ch in ids:
+            if ch:
+                realId.append(mixed[int(ch)])
+        return ''.join(realId)
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+            return
+        video_id = mobj.group('ID')
+
+        info_url = 'http://v.youku.com/player/getPlayList/VideoIDS/' + video_id
+
+        request = urllib2.Request(info_url, None, std_headers)
+        try:
+            self.report_download_webpage(video_id)
+            jsondata = urllib2.urlopen(request).read()
+        except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
+            self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
+            return
+
+        self.report_extraction(video_id)
+        try:
+            config = json.loads(jsondata)
+
+            video_title = config['data'][0]['title']
+            seed = config['data'][0]['seed']
+
+            format = self._downloader.params.get('format', None)
+            supported_format = config['data'][0]['streamfileids'].keys()
+
+            if format is None or format == 'best':
+                if 'hd2' in supported_format:
+                    format = 'hd2'
+                else:
+                    format = 'flv'
+                ext = u'flv'
+            elif format == 'worst':
+                format = 'mp4'
+                ext = u'mp4'
+            else:
+                format = 'flv'
+                ext = u'flv'
+
+
+            fileid = config['data'][0]['streamfileids'][format]
+            seg_number = len(config['data'][0]['segs'][format])
+
+            keys=[]
+            for i in xrange(seg_number):
+                keys.append(config['data'][0]['segs'][format][i]['k'])
+
+            #TODO check error
+            #youku only could be viewed from mainland china
+        except:
+            self._downloader.trouble(u'ERROR: unable to extract info section')
+            return
+
+        files_info=[]
+        sid = self._gen_sid()
+        fileid = self._get_file_id(fileid, seed)
+
+        #column 8,9 of fileid represent the segment number
+        #fileid[7:9] should be changed
+        for index, key in enumerate(keys):
+
+            temp_fileid = '%s%02X%s' % (fileid[0:8], index, fileid[10:])
+            download_url = 'http://f.youku.com/player/getFlvPath/sid/%s_%02X/st/flv/fileid/%s?k=%s' % (sid, index, temp_fileid, key)
+
+            info = {
+                'id': '%s_part%02d' % (video_id, index),
+                'url': download_url,
+                'uploader': None,
+                'title': video_title,
+                'ext': ext,
+                'format': u'NA'
+            }
+            files_info.append(info)
+
+        return files_info
+
+
+class XNXXIE(InfoExtractor):
+    """Information extractor for xnxx.com"""
+
+    _VALID_URL = r'^http://video\.xnxx\.com/video([0-9]+)/(.*)'
+    IE_NAME = u'xnxx'
+    VIDEO_URL_RE = r'flv_url=(.*?)&amp;'
+    VIDEO_TITLE_RE = r'<title>(.*?)\s+-\s+XNXX.COM'
+    VIDEO_THUMB_RE = r'url_bigthumb=(.*?)&amp;'
+
+    def report_webpage(self, video_id):
+        """Report information extraction"""
+        self._downloader.to_screen(u'[%s] %s: Downloading webpage' % (self.IE_NAME, video_id))
+
+    def report_extraction(self, video_id):
+        """Report information extraction"""
+        self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id))
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+            return
+        video_id = mobj.group(1).decode('utf-8')
+
+        self.report_webpage(video_id)
+
+        # Get webpage content
+        try:
+            webpage = urllib2.urlopen(url).read()
+        except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+            self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % err)
+            return
+
+        result = re.search(self.VIDEO_URL_RE, webpage)
+        if result is None:
+            self._downloader.trouble(u'ERROR: unable to extract video url')
+            return
+        video_url = urllib.unquote(result.group(1).decode('utf-8'))
+
+        result = re.search(self.VIDEO_TITLE_RE, webpage)
+        if result is None:
+            self._downloader.trouble(u'ERROR: unable to extract video title')
+            return
+        video_title = result.group(1).decode('utf-8')
+
+        result = re.search(self.VIDEO_THUMB_RE, webpage)
+        if result is None:
+            self._downloader.trouble(u'ERROR: unable to extract video thumbnail')
+            return
+        video_thumbnail = result.group(1).decode('utf-8')
+
+        info = {'id': video_id,
+                'url': video_url,
+                'uploader': None,
+                'upload_date': None,
+                'title': video_title,
+                'ext': 'flv',
+                'format': 'flv',
+                'thumbnail': video_thumbnail,
+                'description': None,
+                'player_url': None}
+
+        return [info]
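
Note on the YoukuIE extractor added above: the streamfileids value from Youku's getPlayList endpoint is a scrambled file id, and the seed drives a small linear-congruential shuffle of a fixed character set; each '*'-separated number then indexes into that shuffled alphabet. A minimal standalone sketch of the same de-scrambling, with made-up seed and id values (not real Youku data):

    import math

    def mix_string(seed, source="abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ/\\:._-1234567890"):
        # Same linear-congruential shuffle as YoukuIE._get_file_ID_mix_string above
        chars = list(source)
        mixed = []
        seed = float(seed)
        for _ in range(len(chars)):
            seed = (seed * 211 + 30031) % 65536
            index = int(math.floor(seed / 65536 * len(chars)))
            mixed.append(chars.pop(index))
        return mixed

    def decode_fileid(scrambled, seed):
        # Each '*'-separated token is an index into the shuffled alphabet
        mixed = mix_string(seed)
        return ''.join(mixed[int(tok)] for tok in scrambled.split('*') if tok)

    # Placeholder values, only to show the call shape:
    print(decode_fileid('12*34*5*60*7', 1234))
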
diff --git a/youtube_dl/PostProcessor.py b/youtube_dl/PostProcessor.py
index 527dc3a3d..375da1aa3 100644
--- a/youtube_dl/PostProcessor.py
+++ b/youtube_dl/PostProcessor.py
@@ -142,14 +142,20 @@ class FFmpegExtractAudioPP(PostProcessor):
             extension = 'mp3'
             more_opts = []
             if self._preferredquality is not None:
-                more_opts += [self._exes['avconv'] and '-b:a' or '-ab', self._preferredquality]
+                if int(self._preferredquality) < 10:
+                    more_opts += [self._exes['avconv'] and '-q:a' or '-aq', self._preferredquality]
+                else:
+                    more_opts += [self._exes['avconv'] and '-b:a' or '-ab', self._preferredquality]
         else:
             # We convert the audio (lossy)
             acodec = {'mp3': 'libmp3lame', 'aac': 'aac', 'm4a': 'aac', 'vorbis': 'libvorbis', 'wav': None}[self._preferredcodec]
             extension = self._preferredcodec
             more_opts = []
             if self._preferredquality is not None:
-                more_opts += [self._exes['avconv'] and '-b:a' or '-ab', self._preferredquality]
+                if int(self._preferredquality) < 10:
+                    more_opts += [self._exes['avconv'] and '-q:a' or '-aq', self._preferredquality]
+                else:
+                    more_opts += [self._exes['avconv'] and '-b:a' or '-ab', self._preferredquality]
             if self._preferredcodec == 'aac':
                 more_opts += ['-f', 'adts']
             if self._preferredcodec == 'm4a':
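
The intent of the FFmpegExtractAudioPP change above: an --audio-quality value below 10 is treated as a VBR quality level and mapped to avconv's -q:a (ffmpeg's -aq), while larger values are still treated as a bitrate and mapped to -b:a / -ab. A rough sketch of that decision in isolation (the helper name is illustrative, not part of the patch):

    def audio_quality_opts(preferred_quality, using_avconv):
        # 0-9 -> VBR quality switch, anything else -> bitrate switch
        if preferred_quality is None:
            return []
        if int(preferred_quality) < 10:
            return ['-q:a' if using_avconv else '-aq', preferred_quality]
        return ['-b:a' if using_avconv else '-ab', preferred_quality]

    # audio_quality_opts('5', True)    -> ['-q:a', '5']
    # audio_quality_opts('128', False) -> ['-ab', '128']
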
diff --git a/youtube_dl/__init__.py b/youtube_dl/__init__.py
index 86951840d..9fe627e2a 100644
--- a/youtube_dl/__init__.py
+++ b/youtube_dl/__init__.py
@@ -19,7 +19,7 @@ __authors__ = (
)
__license__ = 'Public Domain'
-__version__ = '2012.02.27'
+__version__ = '2012.09.27'
UPDATE_URL = 'https://raw.github.com/rg3/youtube-dl/master/youtube-dl'
UPDATE_URL_VERSION = 'https://raw.github.com/rg3/youtube-dl/master/LATEST_VERSION'
@@ -186,16 +186,18 @@ def parseOpts():
     general.add_option('-r', '--rate-limit',
             dest='ratelimit', metavar='LIMIT', help='download rate limit (e.g. 50k or 44.6m)')
     general.add_option('-R', '--retries',
-            dest='retries', metavar='RETRIES', help='number of retries (default is 10)', default=10)
+            dest='retries', metavar='RETRIES', help='number of retries (default is %default)', default=10)
     general.add_option('--dump-user-agent',
             action='store_true', dest='dump_user_agent',
             help='display the current browser identification', default=False)
+    general.add_option('--user-agent',
+            dest='user_agent', help='specify a custom user agent', metavar='UA')
     general.add_option('--list-extractors',
             action='store_true', dest='list_extractors',
             help='List all supported extractors and the URLs they would handle', default=False)
 
     selection.add_option('--playlist-start',
-            dest='playliststart', metavar='NUMBER', help='playlist video to start at (default is 1)', default=1)
+            dest='playliststart', metavar='NUMBER', help='playlist video to start at (default is %default)', default=1)
     selection.add_option('--playlist-end',
             dest='playlistend', metavar='NUMBER', help='playlist video to end at (default is last)', default=-1)
     selection.add_option('--match-title', dest='matchtitle', metavar='REGEX',help='download only matching titles (regex or caseless sub-string)')
@@ -267,7 +269,7 @@ def parseOpts():
             action='store_true', dest='autonumber',
             help='number downloaded files starting from 00000', default=False)
     filesystem.add_option('-o', '--output',
-            dest='outtmpl', metavar='TEMPLATE', help='output filename template. Use %(stitle)s to get the title, %(uploader)s for the uploader name, %(autonumber)s to get an automatically incremented number, %(ext)s for the filename extension, %(upload_date)s for the upload date (YYYYMMDD), and %% for a literal percent. Use - to output to stdout.')
+            dest='outtmpl', metavar='TEMPLATE', help='output filename template. Use %(stitle)s to get the title, %(uploader)s for the uploader name, %(autonumber)s to get an automatically incremented number, %(ext)s for the filename extension, %(upload_date)s for the upload date (YYYYMMDD), %(provider)s for the provider (youtube, metacafe, etc), %(id)s for the video id and %% for a literal percent. Use - to output to stdout.')
     filesystem.add_option('-a', '--batch-file',
             dest='batchfile', metavar='FILE', help='file containing URLs to download (\'-\' for stdin)')
     filesystem.add_option('-w', '--no-overwrites',
@@ -296,8 +298,8 @@ def parseOpts():
             help='convert video files to audio-only files (requires ffmpeg or avconv and ffprobe or avprobe)')
     postproc.add_option('--audio-format', metavar='FORMAT', dest='audioformat', default='best',
             help='"best", "aac", "vorbis", "mp3", "m4a", or "wav"; best by default')
-    postproc.add_option('--audio-quality', metavar='QUALITY', dest='audioquality', default='128K',
-            help='ffmpeg/avconv audio bitrate specification, 128k by default')
+    postproc.add_option('--audio-quality', metavar='QUALITY', dest='audioquality', default='5',
+            help='ffmpeg/avconv audio quality specification, insert a value between 0 (better) and 9 (worse) for VBR or a specific bitrate like 128K (default 5)')
     postproc.add_option('-k', '--keep-video', action='store_true', dest='keepvideo', default=False,
             help='keeps the video file on disk after the post-processing; the video is erased by default')
@@ -351,6 +353,8 @@ def gen_extractors():
         MixcloudIE(),
         StanfordOpenClassroomIE(),
         MTVIE(),
+        YoukuIE(),
+        XNXXIE(),
 
         GenericIE()
     ]
@@ -368,6 +372,9 @@ def _real_main():
             jar.load()
         except (IOError, OSError), err:
             sys.exit(u'ERROR: unable to open cookie file')
+    # Set user agent
+    if opts.user_agent is not None:
+        std_headers['User-Agent'] = opts.user_agent
 
     # Dump user agent
     if opts.dump_user_agent:
@@ -444,6 +451,10 @@ def _real_main():
     if opts.extractaudio:
         if opts.audioformat not in ['best', 'aac', 'mp3', 'vorbis', 'm4a', 'wav']:
             parser.error(u'invalid audio format specified')
+    if opts.audioquality:
+        opts.audioquality = opts.audioquality.strip('k').strip('K')
+        if not opts.audioquality.isdigit():
+            parser.error(u'invalid audio quality specified')
 
     # File downloader
     fd = FileDownloader({
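
For reference, the new --audio-quality validation above strips a trailing 'k'/'K' and then requires a plain integer, so both the new VBR default ('5') and an explicit bitrate ('128K', normalised to '128') are accepted. A small sketch of the same normalisation (the function name is illustrative, not part of the patch):

    def normalize_audio_quality(value):
        # '128K' -> '128', '5' -> '5'; anything non-numeric is rejected
        value = value.strip('k').strip('K')
        if not value.isdigit():
            raise ValueError('invalid audio quality specified')
        return value

    # normalize_audio_quality('128K') == '128'
    # normalize_audio_quality('5')    == '5'
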
diff --git a/youtube_dl/utils.py b/youtube_dl/utils.py
index 922e17ecc..839da17d0 100644
--- a/youtube_dl/utils.py
+++ b/youtube_dl/utils.py
@@ -19,7 +19,7 @@ except ImportError:
     import StringIO
 
 std_headers = {
-    'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:5.0.1) Gecko/20100101 Firefox/5.0.1',
+    'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20100101 Firefox/10.0',
     'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
     'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
     'Accept-Encoding': 'gzip, deflate',