Diffstat (limited to 'youtube_dl/InfoExtractors.py')
-rw-r--r--  youtube_dl/InfoExtractors.py  213
1 file changed, 202 insertions(+), 11 deletions(-)
diff --git a/youtube_dl/InfoExtractors.py b/youtube_dl/InfoExtractors.py
index 3875e7fd8..bdb2ec311 100644
--- a/youtube_dl/InfoExtractors.py
+++ b/youtube_dl/InfoExtractors.py
@@ -13,6 +13,8 @@ import urllib
import urllib2
import email.utils
import xml.etree.ElementTree
+import random
+import math
from urlparse import parse_qs
try:
@@ -98,7 +100,8 @@ class YoutubeIE(InfoExtractor):
_VALID_URL = r"""^
(
(?:https?://)? # http(s):// (optional)
- (?:youtu\.be/|(?:\w+\.)?youtube(?:-nocookie)?\.com/) # the various hostnames, with wildcard subdomains
+ (?:youtu\.be/|(?:\w+\.)?youtube(?:-nocookie)?\.com/|
+ tube\.majestyc\.net/) # the various hostnames, with wildcard subdomains
(?!view_play_list|my_playlists|artist|playlist) # ignore playlist URLs
(?: # the various things that can precede the ID:
(?:(?:v|embed|e)/) # v/ or embed/ or e/
@@ -423,7 +426,7 @@ class YoutubeIE(InfoExtractor):
url_data_strs = video_info['url_encoded_fmt_stream_map'][0].split(',')
url_data = [parse_qs(uds) for uds in url_data_strs]
url_data = filter(lambda ud: 'itag' in ud and 'url' in ud, url_data)
- url_map = dict((ud['itag'][0], ud['url'][0]) for ud in url_data)
+ url_map = dict((ud['itag'][0], ud['url'][0] + '&signature=' + ud['sig'][0]) for ud in url_data)
format_limit = self._downloader.params.get('format_limit', None)
available_formats = self._available_formats_prefer_free if self._downloader.params.get('prefer_free_formats', False) else self._available_formats
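
For reference, the url_map change above appends each stream's 'sig' value as a signature query parameter when building the itag-to-URL map. A minimal standalone sketch of the same idea (the helper name build_url_map is assumed for illustration; it only handles plain, unencrypted signatures):

# Sketch: build {itag: url} from url_encoded_fmt_stream_map, appending the
# signature so the CDN accepts the request.
from urlparse import parse_qs

def build_url_map(stream_map):
    url_data = [parse_qs(entry) for entry in stream_map.split(',')]
    url_data = [ud for ud in url_data
                if 'itag' in ud and 'url' in ud and 'sig' in ud]
    return dict((ud['itag'][0], ud['url'][0] + '&signature=' + ud['sig'][0])
                for ud in url_data)
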
@@ -636,7 +639,7 @@ class DailymotionIE(InfoExtractor):
video_id = mobj.group(1)
- video_extension = 'flv'
+ video_extension = 'mp4'
# Retrieve video webpage to extract further information
request = urllib2.Request(url)
@@ -650,20 +653,21 @@ class DailymotionIE(InfoExtractor):
# Extract URL, uploader and title from webpage
self.report_extraction(video_id)
- mobj = re.search(r'(?i)addVariable\(\"sequence\"\s*,\s*\"([^\"]+?)\"\)', webpage)
+ mobj = re.search(r'\s*var flashvars = (.*)', webpage)
if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract media URL')
return
- sequence = urllib.unquote(mobj.group(1))
- mobj = re.search(r',\"sdURL\"\:\"([^\"]+?)\",', sequence)
+ flashvars = urllib.unquote(mobj.group(1))
+ if 'hqURL' in flashvars: max_quality = 'hqURL'
+ elif 'sdURL' in flashvars: max_quality = 'sdURL'
+ else: max_quality = 'ldURL'
+ mobj = re.search(r'"' + max_quality + r'":"(.+?)"', flashvars)
if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract media URL')
return
- mediaURL = urllib.unquote(mobj.group(1)).replace('\\', '')
-
- # if needed add http://www.dailymotion.com/ if relative URL
+ video_url = mobj.group(1).replace('\\/', '/')
- video_url = mediaURL
+ # TODO: support choosing qualities
mobj = re.search(r'<meta property="og:title" content="(?P<title>[^"]*)" />', webpage)
if mobj is None:
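
The new Dailymotion logic reads the page's flashvars blob and prefers hqURL over sdURL over ldURL. A rough standalone sketch of that selection (the helper name and the simplified control flow are illustrative, not part of the patch):

import re
import urllib

def pick_dailymotion_url(webpage):
    # Locate the flashvars blob and unquote it, as the patched extractor does.
    mobj = re.search(r'\s*var flashvars = (.*)', webpage)
    if mobj is None:
        return None
    flashvars = urllib.unquote(mobj.group(1))
    # Prefer HQ, then SD, then LD.
    for quality in ('hqURL', 'sdURL', 'ldURL'):
        if quality in flashvars:
            mobj = re.search(r'"' + quality + r'":"(.+?)"', flashvars)
            if mobj:
                # The URL is JSON-escaped, so unescape the slashes.
                return mobj.group(1).replace('\\/', '/')
    return None
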
@@ -1492,7 +1496,7 @@ class YoutubePlaylistIE(InfoExtractor):
_VALID_URL = r'(?:https?://)?(?:\w+\.)?youtube\.com/(?:(?:course|view_play_list|my_playlists|artist|playlist)\?.*?(p|a|list)=|user/.*?/user/|p/|user/.*?#[pg]/c/)(?:PL)?([0-9A-Za-z-_]+)(?:/.*?/([0-9A-Za-z_-]+))?.*'
_TEMPLATE_URL = 'http://www.youtube.com/%s?%s=%s&page=%s&gl=US&hl=en'
- _VIDEO_INDICATOR_TEMPLATE = r'/watch\?v=(.+?)&amp;list=(PL)?%s&'
+ _VIDEO_INDICATOR_TEMPLATE = r'/watch\?v=(.+?)&amp;list=.*?%s'
_MORE_PAGES_INDICATOR = r'yt-uix-pager-next'
IE_NAME = u'youtube:playlist'
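
The relaxed _VIDEO_INDICATOR_TEMPLATE above matches watch links whose list parameter merely contains the playlist id. A short sketch of how such a template can be applied to a playlist page (function name and the deduplication loop are assumptions for illustration):

import re

def extract_video_ids(page, playlist_id):
    # 'page' is the playlist HTML, 'playlist_id' the id without the PL prefix.
    indicator = r'/watch\?v=(.+?)&amp;list=.*?%s' % re.escape(playlist_id)
    ids = []
    for mobj in re.finditer(indicator, page):
        vid = mobj.group(1)
        if vid not in ids:
            ids.append(vid)
    return ids
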
@@ -2976,3 +2980,190 @@ class MTVIE(InfoExtractor):
}
return [info]
+
+
+class YoukuIE(InfoExtractor):
+
+ _VALID_URL = r'(?:http://)?v\.youku\.com/v_show/id_(?P<ID>[A-Za-z0-9]+)\.html'
+ IE_NAME = u'Youku'
+
+ def __init__(self, downloader=None):
+ InfoExtractor.__init__(self, downloader)
+
+ def report_download_webpage(self, file_id):
+ """Report webpage download."""
+ self._downloader.to_screen(u'[Youku] %s: Downloading webpage' % file_id)
+
+ def report_extraction(self, file_id):
+ """Report information extraction."""
+ self._downloader.to_screen(u'[Youku] %s: Extracting information' % file_id)
+
+ def _gen_sid(self):
+ nowTime = int(time.time() * 1000)
+ random1 = random.randint(1000,1998)
+ random2 = random.randint(1000,9999)
+
+ return "%d%d%d" %(nowTime,random1,random2)
+
+ def _get_file_ID_mix_string(self, seed):
+ mixed = []
+ source = list("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ/\:._-1234567890")
+ seed = float(seed)
+ for i in range(len(source)):
+ seed = (seed * 211 + 30031 ) % 65536
+ index = math.floor(seed / 65536 * len(source) )
+ mixed.append(source[int(index)])
+ source.remove(source[int(index)])
+ #return ''.join(mixed)
+ return mixed
+
+ def _get_file_id(self, fileId, seed):
+ mixed = self._get_file_ID_mix_string(seed)
+ ids = fileId.split('*')
+ realId = []
+ for ch in ids:
+ if ch:
+ realId.append(mixed[int(ch)])
+ return ''.join(realId)
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ if mobj is None:
+ self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+ return
+ video_id = mobj.group('ID')
+
+ info_url = 'http://v.youku.com/player/getPlayList/VideoIDS/' + video_id
+
+ request = urllib2.Request(info_url, None, std_headers)
+ try:
+ self.report_download_webpage(video_id)
+ jsondata = urllib2.urlopen(request).read()
+ except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
+ self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
+ return
+
+ self.report_extraction(video_id)
+ try:
+ config = json.loads(jsondata)
+
+ video_title = config['data'][0]['title']
+ seed = config['data'][0]['seed']
+
+ format = self._downloader.params.get('format', None)
+ supported_format = config['data'][0]['streamfileids'].keys()
+
+ if format is None or format == 'best':
+ if 'hd2' in supported_format:
+ format = 'hd2'
+ else:
+ format = 'flv'
+ ext = u'flv'
+ elif format == 'worst':
+ format = 'mp4'
+ ext = u'mp4'
+ else:
+ format = 'flv'
+ ext = u'flv'
+
+
+ fileid = config['data'][0]['streamfileids'][format]
+ seg_number = len(config['data'][0]['segs'][format])
+
+ keys=[]
+ for i in xrange(seg_number):
+ keys.append(config['data'][0]['segs'][format][i]['k'])
+
+ # TODO: check for error responses here
+ # (Youku can only be viewed from mainland China)
+ except:
+ self._downloader.trouble(u'ERROR: unable to extract info section')
+ return
+
+ files_info=[]
+ sid = self._gen_sid()
+ fileid = self._get_file_id(fileid, seed)
+
+ # characters 8 and 9 of fileid encode the segment number,
+ # so fileid[8:10] is replaced for each segment
+ for index, key in enumerate(keys):
+
+ temp_fileid = '%s%02X%s' % (fileid[0:8], index, fileid[10:])
+ download_url = 'http://f.youku.com/player/getFlvPath/sid/%s_%02X/st/flv/fileid/%s?k=%s' % (sid, index, temp_fileid, key)
+
+ info = {
+ 'id': '%s_part%02d' % (video_id, index),
+ 'url': download_url,
+ 'uploader': None,
+ 'title': video_title,
+ 'ext': ext,
+ 'format': u'NA'
+ }
+ files_info.append(info)
+
+ return files_info
+
+
+class XNXXIE(InfoExtractor):
+ """Information extractor for xnxx.com"""
+
+ _VALID_URL = r'^http://video\.xnxx\.com/video([0-9]+)/(.*)'
+ IE_NAME = u'xnxx'
+ VIDEO_URL_RE = r'flv_url=(.*?)&amp;'
+ VIDEO_TITLE_RE = r'<title>(.*?)\s+-\s+XNXX.COM'
+ VIDEO_THUMB_RE = r'url_bigthumb=(.*?)&amp;'
+
+ def report_webpage(self, video_id):
+ """Report information extraction"""
+ self._downloader.to_screen(u'[%s] %s: Downloading webpage' % (self.IE_NAME, video_id))
+
+ def report_extraction(self, video_id):
+ """Report information extraction"""
+ self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id))
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ if mobj is None:
+ self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+ return
+ video_id = mobj.group(1).decode('utf-8')
+
+ self.report_webpage(video_id)
+
+ # Get webpage content
+ try:
+ webpage = urllib2.urlopen(url).read()
+ except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+ self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % err)
+ return
+
+ result = re.search(self.VIDEO_URL_RE, webpage)
+ if result is None:
+ self._downloader.trouble(u'ERROR: unable to extract video url')
+ return
+ video_url = urllib.unquote(result.group(1).decode('utf-8'))
+
+ result = re.search(self.VIDEO_TITLE_RE, webpage)
+ if result is None:
+ self._downloader.trouble(u'ERROR: unable to extract video title')
+ return
+ video_title = result.group(1).decode('utf-8')
+
+ result = re.search(self.VIDEO_THUMB_RE, webpage)
+ if result is None:
+ self._downloader.trouble(u'ERROR: unable to extract video thumbnail')
+ return
+ video_thumbnail = result.group(1).decode('utf-8')
+
+ info = {'id': video_id,
+ 'url': video_url,
+ 'uploader': None,
+ 'upload_date': None,
+ 'title': video_title,
+ 'ext': 'flv',
+ 'format': 'flv',
+ 'thumbnail': video_thumbnail,
+ 'description': None,
+ 'player_url': None}
+
+ return [info]
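
For reference, the Youku fileid scheme added above shuffles a fixed alphabet with a seeded linear-congruential step and then maps the '*'-separated indices from streamfileids back onto that shuffled alphabet. A compact standalone sketch of the same decoding (function names here are illustrative, not taken from the module):

import math

ALPHABET = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ/\\:._-1234567890"

def mix_alphabet(seed):
    # Reproduce the player's seeded shuffle: each step advances the seed with
    # a small LCG and draws one character, without replacement, from the pool.
    source = list(ALPHABET)
    seed = float(seed)
    mixed = []
    while source:
        seed = (seed * 211 + 30031) % 65536
        index = int(math.floor(seed / 65536 * len(source)))
        mixed.append(source.pop(index))
    return mixed

def decode_fileid(obfuscated, seed):
    # 'obfuscated' is the '*'-separated index string from streamfileids.
    mixed = mix_alphabet(seed)
    return ''.join(mixed[int(ch)] for ch in obfuscated.split('*') if ch)
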