Diffstat (limited to 'youtube_dl/InfoExtractors.py')
-rw-r--r--	youtube_dl/InfoExtractors.py	216
1 file changed, 148 insertions, 68 deletions
diff --git a/youtube_dl/InfoExtractors.py b/youtube_dl/InfoExtractors.py
index 88973cce8..13b04ab5b 100644
--- a/youtube_dl/InfoExtractors.py
+++ b/youtube_dl/InfoExtractors.py
@@ -102,6 +102,7 @@ class YoutubeIE(InfoExtractor):
 	                     (?:https?://)?                                       # http(s):// (optional)
 	                     (?:youtu\.be/|(?:\w+\.)?youtube(?:-nocookie)?\.com/|
 	                     	tube\.majestyc\.net/)                             # the various hostnames, with wildcard subdomains
+	                     (?:.*?\#/)?                                          # handle anchor (#/) redirect urls
 	                     (?!view_play_list|my_playlists|artist|playlist)      # ignore playlist URLs
 	                     (?:                                                  # the various things that can precede the ID:
 	                         (?:(?:v|embed|e)/)                               # v/ or embed/ or e/
@@ -212,9 +213,9 @@ class YoutubeIE(InfoExtractor):
 		return srt

 	def _print_formats(self, formats):
-		print 'Available formats:'
+		print('Available formats:')
 		for x in formats:
-			print '%s\t:\t%s\t[%s]' %(x, self._video_extensions.get(x, 'flv'), self._video_dimensions.get(x, '???'))
+			print('%s\t:\t%s\t[%s]' %(x, self._video_extensions.get(x, 'flv'), self._video_dimensions.get(x, '???')))

 	def _real_initialize(self):
 		if self._downloader is None:
@@ -237,7 +238,7 @@ class YoutubeIE(InfoExtractor):
 				else:
 					raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
 			except (IOError, netrc.NetrcParseError), err:
-				self._downloader.to_stderr(u'WARNING: parsing .netrc: %s' % str(err))
+				self._downloader.to_stderr(u'WARNING: parsing .netrc: %s' % compat_str(err))
 				return

 		# Set language
@@ -246,7 +247,7 @@ class YoutubeIE(InfoExtractor):
 			self.report_lang()
 			urllib2.urlopen(request).read()
 		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.to_stderr(u'WARNING: unable to set language: %s' % str(err))
+			self._downloader.to_stderr(u'WARNING: unable to set language: %s' % compat_str(err))
 			return

 		# No authentication to be performed
@@ -269,7 +270,7 @@ class YoutubeIE(InfoExtractor):
 				self._downloader.to_stderr(u'WARNING: unable to log in: bad username or password')
 				return
 		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.to_stderr(u'WARNING: unable to log in: %s' % str(err))
+			self._downloader.to_stderr(u'WARNING: unable to log in: %s' % compat_str(err))
 			return

 		# Confirm age
@@ -282,7 +283,7 @@ class YoutubeIE(InfoExtractor):
 			self.report_age_confirmation()
 			age_results = urllib2.urlopen(request).read()
 		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: unable to confirm age: %s' % str(err))
+			self._downloader.trouble(u'ERROR: unable to confirm age: %s' % compat_str(err))
 			return

 	def _real_extract(self, url):
@@ -304,7 +305,7 @@ class YoutubeIE(InfoExtractor):
 		try:
 			video_webpage = urllib2.urlopen(request).read()
 		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % str(err))
+			self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err))
 			return

 		# Attempt to extract SWF player URL
@@ -326,7 +327,7 @@ class YoutubeIE(InfoExtractor):
 				if 'token' in video_info:
 					break
 			except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-				self._downloader.trouble(u'ERROR: unable to download video info webpage: %s' % str(err))
+				self._downloader.trouble(u'ERROR: unable to download video info webpage: %s' % compat_str(err))
 				return
 		if 'token' not in video_info:
 			if 'reason' in video_info:
@@ -389,7 +390,7 @@ class YoutubeIE(InfoExtractor):
 				try:
 					srt_list = urllib2.urlopen(request).read()
 				except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-					raise Trouble(u'WARNING: unable to download video subtitles: %s' % str(err))
+					raise Trouble(u'WARNING: unable to download video subtitles: %s' % compat_str(err))
 				srt_lang_list = re.findall(r'name="([^"]*)"[^>]+lang_code="([\w\-]+)"', srt_list)
 				srt_lang_list = dict((l[1], l[0]) for l in srt_lang_list)
 				if not srt_lang_list:
@@ -406,13 +407,19 @@ class YoutubeIE(InfoExtractor):
 				try:
 					srt_xml = urllib2.urlopen(request).read()
 				except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-					raise Trouble(u'WARNING: unable to download video subtitles: %s' % str(err))
+					raise Trouble(u'WARNING: unable to download video subtitles: %s' % compat_str(err))
 				if not srt_xml:
 					raise Trouble(u'WARNING: unable to download video subtitles')
 				video_subtitles = self._closed_captions_xml_to_srt(srt_xml.decode('utf-8'))
 			except Trouble as trouble:
 				self._downloader.trouble(trouble[0])

+		if 'length_seconds' not in video_info:
+			self._downloader.trouble(u'WARNING: unable to extract video duration')
+			video_duration = ''
+		else:
+			video_duration = urllib.unquote_plus(video_info['length_seconds'][0])
+
 		# token
 		video_token = urllib.unquote_plus(video_info['token'][0])
@@ -479,7 +486,8 @@ class YoutubeIE(InfoExtractor):
 				'thumbnail':	video_thumbnail.decode('utf-8'),
 				'description':	video_description,
 				'player_url':	player_url,
-				'subtitles':	video_subtitles
+				'subtitles':	video_subtitles,
+				'duration':		video_duration
 			})

 		return results
@@ -518,7 +526,7 @@ class MetacafeIE(InfoExtractor):
 			self.report_disclaimer()
 			disclaimer = urllib2.urlopen(request).read()
 		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: unable to retrieve disclaimer: %s' % str(err))
+			self._downloader.trouble(u'ERROR: unable to retrieve disclaimer: %s' % compat_str(err))
 			return

 		# Confirm age
@@ -531,7 +539,7 @@ class MetacafeIE(InfoExtractor):
 			self.report_age_confirmation()
 			disclaimer = urllib2.urlopen(request).read()
 		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: unable to confirm age: %s' % str(err))
+			self._downloader.trouble(u'ERROR: unable to confirm age: %s' % compat_str(err))
 			return

 	def _real_extract(self, url):
@@ -555,7 +563,7 @@ class MetacafeIE(InfoExtractor):
 			self.report_download_webpage(video_id)
 			webpage = urllib2.urlopen(request).read()
 		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: unable retrieve video webpage: %s' % str(err))
+			self._downloader.trouble(u'ERROR: unable retrieve video webpage: %s' % compat_str(err))
 			return

 		# Extract URL, uploader and title from webpage
@@ -595,7 +603,7 @@ class MetacafeIE(InfoExtractor):
 			return
 		video_title = mobj.group(1).decode('utf-8')

-		mobj = re.search(r'(?ms)By:\s*<a .*?>(.+?)<', webpage)
+		mobj = re.search(r'submitter=(.*?);', webpage)
 		if mobj is None:
 			self._downloader.trouble(u'ERROR: unable to extract uploader nickname')
 			return
@@ -648,7 +656,7 @@ class DailymotionIE(InfoExtractor):
 			self.report_download_webpage(video_id)
 			webpage = urllib2.urlopen(request).read()
 		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: unable retrieve video webpage: %s' % str(err))
+			self._downloader.trouble(u'ERROR: unable retrieve video webpage: %s' % compat_str(err))
 			return

 		# Extract URL, uploader and title from webpage
@@ -684,9 +692,14 @@ class DailymotionIE(InfoExtractor):
 		video_title = unescapeHTML(mobj.group('title').decode('utf-8'))

 		video_uploader = u'NA'
-		mobj = re.search(r'(?im)<span class="owner[^\"]+?">[^<]+?<a [^>]+?>([^<]+?)</a></span>', webpage)
+		mobj = re.search(r'(?im)<span class="owner[^\"]+?">[^<]+?<a [^>]+?>([^<]+?)</a>', webpage)
 		if mobj is None:
-			self._downloader.trouble(u'WARNING: unable to extract uploader nickname')
+			# lookin for official user
+			mobj_official = re.search(r'<span rel="author"[^>]+?>([^<]+?)</span>', webpage)
+			if mobj_official is None:
+				self._downloader.trouble(u'WARNING: unable to extract uploader nickname')
+			else:
+				video_uploader = mobj_official.group(1)
 		else:
 			video_uploader = mobj.group(1)
@@ -741,7 +754,7 @@ class GoogleIE(InfoExtractor):
 			self.report_download_webpage(video_id)
 			webpage = urllib2.urlopen(request).read()
 		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
+			self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
 			return

 		# Extract URL, uploader, and title from webpage
@@ -780,7 +793,7 @@ class GoogleIE(InfoExtractor):
 			try:
 				webpage = urllib2.urlopen(request).read()
 			except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-				self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
+				self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
 				return
 			mobj = re.search(r'<img class=thumbnail-img (?:.* )?src=(http.*)>', webpage)
 			if mobj is None:
@@ -836,7 +849,7 @@ class PhotobucketIE(InfoExtractor):
 			self.report_download_webpage(video_id)
 			webpage = urllib2.urlopen(request).read()
 		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
+			self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
 			return

 		# Extract URL, uploader, and title from webpage
@@ -906,7 +919,7 @@ class YahooIE(InfoExtractor):
 			try:
 				webpage = urllib2.urlopen(request).read()
 			except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-				self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
+				self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
 				return

 			mobj = re.search(r'\("id", "([0-9]+)"\);', webpage)
@@ -930,7 +943,7 @@
 			self.report_download_webpage(video_id)
 			webpage = urllib2.urlopen(request).read()
 		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
+			self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
 			return

 		# Extract uploader and title from webpage
@@ -988,7 +1001,7 @@ class YahooIE(InfoExtractor):
 			self.report_download_webpage(video_id)
 			webpage = urllib2.urlopen(request).read()
 		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
+			self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
 			return

 		# Extract media URL from playlist XML
@@ -1017,7 +1030,7 @@ class VimeoIE(InfoExtractor):
 	"""Information extractor for vimeo.com."""

 	# _VALID_URL matches Vimeo URLs
-	_VALID_URL = r'(?:https?://)?(?:(?:www|player).)?vimeo\.com/(?:groups/[^/]+/)?(?:videos?/)?([0-9]+)'
+	_VALID_URL = r'(?:https?://)?(?:(?:www|player).)?vimeo\.com/(?:(?:groups|album)/[^/]+/)?(?:videos?/)?([0-9]+)'
 	IE_NAME = u'vimeo'

 	def __init__(self, downloader=None):
@@ -1046,7 +1059,7 @@
 			self.report_download_webpage(video_id)
 			webpage = urllib2.urlopen(request).read()
 		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
+			self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
 			return

 		# Now we begin extracting as much information as we can from what we
@@ -1087,21 +1100,32 @@
 		timestamp = config['request']['timestamp']

 		# Vimeo specific: extract video codec and quality information
+		# First consider quality, then codecs, then take everything
 		# TODO bind to format param
 		codecs = [('h264', 'mp4'), ('vp8', 'flv'), ('vp6', 'flv')]
-		for codec in codecs:
-			if codec[0] in config["video"]["files"]:
-				video_codec = codec[0]
-				video_extension = codec[1]
-				if 'hd' in config["video"]["files"][codec[0]]: quality = 'hd'
-				else: quality = 'sd'
+		files = { 'hd': [], 'sd': [], 'other': []}
+		for codec_name, codec_extension in codecs:
+			if codec_name in config["video"]["files"]:
+				if 'hd' in config["video"]["files"][codec_name]:
+					files['hd'].append((codec_name, codec_extension, 'hd'))
+				elif 'sd' in config["video"]["files"][codec_name]:
+					files['sd'].append((codec_name, codec_extension, 'sd'))
+				else:
+					files['other'].append((codec_name, codec_extension, config["video"]["files"][codec_name][0]))
+
+		for quality in ('hd', 'sd', 'other'):
+			if len(files[quality]) > 0:
+				video_quality = files[quality][0][2]
+				video_codec = files[quality][0][0]
+				video_extension = files[quality][0][1]
+				self._downloader.to_screen(u'[vimeo] %s: Downloading %s file at %s quality' % (video_id, video_codec.upper(), video_quality))
 				break
 		else:
 			self._downloader.trouble(u'ERROR: no known codec found')
 			return

 		video_url = "http://player.vimeo.com/play_redirect?clip_id=%s&sig=%s&time=%s&quality=%s&codecs=%s&type=moogaloop_local&embed_location=" \
-					%(video_id, sig, timestamp, quality, video_codec.upper())
+					%(video_id, sig, timestamp, video_quality, video_codec.upper())

 		return [{
 			'id':		video_id,
@@ -1201,7 +1225,7 @@ class GenericIE(InfoExtractor):
 			self.report_download_webpage(video_id)
 			webpage = urllib2.urlopen(request).read()
 		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
+			self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
 			return
 		except ValueError, err:
 			# since this is the last-resort InfoExtractor, if
@@ -1322,7 +1346,7 @@ class YoutubeSearchIE(InfoExtractor):
 			try:
 				data = urllib2.urlopen(request).read()
 			except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-				self._downloader.trouble(u'ERROR: unable to download API page: %s' % str(err))
+				self._downloader.trouble(u'ERROR: unable to download API page: %s' % compat_str(err))
 				return

 			api_response = json.loads(data)['data']
@@ -1399,7 +1423,7 @@ class GoogleSearchIE(InfoExtractor):
 			try:
 				page = urllib2.urlopen(request).read()
 			except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-				self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err))
+				self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
 				return

 			# Extract video identifiers
@@ -1482,7 +1506,7 @@ class YahooSearchIE(InfoExtractor):
 			try:
 				page = urllib2.urlopen(request).read()
 			except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-				self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err))
+				self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
 				return

 			# Extract video identifiers
@@ -1508,7 +1532,7 @@
 class YoutubePlaylistIE(InfoExtractor):
 	"""Information Extractor for YouTube playlists."""

-	_VALID_URL = r'(?:https?://)?(?:\w+\.)?youtube\.com/(?:(?:course|view_play_list|my_playlists|artist|playlist)\?.*?(p|a|list)=|user/.*?/user/|p/|user/.*?#[pg]/c/)(?:PL|EC)?([0-9A-Za-z-_]+)(?:/.*?/([0-9A-Za-z_-]+))?.*'
+	_VALID_URL = r'(?:(?:https?://)?(?:\w+\.)?youtube\.com/(?:(?:course|view_play_list|my_playlists|artist|playlist)\?.*?(p|a|list)=|user/.*?/user/|p/|user/.*?#[pg]/c/)(?:PL|EC)?|PL|EC)([0-9A-Za-z-_]+)(?:/.*?/([0-9A-Za-z_-]+))?.*'
 	_TEMPLATE_URL = 'http://www.youtube.com/%s?%s=%s&page=%s&gl=US&hl=en'
 	_VIDEO_INDICATOR_TEMPLATE = r'/watch\?v=(.+?)&([^&"]+&)*list=.*?%s'
 	_MORE_PAGES_INDICATOR = r'yt-uix-pager-next'
@@ -1552,7 +1576,7 @@ class YoutubePlaylistIE(InfoExtractor):
 			try:
 				page = urllib2.urlopen(request).read()
 			except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-				self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err))
+				self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
 				return

 			# Extract video identifiers
@@ -1609,7 +1633,7 @@ class YoutubeChannelIE(InfoExtractor):
 			try:
 				page = urllib2.urlopen(request).read()
 			except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-				self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err))
+				self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
 				return

 			# Extract video identifiers
@@ -1672,7 +1696,7 @@ class YoutubeUserIE(InfoExtractor):
 			try:
 				page = urllib2.urlopen(request).read()
 			except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-				self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err))
+				self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
 				return

 			# Extract video identifiers
@@ -1744,7 +1768,7 @@ class BlipTVUserIE(InfoExtractor):
 			mobj = re.search(r'data-users-id="([^"]+)"', page)
 			page_base = page_base % mobj.group(1)
 		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err))
+			self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
 			return

@@ -1832,7 +1856,7 @@ class DepositFilesIE(InfoExtractor):
 			self.report_download_webpage(file_id)
 			webpage = urllib2.urlopen(request).read()
 		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: Unable to retrieve file webpage: %s' % str(err))
+			self._downloader.trouble(u'ERROR: Unable to retrieve file webpage: %s' % compat_str(err))
 			return

 		# Search for the real file URL
@@ -1949,7 +1973,7 @@ class FacebookIE(InfoExtractor):
 				else:
 					raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
 			except (IOError, netrc.NetrcParseError), err:
-				self._downloader.to_stderr(u'WARNING: parsing .netrc: %s' % str(err))
+				self._downloader.to_stderr(u'WARNING: parsing .netrc: %s' % compat_str(err))
 				return

 		if useremail is None:
@@ -1969,7 +1993,7 @@
 				self._downloader.to_stderr(u'WARNING: unable to log in: bad username/password, or exceded login rate limit (~3/min). Check credentials or wait.')
 				return
 		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.to_stderr(u'WARNING: unable to log in: %s' % str(err))
+			self._downloader.to_stderr(u'WARNING: unable to log in: %s' % compat_str(err))
 			return

 	def _real_extract(self, url):
@@ -1986,7 +2010,7 @@
 			page = urllib2.urlopen(request)
 			video_webpage = page.read()
 		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % str(err))
+			self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err))
 			return

 		# Start extracting information
@@ -2120,13 +2144,13 @@ class BlipTVIE(InfoExtractor):
 					'urlhandle': urlh
 				}
 		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: unable to download video info webpage: %s' % str(err))
+			self._downloader.trouble(u'ERROR: unable to download video info webpage: %s' % compat_str(err))
 			return
 		if info is None: # Regular URL
 			try:
 				json_code = urlh.read()
 			except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-				self._downloader.trouble(u'ERROR: unable to read video info webpage: %s' % str(err))
+				self._downloader.trouble(u'ERROR: unable to read video info webpage: %s' % compat_str(err))
 				return

 			try:
@@ -2194,7 +2218,7 @@ class MyVideoIE(InfoExtractor):
 			self.report_download_webpage(video_id)
 			webpage = urllib2.urlopen(request).read()
 		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
+			self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
 			return

 		self.report_extraction(video_id)
@@ -2229,6 +2253,25 @@ class ComedyCentralIE(InfoExtractor):
 	_VALID_URL = r'^(:(?P<shortname>tds|thedailyshow|cr|colbert|colbertnation|colbertreport))|(https?://)?(www\.)?(?P<showname>thedailyshow|colbertnation)\.com/full-episodes/(?P<episode>.*)$'
 	IE_NAME = u'comedycentral'

+	_available_formats = ['3500', '2200', '1700', '1200', '750', '400']
+
+	_video_extensions = {
+		'3500': 'mp4',
+		'2200': 'mp4',
+		'1700': 'mp4',
+		'1200': 'mp4',
+		'750': 'mp4',
+		'400': 'mp4',
+	}
+	_video_dimensions = {
+		'3500': '1280x720',
+		'2200': '960x540',
+		'1700': '768x432',
+		'1200': '640x360',
+		'750': '512x288',
+		'400': '384x216',
+	}
+
 	def report_extraction(self, episode_id):
 		self._downloader.to_screen(u'[comedycentral] %s: Extracting information' % episode_id)

@@ -2241,6 +2284,13 @@ class ComedyCentralIE(InfoExtractor):
 	def report_player_url(self, episode_id):
 		self._downloader.to_screen(u'[comedycentral] %s: Determining player URL' % episode_id)

+
+	def _print_formats(self, formats):
+		print('Available formats:')
+		for x in formats:
+			print('%s\t:\t%s\t[%s]' %(x, self._video_extensions.get(x, 'mp4'), self._video_dimensions.get(x, '???')))
+
+
 	def _real_extract(self, url):
 		mobj = re.match(self._VALID_URL, url)
 		if mobj is None:
@@ -2281,10 +2331,19 @@
 			epTitle = mobj.group('episode')

 		mMovieParams = re.findall('(?:<param name="movie" value="|var url = ")(http://media.mtvnservices.com/([^"]*episode.*?:.*?))"', html)
+
 		if len(mMovieParams) == 0:
-			self._downloader.trouble(u'ERROR: unable to find Flash URL in webpage ' + url)
-			return
+			# The Colbert Report embeds the information in a without
+			# a URL prefix; so extract the alternate reference
+			# and then add the URL prefix manually.
+			altMovieParams = re.findall('data-mgid="([^"]*episode.*?:.*?)"', html)
+			if len(altMovieParams) == 0:
+				self._downloader.trouble(u'ERROR: unable to find Flash URL in webpage ' + url)
+				return
+			else:
+				mMovieParams = [("http://media.mtvnservices.com/" + altMovieParams[0], altMovieParams[0])]
+
 		playerUrl_raw = mMovieParams[0][0]
 		self.report_player_url(epTitle)
 		try:
@@ -2333,10 +2392,31 @@
 			if len(turls) == 0:
 				self._downloader.trouble(u'\nERROR: unable to download ' + mediaId + ': No videos found')
 				continue
+
+			if self._downloader.params.get('listformats', None):
+				self._print_formats([i[0] for i in turls])
+				return

 			# For now, just pick the highest bitrate
 			format,video_url = turls[-1]

+			# Get the format arg from the arg stream
+			req_format = self._downloader.params.get('format', None)
+
+			# Select format if we can find one
+			for f,v in turls:
+				if f == req_format:
+					format, video_url = f, v
+					break
+
+			# Patch to download from alternative CDN, which does not
+			# break on current RTMPDump builds
+			broken_cdn = "rtmpe://viacomccstrmfs.fplive.net/viacomccstrm/gsp.comedystor/"
+			better_cdn = "rtmpe://cp10740.edgefcs.net/ondemand/mtvnorigin/gsp.comedystor/"
+
+			if video_url.startswith(broken_cdn):
+				video_url = video_url.replace(broken_cdn, better_cdn)
+
 			effTitle = showId + u'-' + epTitle
 			info = {
 				'id': shortMediaId,
@@ -2348,7 +2428,7 @@
 				'format': format,
 				'thumbnail': None,
 				'description': officialTitle,
-				'player_url': playerUrl
+				'player_url': None #playerUrl
 			}

 			results.append(info)
@@ -2456,7 +2536,7 @@ class CollegeHumorIE(InfoExtractor):
 		try:
 			webpage = urllib2.urlopen(request).read()
 		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % str(err))
+			self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err))
 			return

 		m = re.search(r'id="video:(?P<internalvideoid>[0-9]+)"', webpage)
@@ -2475,7 +2555,7 @@ class CollegeHumorIE(InfoExtractor):
 		try:
 			metaXml = urllib2.urlopen(xmlUrl).read()
 		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: unable to download video info XML: %s' % str(err))
+			self._downloader.trouble(u'ERROR: unable to download video info XML: %s' % compat_str(err))
 			return

 		mdoc = xml.etree.ElementTree.fromstring(metaXml)
@@ -2521,7 +2601,7 @@ class XVideosIE(InfoExtractor):
 		try:
 			webpage = urllib2.urlopen(request).read()
 		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % str(err))
+			self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err))
 			return

 		self.report_extraction(video_id)
@@ -2607,7 +2687,7 @@ class SoundcloudIE(InfoExtractor):
 		try:
 			webpage = urllib2.urlopen(request).read()
 		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % str(err))
+			self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err))
 			return

 		self.report_extraction('%s/%s' % (uploader, slug_title))
@@ -2634,7 +2714,7 @@
 		mobj = re.search('track-description-value"><p>(.*?)</p>', webpage)
 		if mobj:
 			description = mobj.group(1)
-		
+
 		# upload date
 		upload_date = None
 		mobj = re.search("pretty-date'>on ([\w]+ [\d]+, [\d]+ \d+:\d+)</abbr></h2>", webpage)
@@ -2642,7 +2722,7 @@
 			try:
 				upload_date = datetime.datetime.strptime(mobj.group(1), '%B %d, %Y %H:%M').strftime('%Y%m%d')
 			except Exception, e:
-				self._downloader.to_stderr(str(e))
+				self._downloader.to_stderr(compat_str(e))

 		# for soundcloud, a request to a cross domain is required for cookies
 		request = urllib2.Request('http://media.soundcloud.com/crossdomain.xml', std_headers)
@@ -2686,7 +2766,7 @@ class InfoQIE(InfoExtractor):
 		try:
 			webpage = urllib2.urlopen(request).read()
 		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % str(err))
+			self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err))
 			return

 		self.report_extraction(url)
@@ -2772,15 +2852,15 @@ class MixcloudIE(InfoExtractor):
 		return None

 	def _print_formats(self, formats):
-		print 'Available formats:'
+		print('Available formats:')
 		for fmt in formats.keys():
 			for b in formats[fmt]:
 				try:
 					ext = formats[fmt][b][0]
-					print '%s\t%s\t[%s]' % (fmt, b, ext.split('.')[-1])
+					print('%s\t%s\t[%s]' % (fmt, b, ext.split('.')[-1]))
 				except TypeError: # we have no bitrate info
 					ext = formats[fmt][0]
-					print '%s\t%s\t[%s]' % (fmt, '??', ext.split('.')[-1])
+					print('%s\t%s\t[%s]' % (fmt, '??', ext.split('.')[-1]))
 					break

 	def _real_extract(self, url):
@@ -2800,7 +2880,7 @@
 			self.report_download_json(file_url)
 			jsonData = urllib2.urlopen(request).read()
 		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: Unable to retrieve file: %s' % str(err))
+			self._downloader.trouble(u'ERROR: Unable to retrieve file: %s' % compat_str(err))
 			return

 		# parse JSON
@@ -2984,7 +3064,7 @@ class MTVIE(InfoExtractor):
 		try:
 			webpage = urllib2.urlopen(request).read()
 		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % str(err))
+			self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err))
 			return

 		mobj = re.search(r'<meta name="mtv_vt" content="([^"]+)"/>', webpage)
@@ -3017,7 +3097,7 @@
 		try:
 			metadataXml = urllib2.urlopen(request).read()
 		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: unable to download video metadata: %s' % str(err))
+			self._downloader.trouble(u'ERROR: unable to download video metadata: %s' % compat_str(err))
 			return

 		mdoc = xml.etree.ElementTree.fromstring(metadataXml)
@@ -3104,7 +3184,7 @@ class YoukuIE(InfoExtractor):
 			self.report_download_webpage(video_id)
 			jsondata = urllib2.urlopen(request).read()
 		except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
-			self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
+			self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
 			return

 		self.report_extraction(video_id)
@@ -3280,7 +3360,7 @@ class GooglePlusIE(InfoExtractor):
 		try:
 			webpage = urllib2.urlopen(request).read()
 		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: Unable to retrieve entry webpage: %s' % str(err))
+			self._downloader.trouble(u'ERROR: Unable to retrieve entry webpage: %s' % compat_str(err))
 			return

 		# Extract update date
@@ -3322,7 +3402,7 @@
 		try:
 			webpage = urllib2.urlopen(request).read()
 		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
+			self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
 			return

 		self.report_extract_vid_page(video_page)
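For reference, a minimal standalone sketch (not part of the patch) of the selection order the Vimeo hunk above introduces: candidate files are bucketed by quality first and by codec preference second, so any HD file wins over any SD file regardless of codec. The function name pick_file and the files_by_codec argument are hypothetical stand-ins for the extractor's config["video"]["files"] structure.

# Illustrative sketch only; mirrors the hd/sd/other bucketing added to VimeoIE.
CODECS = [('h264', 'mp4'), ('vp8', 'flv'), ('vp6', 'flv')]

def pick_file(files_by_codec):
    buckets = {'hd': [], 'sd': [], 'other': []}
    for codec, ext in CODECS:
        if codec not in files_by_codec:
            continue
        qualities = files_by_codec[codec]
        if 'hd' in qualities:
            buckets['hd'].append((codec, ext, 'hd'))
        elif 'sd' in qualities:
            buckets['sd'].append((codec, ext, 'sd'))
        else:
            buckets['other'].append((codec, ext, qualities[0]))
    # Quality outranks codec: any HD entry is preferred over every SD entry.
    for quality in ('hd', 'sd', 'other'):
        if buckets[quality]:
            return buckets[quality][0]
    raise ValueError('no known codec found')

# HD h264 beats SD vp8; with no HD available, the first codec offering SD wins.
print(pick_file({'h264': ['hd', 'sd'], 'vp8': ['sd']}))  # ('h264', 'mp4', 'hd')
print(pick_file({'vp8': ['sd'], 'vp6': ['sd']}))         # ('vp8', 'flv', 'sd')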
