From 4e1582f372d74d551e19d319e5b345002def480d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jaime=20Marqui=CC=81nez=20Ferra=CC=81ndiz?= Date: Mon, 4 Mar 2013 11:27:25 +0100 Subject: Use red color when printing error messages --- youtube_dl/FileDownloader.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'youtube_dl') diff --git a/youtube_dl/FileDownloader.py b/youtube_dl/FileDownloader.py index 57f741c30..2f6c393a4 100644 --- a/youtube_dl/FileDownloader.py +++ b/youtube_dl/FileDownloader.py @@ -246,6 +246,18 @@ class FileDownloader(object): warning_message=u'%s %s' % (_msg_header,message) self.to_stderr(warning_message) + def report_error(self, message, tb=None): + ''' + Do the same as trouble, but prefixes the message with 'ERROR:', colored + in red if stderr is a tty file. + ''' + if sys.stderr.isatty(): + _msg_header = u'\033[0;31mERROR:\033[0m' + else: + _msg_header = u'ERROR:' + error_message = u'%s %s' % (_msg_header, message) + self.trouble(error_message, tb) + def slow_down(self, start_time, byte_counter): """Sleep if the download speed is over the rate limit.""" rate_limit = self.params.get('ratelimit', None) -- cgit v1.2.3 From 6622d22c79aa35ab1bd99c453afbdbecc0a9d61d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jaime=20Marqui=CC=81nez=20Ferra=CC=81ndiz?= Date: Mon, 4 Mar 2013 11:47:58 +0100 Subject: Use report_error in FileDownloader.py --- youtube_dl/FileDownloader.py | 40 ++++++++++++++++++++++------------------ 1 file changed, 22 insertions(+), 18 deletions(-) (limited to 'youtube_dl') diff --git a/youtube_dl/FileDownloader.py b/youtube_dl/FileDownloader.py index 2f6c393a4..8d21a79d5 100644 --- a/youtube_dl/FileDownloader.py +++ b/youtube_dl/FileDownloader.py @@ -289,7 +289,7 @@ class FileDownloader(object): return os.rename(encodeFilename(old_filename), encodeFilename(new_filename)) except (IOError, OSError) as err: - self.trouble(u'ERROR: unable to rename file') + self.report_error(u'unable to rename file') def try_utime(self, filename, last_modified_hdr): """Try to set the last-modified time of the given file.""" @@ -385,7 +385,7 @@ class FileDownloader(object): filename = self.params['outtmpl'] % template_dict return filename except (ValueError, KeyError) as err: - self.trouble(u'ERROR: invalid system charset or erroneous output template') + self.report_error(u'invalid system charset or erroneous output template') return None def _match_entry(self, info_dict): @@ -449,7 +449,7 @@ class FileDownloader(object): if dn != '' and not os.path.exists(dn): # dn is already encoded os.makedirs(dn) except (OSError, IOError) as err: - self.trouble(u'ERROR: unable to create directory ' + compat_str(err)) + self.report_error(u'unable to create directory ' + compat_str(err)) return if self.params.get('writedescription', False): @@ -459,7 +459,7 @@ class FileDownloader(object): with io.open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile: descfile.write(info_dict['description']) except (OSError, IOError): - self.trouble(u'ERROR: Cannot write description file ' + descfn) + self.report_error(u'Cannot write description file ' + descfn) return if self.params.get('writesubtitles', False) and 'subtitles' in info_dict and info_dict['subtitles']: @@ -471,7 +471,7 @@ class FileDownloader(object): with io.open(encodeFilename(srtfn), 'w', encoding='utf-8') as srtfile: srtfile.write(info_dict['subtitles']) except (OSError, IOError): - self.trouble(u'ERROR: Cannot write subtitles file ' + descfn) + self.report_error(u'Cannot write subtitles file ' + descfn) return if 
self.params.get('writeinfojson', False): @@ -481,7 +481,7 @@ class FileDownloader(object): json_info_dict = dict((k, v) for k,v in info_dict.items() if not k in ['urlhandle']) write_json_file(json_info_dict, encodeFilename(infofn)) except (OSError, IOError): - self.trouble(u'ERROR: Cannot write metadata to JSON file ' + infofn) + self.report_error(u'Cannot write metadata to JSON file ' + infofn) return if not self.params.get('skip_download', False): @@ -493,17 +493,17 @@ class FileDownloader(object): except (OSError, IOError) as err: raise UnavailableVideoError() except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: - self.trouble(u'ERROR: unable to download video data: %s' % str(err)) + self.report_error(u'unable to download video data: %s' % str(err)) return except (ContentTooShortError, ) as err: - self.trouble(u'ERROR: content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded)) + self.report_error(u'content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded)) return if success: try: self.post_process(filename, info_dict) except (PostProcessingError) as err: - self.trouble(u'ERROR: postprocessing: %s' % str(err)) + self.report_error(u'postprocessing: %s' % str(err)) return def download(self, url_list): @@ -534,7 +534,7 @@ class FileDownloader(object): break except Exception as e: if self.params.get('ignoreerrors', False): - self.trouble(u'ERROR: ' + compat_str(e), tb=compat_str(traceback.format_exc())) + self.report_error(u'' + compat_str(e), tb=compat_str(traceback.format_exc())) break else: raise @@ -548,13 +548,14 @@ class FileDownloader(object): self.increment_downloads() self.process_info(video) except UnavailableVideoError: - self.trouble(u'\nERROR: unable to download video') + self.to_stderr(u"\n") + self.report_error(u'unable to download video') # Suitable InfoExtractor had been found; go to next URL break if not suitable_found: - self.trouble(u'ERROR: no suitable InfoExtractor: %s' % url) + self.report_error(u'no suitable InfoExtractor: %s' % url) return self._download_retcode @@ -589,7 +590,7 @@ class FileDownloader(object): try: subprocess.call(['rtmpdump', '-h'], stdout=(open(os.path.devnull, 'w')), stderr=subprocess.STDOUT) except (OSError, IOError): - self.trouble(u'ERROR: RTMP download detected but "rtmpdump" could not be run') + self.report_error(u'RTMP download detected but "rtmpdump" could not be run') return False # Download using rtmpdump. 
rtmpdump returns exit code 2 when @@ -634,7 +635,8 @@ class FileDownloader(object): }) return True else: - self.trouble(u'\nERROR: rtmpdump exited with code %d' % retval) + self.to_stderr(u"\n") + self.report_error(u'rtmpdump exited with code %d' % retval) return False def _do_download(self, filename, info_dict): @@ -734,7 +736,7 @@ class FileDownloader(object): self.report_retry(count, retries) if count > retries: - self.trouble(u'ERROR: giving up after %s retries' % retries) + self.report_error(u'giving up after %s retries' % retries) return False data_len = data.info().get('Content-length', None) @@ -770,12 +772,13 @@ class FileDownloader(object): filename = self.undo_temp_name(tmpfilename) self.report_destination(filename) except (OSError, IOError) as err: - self.trouble(u'ERROR: unable to open for writing: %s' % str(err)) + self.report_error(u'unable to open for writing: %s' % str(err)) return False try: stream.write(data_block) except (IOError, OSError) as err: - self.trouble(u'\nERROR: unable to write data: %s' % str(err)) + self.to_stderr(u"\n") + self.report_error(u'unable to write data: %s' % str(err)) return False if not self.params.get('noresizebuffer', False): block_size = self.best_block_size(after - before, len(data_block)) @@ -801,7 +804,8 @@ class FileDownloader(object): self.slow_down(start, byte_counter - resume_len) if stream is None: - self.trouble(u'\nERROR: Did not get any data blocks') + self.to_stderr(u"\n") + self.report_error(u'Did not get any data blocks') return False stream.close() self.report_finish() -- cgit v1.2.3 From e5f30ade100b33127f31dd8989585a87e6faa6e4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jaime=20Marqui=CC=81nez=20Ferra=CC=81ndiz?= Date: Mon, 4 Mar 2013 15:56:14 +0100 Subject: Use report_error in InfoExtractors.py Some calls haven't been changed --- youtube_dl/InfoExtractors.py | 300 +++++++++++++++++++++---------------------- 1 file changed, 150 insertions(+), 150 deletions(-) (limited to 'youtube_dl') diff --git a/youtube_dl/InfoExtractors.py b/youtube_dl/InfoExtractors.py index 7ce84fe79..6328332a7 100755 --- a/youtube_dl/InfoExtractors.py +++ b/youtube_dl/InfoExtractors.py @@ -388,13 +388,13 @@ class YoutubeIE(InfoExtractor): self.report_age_confirmation() age_results = compat_urllib_request.urlopen(request).read().decode('utf-8') except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: - self._downloader.trouble(u'ERROR: unable to confirm age: %s' % compat_str(err)) + self._downloader.report_error(u'unable to confirm age: %s' % compat_str(err)) return def _extract_id(self, url): mobj = re.match(self._VALID_URL, url, re.VERBOSE) if mobj is None: - self._downloader.trouble(u'ERROR: invalid URL: %s' % url) + self._downloader.report_error(u'invalid URL: %s' % url) return video_id = mobj.group(2) return video_id @@ -413,7 +413,7 @@ class YoutubeIE(InfoExtractor): try: video_webpage_bytes = compat_urllib_request.urlopen(request).read() except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: - self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err)) + self._downloader.report_error(u'unable to download video webpage: %s' % compat_str(err)) return video_webpage = video_webpage_bytes.decode('utf-8', 'ignore') @@ -438,18 +438,18 @@ class YoutubeIE(InfoExtractor): if 'token' in video_info: break except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: - self._downloader.trouble(u'ERROR: unable to download 
video info webpage: %s' % compat_str(err)) + self._downloader.report_error(u'unable to download video info webpage: %s' % compat_str(err)) return if 'token' not in video_info: if 'reason' in video_info: - self._downloader.trouble(u'ERROR: YouTube said: %s' % video_info['reason'][0]) + self._downloader.report_error(u'YouTube said: %s' % video_info['reason'][0]) else: - self._downloader.trouble(u'ERROR: "token" parameter not in video info for unknown reason') + self._downloader.report_error(u'"token" parameter not in video info for unknown reason') return # Check for "rental" videos if 'ypc_video_rental_bar_text' in video_info and 'author' not in video_info: - self._downloader.trouble(u'ERROR: "rental" videos not supported') + self._downloader.report_error(u'"rental" videos not supported') return # Start extracting information @@ -457,7 +457,7 @@ class YoutubeIE(InfoExtractor): # uploader if 'author' not in video_info: - self._downloader.trouble(u'ERROR: unable to extract uploader name') + self._downloader.report_error(u'unable to extract uploader name') return video_uploader = compat_urllib_parse.unquote_plus(video_info['author'][0]) @@ -471,7 +471,7 @@ class YoutubeIE(InfoExtractor): # title if 'title' not in video_info: - self._downloader.trouble(u'ERROR: unable to extract video title') + self._downloader.report_error(u'unable to extract video title') return video_title = compat_urllib_parse.unquote_plus(video_info['title'][0]) @@ -537,7 +537,7 @@ class YoutubeIE(InfoExtractor): format_list = available_formats existing_formats = [x for x in format_list if x in url_map] if len(existing_formats) == 0: - self._downloader.trouble(u'ERROR: no known formats available for video') + self._downloader.report_error(u'no known formats available for video') return if self._downloader.params.get('listformats', None): self._print_formats(existing_formats) @@ -558,10 +558,10 @@ class YoutubeIE(InfoExtractor): video_url_list = [(rf, url_map[rf])] break if video_url_list is None: - self._downloader.trouble(u'ERROR: requested format not available') + self._downloader.report_error(u'requested format not available') return else: - self._downloader.trouble(u'ERROR: no conn or url_encoded_fmt_stream_map information found in video info') + self._downloader.report_error(u'no conn or url_encoded_fmt_stream_map information found in video info') return results = [] @@ -624,7 +624,7 @@ class MetacafeIE(InfoExtractor): self.report_disclaimer() disclaimer = compat_urllib_request.urlopen(request).read() except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: - self._downloader.trouble(u'ERROR: unable to retrieve disclaimer: %s' % compat_str(err)) + self._downloader.report_error(u'unable to retrieve disclaimer: %s' % compat_str(err)) return # Confirm age @@ -637,14 +637,14 @@ class MetacafeIE(InfoExtractor): self.report_age_confirmation() disclaimer = compat_urllib_request.urlopen(request).read() except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: - self._downloader.trouble(u'ERROR: unable to confirm age: %s' % compat_str(err)) + self._downloader.report_error(u'unable to confirm age: %s' % compat_str(err)) return def _real_extract(self, url): # Extract id and simplified title from URL mobj = re.match(self._VALID_URL, url) if mobj is None: - self._downloader.trouble(u'ERROR: invalid URL: %s' % url) + self._downloader.report_error(u'invalid URL: %s' % url) return video_id = mobj.group(1) @@ -661,7 +661,7 @@ class MetacafeIE(InfoExtractor): 
self.report_download_webpage(video_id) webpage = compat_urllib_request.urlopen(request).read() except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: - self._downloader.trouble(u'ERROR: unable retrieve video webpage: %s' % compat_str(err)) + self._downloader.report_error(u'unable retrieve video webpage: %s' % compat_str(err)) return # Extract URL, uploader and title from webpage @@ -681,15 +681,15 @@ class MetacafeIE(InfoExtractor): else: mobj = re.search(r' name="flashvars" value="(.*?)"', webpage) if mobj is None: - self._downloader.trouble(u'ERROR: unable to extract media URL') + self._downloader.report_error(u'unable to extract media URL') return vardict = compat_parse_qs(mobj.group(1)) if 'mediaData' not in vardict: - self._downloader.trouble(u'ERROR: unable to extract media URL') + self._downloader.report_error(u'unable to extract media URL') return mobj = re.search(r'"mediaURL":"(http.*?)","key":"(.*?)"', vardict['mediaData'][0]) if mobj is None: - self._downloader.trouble(u'ERROR: unable to extract media URL') + self._downloader.report_error(u'unable to extract media URL') return mediaURL = mobj.group(1).replace('\\/', '/') video_extension = mediaURL[-3:] @@ -697,13 +697,13 @@ class MetacafeIE(InfoExtractor): mobj = re.search(r'(?im)(.*) - Video', webpage) if mobj is None: - self._downloader.trouble(u'ERROR: unable to extract title') + self._downloader.report_error(u'unable to extract title') return video_title = mobj.group(1).decode('utf-8') mobj = re.search(r'submitter=(.*?);', webpage) if mobj is None: - self._downloader.trouble(u'ERROR: unable to extract uploader nickname') + self._downloader.report_error(u'unable to extract uploader nickname') return video_uploader = mobj.group(1) @@ -735,7 +735,7 @@ class DailymotionIE(InfoExtractor): # Extract id and simplified title from URL mobj = re.match(self._VALID_URL, url) if mobj is None: - self._downloader.trouble(u'ERROR: invalid URL: %s' % url) + self._downloader.report_error(u'invalid URL: %s' % url) return video_id = mobj.group(1).split('_')[0].split('?')[0] @@ -751,7 +751,7 @@ class DailymotionIE(InfoExtractor): self.report_extraction(video_id) mobj = re.search(r'\s*var flashvars = (.*)', webpage) if mobj is None: - self._downloader.trouble(u'ERROR: unable to extract media URL') + self._downloader.report_error(u'unable to extract media URL') return flashvars = compat_urllib_parse.unquote(mobj.group(1)) @@ -761,12 +761,12 @@ class DailymotionIE(InfoExtractor): self._downloader.to_screen(u'[dailymotion] Using %s' % key) break else: - self._downloader.trouble(u'ERROR: unable to extract video URL') + self._downloader.report_error(u'unable to extract video URL') return mobj = re.search(r'"' + max_quality + r'":"(.+?)"', flashvars) if mobj is None: - self._downloader.trouble(u'ERROR: unable to extract video URL') + self._downloader.report_error(u'unable to extract video URL') return video_url = compat_urllib_parse.unquote(mobj.group(1)).replace('\\/', '/') @@ -775,7 +775,7 @@ class DailymotionIE(InfoExtractor): mobj = re.search(r'', webpage) if mobj is None: - self._downloader.trouble(u'ERROR: unable to extract title') + self._downloader.report_error(u'unable to extract title') return video_title = unescapeHTML(mobj.group('title')) @@ -827,7 +827,7 @@ class PhotobucketIE(InfoExtractor): # Extract id from URL mobj = re.match(self._VALID_URL, url) if mobj is None: - self._downloader.trouble(u'ERROR: Invalid URL: %s' % url) + self._downloader.report_error(u'Invalid URL: %s' % url) return video_id = 
mobj.group(1) @@ -840,14 +840,14 @@ class PhotobucketIE(InfoExtractor): self.report_download_webpage(video_id) webpage = compat_urllib_request.urlopen(request).read() except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: - self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err)) + self._downloader.report_error(u'Unable to retrieve video webpage: %s' % compat_str(err)) return # Extract URL, uploader, and title from webpage self.report_extraction(video_id) mobj = re.search(r'', webpage) if mobj is None: - self._downloader.trouble(u'ERROR: unable to extract media URL') + self._downloader.report_error(u'unable to extract media URL') return mediaURL = compat_urllib_parse.unquote(mobj.group(1)) @@ -855,7 +855,7 @@ class PhotobucketIE(InfoExtractor): mobj = re.search(r'(.*) video by (.*) - Photobucket', webpage) if mobj is None: - self._downloader.trouble(u'ERROR: unable to extract title') + self._downloader.report_error(u'unable to extract title') return video_title = mobj.group(1).decode('utf-8') @@ -896,7 +896,7 @@ class YahooIE(InfoExtractor): # Extract ID from URL mobj = re.match(self._VALID_URL, url) if mobj is None: - self._downloader.trouble(u'ERROR: Invalid URL: %s' % url) + self._downloader.report_error(u'Invalid URL: %s' % url) return video_id = mobj.group(2) @@ -909,18 +909,18 @@ class YahooIE(InfoExtractor): try: webpage = compat_urllib_request.urlopen(request).read() except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: - self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err)) + self._downloader.report_error(u'Unable to retrieve video webpage: %s' % compat_str(err)) return mobj = re.search(r'\("id", "([0-9]+)"\);', webpage) if mobj is None: - self._downloader.trouble(u'ERROR: Unable to extract id field') + self._downloader.report_error(u'Unable to extract id field') return yahoo_id = mobj.group(1) mobj = re.search(r'\("vid", "([0-9]+)"\);', webpage) if mobj is None: - self._downloader.trouble(u'ERROR: Unable to extract vid field') + self._downloader.report_error(u'Unable to extract vid field') return yahoo_vid = mobj.group(1) @@ -933,34 +933,34 @@ class YahooIE(InfoExtractor): self.report_download_webpage(video_id) webpage = compat_urllib_request.urlopen(request).read() except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: - self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err)) + self._downloader.report_error(u'Unable to retrieve video webpage: %s' % compat_str(err)) return # Extract uploader and title from webpage self.report_extraction(video_id) mobj = re.search(r'', webpage) if mobj is None: - self._downloader.trouble(u'ERROR: unable to extract video title') + self._downloader.report_error(u'unable to extract video title') return video_title = mobj.group(1).decode('utf-8') mobj = re.search(r'
(.*)
', webpage) if mobj is None: - self._downloader.trouble(u'ERROR: unable to extract video uploader') + self._downloader.report_error(u'unable to extract video uploader') return video_uploader = mobj.group(1).decode('utf-8') # Extract video thumbnail mobj = re.search(r'', webpage) if mobj is None: - self._downloader.trouble(u'ERROR: unable to extract video thumbnail') + self._downloader.report_error(u'unable to extract video thumbnail') return video_thumbnail = mobj.group(1).decode('utf-8') # Extract video description mobj = re.search(r'', webpage) if mobj is None: - self._downloader.trouble(u'ERROR: unable to extract video description') + self._downloader.report_error(u'unable to extract video description') return video_description = mobj.group(1).decode('utf-8') if not video_description: @@ -969,13 +969,13 @@ class YahooIE(InfoExtractor): # Extract video height and width mobj = re.search(r'', webpage) if mobj is None: - self._downloader.trouble(u'ERROR: unable to extract video height') + self._downloader.report_error(u'unable to extract video height') return yv_video_height = mobj.group(1) mobj = re.search(r'', webpage) if mobj is None: - self._downloader.trouble(u'ERROR: unable to extract video width') + self._downloader.report_error(u'unable to extract video width') return yv_video_width = mobj.group(1) @@ -991,13 +991,13 @@ class YahooIE(InfoExtractor): self.report_download_webpage(video_id) webpage = compat_urllib_request.urlopen(request).read() except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: - self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err)) + self._downloader.report_error(u'Unable to retrieve video webpage: %s' % compat_str(err)) return # Extract media URL from playlist XML mobj = re.search(r'(.*)', webpage) if mobj is None: - self._downloader.trouble(u'ERROR: unable to extract title') + self._downloader.report_error(u'unable to extract title') return video_title = mobj.group(1) # video uploader is domain name mobj = re.match(r'(?:https?://)?([^/]*)/.*', url) if mobj is None: - self._downloader.trouble(u'ERROR: unable to extract title') + self._downloader.report_error(u'unable to extract title') return video_uploader = mobj.group(1) @@ -1437,7 +1437,7 @@ class YoutubeSearchIE(InfoExtractor): def _real_extract(self, query): mobj = re.match(self._VALID_URL, query) if mobj is None: - self._downloader.trouble(u'ERROR: invalid search query "%s"' % query) + self._downloader.report_error(u'invalid search query "%s"' % query) return prefix, query = query.split(':') @@ -1453,7 +1453,7 @@ class YoutubeSearchIE(InfoExtractor): try: n = int(prefix) if n <= 0: - self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query)) + self._downloader.report_error(u'invalid download number %s for query "%s"' % (n, query)) return elif n > self._max_youtube_results: self._downloader.report_warning(u'ytsearch returns max %i results (you requested %i)' % (self._max_youtube_results, n)) @@ -1478,7 +1478,7 @@ class YoutubeSearchIE(InfoExtractor): try: data = compat_urllib_request.urlopen(request).read().decode('utf-8') except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: - self._downloader.trouble(u'ERROR: unable to download API page: %s' % compat_str(err)) + self._downloader.report_error(u'unable to download API page: %s' % compat_str(err)) return api_response = json.loads(data)['data'] @@ -1519,7 +1519,7 @@ class GoogleSearchIE(InfoExtractor): def 
_real_extract(self, query): mobj = re.match(self._VALID_URL, query) if mobj is None: - self._downloader.trouble(u'ERROR: invalid search query "%s"' % query) + self._downloader.report_error(u'invalid search query "%s"' % query) return prefix, query = query.split(':') @@ -1535,7 +1535,7 @@ class GoogleSearchIE(InfoExtractor): try: n = int(prefix) if n <= 0: - self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query)) + self._downloader.report_error(u'invalid download number %s for query "%s"' % (n, query)) return elif n > self._max_google_results: self._downloader.report_warning(u'gvsearch returns max %i results (you requested %i)' % (self._max_google_results, n)) @@ -1559,7 +1559,7 @@ class GoogleSearchIE(InfoExtractor): try: page = compat_urllib_request.urlopen(request).read() except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: - self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err)) + self._downloader.report_error(u'unable to download webpage: %s' % compat_str(err)) return # Extract video identifiers @@ -1603,7 +1603,7 @@ class YahooSearchIE(InfoExtractor): def _real_extract(self, query): mobj = re.match(self._VALID_URL, query) if mobj is None: - self._downloader.trouble(u'ERROR: invalid search query "%s"' % query) + self._downloader.report_error(u'invalid search query "%s"' % query) return prefix, query = query.split(':') @@ -1619,7 +1619,7 @@ class YahooSearchIE(InfoExtractor): try: n = int(prefix) if n <= 0: - self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query)) + self._downloader.report_error(u'invalid download number %s for query "%s"' % (n, query)) return elif n > self._max_yahoo_results: self._downloader.report_warning(u'yvsearch returns max %i results (you requested %i)' % (self._max_yahoo_results, n)) @@ -1644,7 +1644,7 @@ class YahooSearchIE(InfoExtractor): try: page = compat_urllib_request.urlopen(request).read() except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: - self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err)) + self._downloader.report_error(u'unable to download webpage: %s' % compat_str(err)) return # Extract video identifiers @@ -1706,7 +1706,7 @@ class YoutubePlaylistIE(InfoExtractor): # Extract playlist id mobj = re.match(self._VALID_URL, url, re.VERBOSE) if mobj is None: - self._downloader.trouble(u'ERROR: invalid url: %s' % url) + self._downloader.report_error(u'invalid url: %s' % url) return # Download playlist videos from API @@ -1721,17 +1721,17 @@ class YoutubePlaylistIE(InfoExtractor): try: page = compat_urllib_request.urlopen(url).read().decode('utf8') except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: - self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err)) + self._downloader.report_error(u'unable to download webpage: %s' % compat_str(err)) return try: response = json.loads(page) except ValueError as err: - self._downloader.trouble(u'ERROR: Invalid JSON in API response: ' + compat_str(err)) + self._downloader.report_error(u'Invalid JSON in API response: ' + compat_str(err)) return if not 'feed' in response or not 'entry' in response['feed']: - self._downloader.trouble(u'ERROR: Got a malformed response from YouTube API') + self._downloader.report_error(u'Got a malformed response from YouTube API') return videos += [ (entry['yt$position']['$t'], entry['content']['src']) for 
entry in response['feed']['entry'] @@ -1777,7 +1777,7 @@ class YoutubeChannelIE(InfoExtractor): # Extract channel id mobj = re.match(self._VALID_URL, url) if mobj is None: - self._downloader.trouble(u'ERROR: invalid url: %s' % url) + self._downloader.report_error(u'invalid url: %s' % url) return # Download channel pages @@ -1792,7 +1792,7 @@ class YoutubeChannelIE(InfoExtractor): try: page = compat_urllib_request.urlopen(request).read().decode('utf8') except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: - self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err)) + self._downloader.report_error(u'unable to download webpage: %s' % compat_str(err)) return # Extract video identifiers @@ -1835,7 +1835,7 @@ class YoutubeUserIE(InfoExtractor): # Extract username mobj = re.match(self._VALID_URL, url) if mobj is None: - self._downloader.trouble(u'ERROR: invalid url: %s' % url) + self._downloader.report_error(u'invalid url: %s' % url) return username = mobj.group(1) @@ -1857,7 +1857,7 @@ class YoutubeUserIE(InfoExtractor): try: page = compat_urllib_request.urlopen(request).read().decode('utf-8') except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: - self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err)) + self._downloader.report_error(u'unable to download webpage: %s' % compat_str(err)) return # Extract video identifiers @@ -1915,7 +1915,7 @@ class BlipTVUserIE(InfoExtractor): # Extract username mobj = re.match(self._VALID_URL, url) if mobj is None: - self._downloader.trouble(u'ERROR: invalid url: %s' % url) + self._downloader.report_error(u'invalid url: %s' % url) return username = mobj.group(1) @@ -1929,7 +1929,7 @@ class BlipTVUserIE(InfoExtractor): mobj = re.search(r'data-users-id="([^"]+)"', page) page_base = page_base % mobj.group(1) except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: - self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err)) + self._downloader.report_error(u'unable to download webpage: %s' % compat_str(err)) return @@ -1948,7 +1948,7 @@ class BlipTVUserIE(InfoExtractor): try: page = compat_urllib_request.urlopen(request).read().decode('utf-8') except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: - self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err)) + self._downloader.report_error(u'unable to download webpage: %s' % str(err)) return # Extract video identifiers @@ -2012,7 +2012,7 @@ class DepositFilesIE(InfoExtractor): self.report_download_webpage(file_id) webpage = compat_urllib_request.urlopen(request).read() except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: - self._downloader.trouble(u'ERROR: Unable to retrieve file webpage: %s' % compat_str(err)) + self._downloader.report_error(u'Unable to retrieve file webpage: %s' % compat_str(err)) return # Search for the real file URL @@ -2022,9 +2022,9 @@ class DepositFilesIE(InfoExtractor): mobj = re.search(r'(Attention.*?)', webpage, re.DOTALL) if (mobj is not None) and (mobj.group(1) is not None): restriction_message = re.sub('\s+', ' ', mobj.group(1)).strip() - self._downloader.trouble(u'ERROR: %s' % restriction_message) + self._downloader.report_error(u'%s' % restriction_message) else: - self._downloader.trouble(u'ERROR: unable to extract download URL from: %s' % url) + self._downloader.report_error(u'unable to 
extract download URL from: %s' % url) return file_url = mobj.group(1) @@ -2033,7 +2033,7 @@ class DepositFilesIE(InfoExtractor): # Search for file title mobj = re.search(r'', webpage) if mobj is None: - self._downloader.trouble(u'ERROR: unable to extract title') + self._downloader.report_error(u'unable to extract title') return file_title = mobj.group(1).decode('utf-8') @@ -2106,7 +2106,7 @@ class FacebookIE(InfoExtractor): def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) if mobj is None: - self._downloader.trouble(u'ERROR: invalid URL: %s' % url) + self._downloader.report_error(u'invalid URL: %s' % url) return video_id = mobj.group('ID') @@ -2162,7 +2162,7 @@ class BlipTVIE(InfoExtractor): def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) if mobj is None: - self._downloader.trouble(u'ERROR: invalid URL: %s' % url) + self._downloader.report_error(u'invalid URL: %s' % url) return urlp = compat_urllib_parse_urlparse(url) @@ -2209,7 +2209,7 @@ class BlipTVIE(InfoExtractor): json_code_bytes = urlh.read() json_code = json_code_bytes.decode('utf-8') except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: - self._downloader.trouble(u'ERROR: unable to read video info webpage: %s' % compat_str(err)) + self._downloader.report_error(u'unable to read video info webpage: %s' % compat_str(err)) return try: @@ -2240,7 +2240,7 @@ class BlipTVIE(InfoExtractor): 'user_agent': 'iTunes/10.6.1', } except (ValueError,KeyError) as err: - self._downloader.trouble(u'ERROR: unable to parse video information: %s' % repr(err)) + self._downloader.report_error(u'unable to parse video information: %s' % repr(err)) return return [info] @@ -2262,7 +2262,7 @@ class MyVideoIE(InfoExtractor): def _real_extract(self,url): mobj = re.match(self._VALID_URL, url) if mobj is None: - self._download.trouble(u'ERROR: invalid URL: %s' % url) + self._download.report_error(u'invalid URL: %s' % url) return video_id = mobj.group(1) @@ -2275,13 +2275,13 @@ class MyVideoIE(InfoExtractor): mobj = re.search(r'', webpage) if mobj is None: - self._downloader.trouble(u'ERROR: unable to extract media URL') + self._downloader.report_error(u'unable to extract media URL') return video_url = mobj.group(1) + ('/%s.flv' % video_id) mobj = re.search('([^<]+)', webpage) if mobj is None: - self._downloader.trouble(u'ERROR: unable to extract title') + self._downloader.report_error(u'unable to extract title') return video_title = mobj.group(1) @@ -2354,7 +2354,7 @@ class ComedyCentralIE(InfoExtractor): def _real_extract(self, url): mobj = re.match(self._VALID_URL, url, re.VERBOSE) if mobj is None: - self._downloader.trouble(u'ERROR: invalid URL: %s' % url) + self._downloader.report_error(u'invalid URL: %s' % url) return if mobj.group('shortname'): @@ -2385,16 +2385,16 @@ class ComedyCentralIE(InfoExtractor): html = htmlHandle.read() webpage = html.decode('utf-8') except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: - self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err)) + self._downloader.report_error(u'unable to download webpage: %s' % compat_str(err)) return if dlNewest: url = htmlHandle.geturl() mobj = re.match(self._VALID_URL, url, re.VERBOSE) if mobj is None: - self._downloader.trouble(u'ERROR: Invalid redirected URL: ' + url) + self._downloader.report_error(u'Invalid redirected URL: ' + url) return if mobj.group('episode') == '': - self._downloader.trouble(u'ERROR: Redirected URL is still not specific: ' + 
url) + self._downloader.report_error(u'Redirected URL is still not specific: ' + url) return epTitle = mobj.group('episode') @@ -2407,7 +2407,7 @@ class ComedyCentralIE(InfoExtractor): altMovieParams = re.findall('data-mgid="([^"]*(?:episode|video).*?:.*?)"', webpage) if len(altMovieParams) == 0: - self._downloader.trouble(u'ERROR: unable to find Flash URL in webpage ' + url) + self._downloader.report_error(u'unable to find Flash URL in webpage ' + url) return else: mMovieParams = [("http://media.mtvnservices.com/" + altMovieParams[0], altMovieParams[0])] @@ -2418,7 +2418,7 @@ class ComedyCentralIE(InfoExtractor): try: indexXml = compat_urllib_request.urlopen(indexUrl).read() except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: - self._downloader.trouble(u'ERROR: unable to download episode index: ' + compat_str(err)) + self._downloader.report_error(u'unable to download episode index: ' + compat_str(err)) return results = [] @@ -2439,7 +2439,7 @@ class ComedyCentralIE(InfoExtractor): try: configXml = compat_urllib_request.urlopen(configReq).read() except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: - self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err)) + self._downloader.report_error(u'unable to download webpage: %s' % compat_str(err)) return cdoc = xml.etree.ElementTree.fromstring(configXml) @@ -2506,7 +2506,7 @@ class EscapistIE(InfoExtractor): def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) if mobj is None: - self._downloader.trouble(u'ERROR: invalid URL: %s' % url) + self._downloader.report_error(u'invalid URL: %s' % url) return showName = mobj.group('showname') videoId = mobj.group('episode') @@ -2518,7 +2518,7 @@ class EscapistIE(InfoExtractor): m = re.match(r'text/html; charset="?([^"]+)"?', webPage.headers['Content-Type']) webPage = webPageBytes.decode(m.group(1) if m else 'utf-8') except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: - self._downloader.trouble(u'ERROR: unable to download webpage: ' + compat_str(err)) + self._downloader.report_error(u'unable to download webpage: ' + compat_str(err)) return descMatch = re.search('(.*?)\s+-\s+XVID', webpage) if mobj is None: - self._downloader.trouble(u'ERROR: unable to extract video title') + self._downloader.report_error(u'unable to extract video title') return video_title = mobj.group(1) @@ -2678,7 +2678,7 @@ class XVideosIE(InfoExtractor): # Extract video thumbnail mobj = re.search(r'http://(?:img.*?\.)xvideos.com/videos/thumbs/[a-fA-F0-9]+/[a-fA-F0-9]+/[a-fA-F0-9]+/[a-fA-F0-9]+/([a-fA-F0-9.]+jpg)', webpage) if mobj is None: - self._downloader.trouble(u'ERROR: unable to extract video thumbnail') + self._downloader.report_error(u'unable to extract video thumbnail') return video_thumbnail = mobj.group(0) @@ -2722,7 +2722,7 @@ class SoundcloudIE(InfoExtractor): def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) if mobj is None: - self._downloader.trouble(u'ERROR: invalid URL: %s' % url) + self._downloader.report_error(u'invalid URL: %s' % url) return # extract uploader (which is in the url) @@ -2740,7 +2740,7 @@ class SoundcloudIE(InfoExtractor): info_json_bytes = compat_urllib_request.urlopen(request).read() info_json = info_json_bytes.decode('utf-8') except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: - self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err)) + 
self._downloader.report_error(u'unable to download video webpage: %s' % compat_str(err)) return info = json.loads(info_json) @@ -2753,7 +2753,7 @@ class SoundcloudIE(InfoExtractor): stream_json_bytes = compat_urllib_request.urlopen(request).read() stream_json = stream_json_bytes.decode('utf-8') except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: - self._downloader.trouble(u'ERROR: unable to download stream definitions: %s' % compat_str(err)) + self._downloader.report_error(u'unable to download stream definitions: %s' % compat_str(err)) return streams = json.loads(stream_json) @@ -2781,7 +2781,7 @@ class InfoQIE(InfoExtractor): def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) if mobj is None: - self._downloader.trouble(u'ERROR: invalid URL: %s' % url) + self._downloader.report_error(u'invalid URL: %s' % url) return webpage = self._download_webpage(url, video_id=url) @@ -2790,7 +2790,7 @@ class InfoQIE(InfoExtractor): # Extract video URL mobj = re.search(r"jsclassref='([^']*)'", webpage) if mobj is None: - self._downloader.trouble(u'ERROR: unable to extract video url') + self._downloader.report_error(u'unable to extract video url') return real_id = compat_urllib_parse.unquote(base64.b64decode(mobj.group(1).encode('ascii')).decode('utf-8')) video_url = 'rtmpe://video.infoq.com/cfx/st/' + real_id @@ -2798,7 +2798,7 @@ class InfoQIE(InfoExtractor): # Extract title mobj = re.search(r'contentTitle = "(.*?)";', webpage) if mobj is None: - self._downloader.trouble(u'ERROR: unable to extract video title') + self._downloader.report_error(u'unable to extract video title') return video_title = mobj.group(1) @@ -2881,7 +2881,7 @@ class MixcloudIE(InfoExtractor): def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) if mobj is None: - self._downloader.trouble(u'ERROR: invalid URL: %s' % url) + self._downloader.report_error(u'invalid URL: %s' % url) return # extract uploader & filename from url uploader = mobj.group(1).decode('utf-8') @@ -2895,7 +2895,7 @@ class MixcloudIE(InfoExtractor): self.report_download_json(file_url) jsonData = compat_urllib_request.urlopen(request).read() except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: - self._downloader.trouble(u'ERROR: Unable to retrieve file: %s' % compat_str(err)) + self._downloader.report_error(u'Unable to retrieve file: %s' % compat_str(err)) return # parse JSON @@ -2919,7 +2919,7 @@ class MixcloudIE(InfoExtractor): break # got it! 
else: if req_format not in formats: - self._downloader.trouble(u'ERROR: format is not available') + self._downloader.report_error(u'format is not available') return url_list = self.get_urls(formats, req_format) @@ -2973,7 +2973,7 @@ class StanfordOpenClassroomIE(InfoExtractor): try: metaXml = compat_urllib_request.urlopen(xmlUrl).read() except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: - self._downloader.trouble(u'ERROR: unable to download video info XML: %s' % compat_str(err)) + self._downloader.report_error(u'unable to download video info XML: %s' % compat_str(err)) return mdoc = xml.etree.ElementTree.fromstring(metaXml) try: @@ -3032,7 +3032,7 @@ class StanfordOpenClassroomIE(InfoExtractor): try: rootpage = compat_urllib_request.urlopen(rootURL).read() except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: - self._downloader.trouble(u'ERROR: unable to download course info page: ' + compat_str(err)) + self._downloader.report_error(u'unable to download course info page: ' + compat_str(err)) return info['title'] = info['id'] @@ -3064,7 +3064,7 @@ class MTVIE(InfoExtractor): def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) if mobj is None: - self._downloader.trouble(u'ERROR: invalid URL: %s' % url) + self._downloader.report_error(u'invalid URL: %s' % url) return if not mobj.group('proto'): url = 'http://' + url @@ -3074,25 +3074,25 @@ class MTVIE(InfoExtractor): mobj = re.search(r'', webpage) if mobj is None: - self._downloader.trouble(u'ERROR: unable to extract song name') + self._downloader.report_error(u'unable to extract song name') return song_name = unescapeHTML(mobj.group(1).decode('iso-8859-1')) mobj = re.search(r'', webpage) if mobj is None: - self._downloader.trouble(u'ERROR: unable to extract performer') + self._downloader.report_error(u'unable to extract performer') return performer = unescapeHTML(mobj.group(1).decode('iso-8859-1')) video_title = performer + ' - ' + song_name mobj = re.search(r'', webpage) if mobj is None: - self._downloader.trouble(u'ERROR: unable to mtvn_uri') + self._downloader.report_error(u'unable to mtvn_uri') return mtvn_uri = mobj.group(1) mobj = re.search(r'MTVN.Player.defaultPlaylistId = ([0-9]+);', webpage) if mobj is None: - self._downloader.trouble(u'ERROR: unable to extract content id') + self._downloader.report_error(u'unable to extract content id') return content_id = mobj.group(1) @@ -3102,7 +3102,7 @@ class MTVIE(InfoExtractor): try: metadataXml = compat_urllib_request.urlopen(request).read() except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: - self._downloader.trouble(u'ERROR: unable to download video metadata: %s' % compat_str(err)) + self._downloader.report_error(u'unable to download video metadata: %s' % compat_str(err)) return mdoc = xml.etree.ElementTree.fromstring(metadataXml) @@ -3174,7 +3174,7 @@ class YoukuIE(InfoExtractor): def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) if mobj is None: - self._downloader.trouble(u'ERROR: invalid URL: %s' % url) + self._downloader.report_error(u'invalid URL: %s' % url) return video_id = mobj.group('ID') @@ -3185,7 +3185,7 @@ class YoukuIE(InfoExtractor): self.report_download_webpage(video_id) jsondata = compat_urllib_request.urlopen(request).read() except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: - self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err)) + 
self._downloader.report_error(u'Unable to retrieve video webpage: %s' % compat_str(err)) return self.report_extraction(video_id) @@ -3216,7 +3216,7 @@ class YoukuIE(InfoExtractor): fileid = config['data'][0]['streamfileids'][format] keys = [s['k'] for s in config['data'][0]['segs'][format]] except (UnicodeDecodeError, ValueError, KeyError): - self._downloader.trouble(u'ERROR: unable to extract info section') + self._downloader.report_error(u'unable to extract info section') return files_info=[] @@ -3263,7 +3263,7 @@ class XNXXIE(InfoExtractor): def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) if mobj is None: - self._downloader.trouble(u'ERROR: invalid URL: %s' % url) + self._downloader.report_error(u'invalid URL: %s' % url) return video_id = mobj.group(1) @@ -3274,24 +3274,24 @@ class XNXXIE(InfoExtractor): webpage_bytes = compat_urllib_request.urlopen(url).read() webpage = webpage_bytes.decode('utf-8') except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: - self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % err) + self._downloader.report_error(u'unable to download video webpage: %s' % err) return result = re.search(self.VIDEO_URL_RE, webpage) if result is None: - self._downloader.trouble(u'ERROR: unable to extract video url') + self._downloader.report_error(u'unable to extract video url') return video_url = compat_urllib_parse.unquote(result.group(1)) result = re.search(self.VIDEO_TITLE_RE, webpage) if result is None: - self._downloader.trouble(u'ERROR: unable to extract video title') + self._downloader.report_error(u'unable to extract video title') return video_title = result.group(1) result = re.search(self.VIDEO_THUMB_RE, webpage) if result is None: - self._downloader.trouble(u'ERROR: unable to extract video thumbnail') + self._downloader.report_error(u'unable to extract video thumbnail') return video_thumbnail = result.group(1) @@ -3340,7 +3340,7 @@ class GooglePlusIE(InfoExtractor): # Extract id from URL mobj = re.match(self._VALID_URL, url) if mobj is None: - self._downloader.trouble(u'ERROR: Invalid URL: %s' % url) + self._downloader.report_error(u'Invalid URL: %s' % url) return post_url = mobj.group(0) @@ -3354,7 +3354,7 @@ class GooglePlusIE(InfoExtractor): try: webpage = compat_urllib_request.urlopen(request).read().decode('utf-8') except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: - self._downloader.trouble(u'ERROR: Unable to retrieve entry webpage: %s' % compat_str(err)) + self._downloader.report_error(u'Unable to retrieve entry webpage: %s' % compat_str(err)) return # Extract update date @@ -3389,14 +3389,14 @@ class GooglePlusIE(InfoExtractor): pattern = '"(https\://plus\.google\.com/photos/.*?)",,"image/jpeg","video"\]' mobj = re.search(pattern, webpage) if mobj is None: - self._downloader.trouble(u'ERROR: unable to extract video page URL') + self._downloader.report_error(u'unable to extract video page URL') video_page = mobj.group(1) request = compat_urllib_request.Request(video_page) try: webpage = compat_urllib_request.urlopen(request).read().decode('utf-8') except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: - self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err)) + self._downloader.report_error(u'Unable to retrieve video webpage: %s' % compat_str(err)) return self.report_extract_vid_page(video_page) @@ -3406,7 +3406,7 @@ class GooglePlusIE(InfoExtractor): pattern = 
'\d+,\d+,(\d+),"(http\://redirector\.googlevideo\.com.*?)"' mobj = re.findall(pattern, webpage) if len(mobj) == 0: - self._downloader.trouble(u'ERROR: unable to extract video links') + self._downloader.report_error(u'unable to extract video links') # Sort in resolution links = sorted(mobj) @@ -3438,7 +3438,7 @@ class NBAIE(InfoExtractor): def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) if mobj is None: - self._downloader.trouble(u'ERROR: invalid URL: %s' % url) + self._downloader.report_error(u'invalid URL: %s' % url) return video_id = mobj.group(1) @@ -3494,13 +3494,13 @@ class JustinTVIE(InfoExtractor): webpage_bytes = urlh.read() webpage = webpage_bytes.decode('utf-8', 'ignore') except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: - self._downloader.trouble(u'ERROR: unable to download video info JSON: %s' % compat_str(err)) + self._downloader.report_error(u'unable to download video info JSON: %s' % compat_str(err)) return response = json.loads(webpage) if type(response) != list: error_text = response.get('error', 'unknown error') - self._downloader.trouble(u'ERROR: Justin.tv API: %s' % error_text) + self._downloader.report_error(u'Justin.tv API: %s' % error_text) return info = [] for clip in response: @@ -3525,7 +3525,7 @@ class JustinTVIE(InfoExtractor): def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) if mobj is None: - self._downloader.trouble(u'ERROR: invalid URL: %s' % url) + self._downloader.report_error(u'invalid URL: %s' % url) return api = 'http://api.justin.tv' @@ -3560,7 +3560,7 @@ class FunnyOrDieIE(InfoExtractor): def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) if mobj is None: - self._downloader.trouble(u'ERROR: invalid URL: %s' % url) + self._downloader.report_error(u'invalid URL: %s' % url) return video_id = mobj.group('id') @@ -3568,7 +3568,7 @@ class FunnyOrDieIE(InfoExtractor): m = re.search(r']*>\s*]*>\s*\s+(?P.*?)</a>", webpage) @@ -3621,7 +3621,7 @@ class SteamIE(InfoExtractor): video_url = vid.group('videoURL') video_thumb = thumb.group('thumbnail') if not video_url: - self._downloader.trouble(u'ERROR: Cannot find video url for %s' % video_id) + self._downloader.report_error(u'Cannot find video url for %s' % video_id) info = { 'id':video_id, 'url':video_url, @@ -3711,7 +3711,7 @@ class YouPornIE(InfoExtractor): def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) if mobj is None: - self._downloader.trouble(u'ERROR: invalid URL: %s' % url) + self._downloader.report_error(u'invalid URL: %s' % url) return video_id = mobj.group('videoid') @@ -3803,7 +3803,7 @@ class YouPornIE(InfoExtractor): else: format = self._specific( req_format, formats ) if result is None: - self._downloader.trouble(u'ERROR: requested format not available') + self._downloader.report_error(u'requested format not available') return return [format] @@ -3816,7 +3816,7 @@ class PornotubeIE(InfoExtractor): def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) if mobj is None: - self._downloader.trouble(u'ERROR: invalid URL: %s' % url) + self._downloader.report_error(u'invalid URL: %s' % url) return video_id = mobj.group('videoid') @@ -3829,7 +3829,7 @@ class PornotubeIE(InfoExtractor): VIDEO_URL_RE = r'url: "(?P<url>http://video[0-9].pornotube.com/.+\.flv)",' result = re.search(VIDEO_URL_RE, webpage) if result is None: - self._downloader.trouble(u'ERROR: unable to extract video url') + self._downloader.report_error(u'unable to extract video url') return video_url = 
compat_urllib_parse.unquote(result.group('url')) @@ -3837,7 +3837,7 @@ class PornotubeIE(InfoExtractor): VIDEO_UPLOADED_RE = r'<div class="video_added_by">Added (?P<date>[0-9\/]+) by' result = re.search(VIDEO_UPLOADED_RE, webpage) if result is None: - self._downloader.trouble(u'ERROR: unable to extract video title') + self._downloader.report_error(u'unable to extract video title') return upload_date = result.group('date') @@ -3858,7 +3858,7 @@ class YouJizzIE(InfoExtractor): def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) if mobj is None: - self._downloader.trouble(u'ERROR: invalid URL: %s' % url) + self._downloader.report_error(u'invalid URL: %s' % url) return video_id = mobj.group('videoid') @@ -4059,13 +4059,13 @@ class MySpassIE(InfoExtractor): # extract values from metadata url_flv_el = metadata.find('url_flv') if url_flv_el is None: - self._downloader.trouble(u'ERROR: unable to extract download url') + self._downloader.report_error(u'unable to extract download url') return video_url = url_flv_el.text extension = os.path.splitext(video_url)[1][1:] title_el = metadata.find('title') if title_el is None: - self._downloader.trouble(u'ERROR: unable to extract title') + self._downloader.report_error(u'unable to extract title') return title = title_el.text format_id_el = metadata.find('format_id') -- cgit v1.2.3 From c9fa1cbab6b24f48449aca3b0eddabee6d95a7d1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jaime=20Marqui=CC=81nez=20Ferra=CC=81ndiz?= <jaimemf93@gmail.com> Date: Tue, 5 Mar 2013 21:13:17 +0100 Subject: More trouble calls changed in InfoExtractors.py The calls with the message starting with 'WARNING' have been changed to report_warning instead of report_error --- youtube_dl/InfoExtractors.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) (limited to 'youtube_dl') diff --git a/youtube_dl/InfoExtractors.py b/youtube_dl/InfoExtractors.py index 6328332a7..83bf5b8f6 100755 --- a/youtube_dl/InfoExtractors.py +++ b/youtube_dl/InfoExtractors.py @@ -467,7 +467,7 @@ class YoutubeIE(InfoExtractor): if mobj is not None: video_uploader_id = mobj.group(1) else: - self._downloader.trouble(u'WARNING: unable to extract uploader nickname') + self._downloader.report_warning(u'unable to extract uploader nickname') # title if 'title' not in video_info: @@ -477,7 +477,7 @@ class YoutubeIE(InfoExtractor): # thumbnail image if 'thumbnail_url' not in video_info: - self._downloader.trouble(u'WARNING: unable to extract video thumbnail') + self._downloader.report_warning(u'unable to extract video thumbnail') video_thumbnail = '' else: # don't panic if we can't find it video_thumbnail = compat_urllib_parse.unquote_plus(video_info['thumbnail_url'][0]) @@ -509,7 +509,7 @@ class YoutubeIE(InfoExtractor): self._downloader.trouble(srt_error) if 'length_seconds' not in video_info: - self._downloader.trouble(u'WARNING: unable to extract video duration') + self._downloader.report_warning(u'unable to extract video duration') video_duration = '' else: video_duration = compat_urllib_parse.unquote_plus(video_info['length_seconds'][0]) @@ -785,7 +785,7 @@ class DailymotionIE(InfoExtractor): # lookin for official user mobj_official = re.search(r'<span rel="author"[^>]+?>([^<]+?)</span>', webpage) if mobj_official is None: - self._downloader.trouble(u'WARNING: unable to extract uploader nickname') + self._downloader.report_warning(u'unable to extract uploader nickname') else: video_uploader = mobj_official.group(1) else: @@ -2449,7 +2449,7 @@ class ComedyCentralIE(InfoExtractor): 
turls.append(finfo) if len(turls) == 0: - self._downloader.trouble(u'\nERROR: unable to download ' + mediaId + ': No videos found') + self._downloader.report_error(u'unable to download ' + mediaId + ': No videos found') continue if self._downloader.params.get('listformats', None): @@ -2609,7 +2609,7 @@ class CollegeHumorIE(InfoExtractor): info['thumbnail'] = videoNode.findall('./thumbnail')[0].text manifest_url = videoNode.findall('./file')[0].text except IndexError: - self._downloader.trouble(u'\nERROR: Invalid metadata XML file') + self._downloader.report_error(u'Invalid metadata XML file') return manifest_url += '?hdcore=2.10.3' @@ -2626,7 +2626,7 @@ class CollegeHumorIE(InfoExtractor): node_id = media_node.attrib['url'] video_id = adoc.findall('./{http://ns.adobe.com/f4m/1.0}id')[0].text except IndexError as err: - self._downloader.trouble(u'\nERROR: Invalid manifest file') + self._downloader.report_error(u'Invalid manifest file') return url_pr = compat_urllib_parse_urlparse(manifest_url) @@ -2980,7 +2980,7 @@ class StanfordOpenClassroomIE(InfoExtractor): info['title'] = mdoc.findall('./title')[0].text info['url'] = baseUrl + mdoc.findall('./videoFile')[0].text except IndexError: - self._downloader.trouble(u'\nERROR: Invalid metadata XML file') + self._downloader.report_error(u'Invalid metadata XML file') return info['ext'] = info['url'].rpartition('.')[2] return [info] -- cgit v1.2.3 From 40634747f74d2c85b28ee33f11672378c9b30949 Mon Sep 17 00:00:00 2001 From: Johny Mo Swag <johnymo@me.com> Date: Wed, 6 Mar 2013 21:09:55 -0800 Subject: Support for WorldStarHipHop.com --- youtube_dl/InfoExtractors.py | 63 +++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 62 insertions(+), 1 deletion(-) (limited to 'youtube_dl') diff --git a/youtube_dl/InfoExtractors.py b/youtube_dl/InfoExtractors.py index 6b03bf307..8be2f160c 100755 --- a/youtube_dl/InfoExtractors.py +++ b/youtube_dl/InfoExtractors.py @@ -2557,7 +2557,7 @@ class EscapistIE(InfoExtractor): 'uploader': showName, 'upload_date': None, 'title': showName, - 'ext': 'mp4', + 'ext': 'flv', 'thumbnail': imgUrl, 'description': description, 'player_url': playerUrl, @@ -3654,6 +3654,66 @@ class UstreamIE(InfoExtractor): } return [info] +class WorldStarHipHopIE(InfoExtractor): + _VALID_URL = r"""(http://(?:www|m).worldstar(?:candy|hiphop)\.com.*)""" + IE_NAME = u'WorldStarHipHop' + + def _real_extract(self, url): + results = [] + + _src_url = r"""(http://hw-videos.*(?:mp4|flv))""" + + webpage_src = compat_urllib_request.urlopen(str(url)).read() + + mobj = re.search(_src_url, webpage_src) + + if mobj is not None: + video_url = mobj.group() + if 'mp4' in video_url: + ext = '.mp4' + else: + ext = '.flv' + else: + video_url = None + ext = None + + _title = r"""<title>(.*)""" + + mobj = re.search(_title, webpage_src) + + if mobj is not None: + title = mobj.group(1) + title = title.replace("'", "") + title = title.replace("'", "") + title = title.replace('Video: ', '') + title = title.replace('"', '"') + title = title.replace('&', 'n') + else: + title = None + + _thumbnail = r"""rel="image_src" href="(.*)" />""" + + mobj = re.search(_thumbnail, webpage_src) + + # Getting thumbnail and if not thumbnail sets correct title for WSHH candy video. 
+ if mobj is not None: + thumbnail = mobj.group(1) + else: + _title = r"""candytitles.*>(.*)""" + mobj = re.search(_title, webpage_src) + if mobj is not None: + title = mobj.group(1) + thumbnail = None + + results.append({ + 'url' : video_url, + 'title' : title, + 'thumbnail' : thumbnail, + 'ext' : ext + }) + + return results + class RBMARadioIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?rbmaradio\.com/shows/(?P[^/]+)$' @@ -4133,6 +4193,7 @@ def gen_extractors(): GooglePlusIE(), ArteTvIE(), NBAIE(), + WorldStarHipHopIE(), JustinTVIE(), FunnyOrDieIE(), SteamIE(), -- cgit v1.2.3 From 61e40c88a989d31b6f06d7001f614d62f06941a5 Mon Sep 17 00:00:00 2001 From: Johny Mo Swag Date: Wed, 6 Mar 2013 21:14:46 -0800 Subject: fixed typo --- youtube_dl/InfoExtractors.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'youtube_dl') diff --git a/youtube_dl/InfoExtractors.py b/youtube_dl/InfoExtractors.py index 8be2f160c..58803c48a 100755 --- a/youtube_dl/InfoExtractors.py +++ b/youtube_dl/InfoExtractors.py @@ -2557,7 +2557,7 @@ class EscapistIE(InfoExtractor): 'uploader': showName, 'upload_date': None, 'title': showName, - 'ext': 'flv', + 'ext': 'mp4', 'thumbnail': imgUrl, 'description': description, 'player_url': playerUrl, -- cgit v1.2.3 From b3bcca0844cc8197cbb5e1e8127b1b8164304940 Mon Sep 17 00:00:00 2001 From: Johny Mo Swag Date: Thu, 7 Mar 2013 15:39:17 -0800 Subject: clean up --- youtube_dl/InfoExtractors.py | 28 +++++++++++----------------- 1 file changed, 11 insertions(+), 17 deletions(-) (limited to 'youtube_dl') diff --git a/youtube_dl/InfoExtractors.py b/youtube_dl/InfoExtractors.py index 58803c48a..178b0beed 100755 --- a/youtube_dl/InfoExtractors.py +++ b/youtube_dl/InfoExtractors.py @@ -3659,20 +3659,19 @@ class WorldStarHipHopIE(InfoExtractor): IE_NAME = u'WorldStarHipHop' def _real_extract(self, url): - results = [] - _src_url = r"""(http://hw-videos.*(?:mp4|flv))""" webpage_src = compat_urllib_request.urlopen(str(url)).read() + webpage_src = webpage_src.decode('utf-8') mobj = re.search(_src_url, webpage_src) if mobj is not None: video_url = mobj.group() if 'mp4' in video_url: - ext = '.mp4' + ext = 'mp4' else: - ext = '.flv' + ext = 'flv' else: video_url = None ext = None @@ -3683,16 +3682,12 @@ class WorldStarHipHopIE(InfoExtractor): if mobj is not None: title = mobj.group(1) - title = title.replace("'", "") - title = title.replace("'", "") - title = title.replace('Video: ', '') - title = title.replace('"', '"') - title = title.replace('&', 'n') else: - title = None + title = 'World Start Hip Hop - %s' % time.ctime() _thumbnail = r"""rel="image_src" href="(.*)" />""" + print title mobj = re.search(_thumbnail, webpage_src) # Getting thumbnail and if not thumbnail sets correct title for WSHH candy video. 
@@ -3705,13 +3700,12 @@ class WorldStarHipHopIE(InfoExtractor): title = mobj.group(1) thumbnail = None - results.append({ - 'url' : video_url, - 'title' : title, - 'thumbnail' : thumbnail, - 'ext' : ext - }) - + results = [{ + 'url' : video_url, + 'title' : title, + 'thumbnail' : thumbnail, + 'ext' : ext, + }] return results class RBMARadioIE(InfoExtractor): -- cgit v1.2.3 From 64c78d50ccf05f34e27b652530fc8b702aa54122 Mon Sep 17 00:00:00 2001 From: Johny Mo Swag Date: Thu, 7 Mar 2013 16:27:21 -0800 Subject: working - worldstarhiphop IE Support for WorldStarHipHop --- youtube_dl/InfoExtractors.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'youtube_dl') diff --git a/youtube_dl/InfoExtractors.py b/youtube_dl/InfoExtractors.py index 178b0beed..f69bad4f3 100755 --- a/youtube_dl/InfoExtractors.py +++ b/youtube_dl/InfoExtractors.py @@ -3655,7 +3655,7 @@ class UstreamIE(InfoExtractor): return [info] class WorldStarHipHopIE(InfoExtractor): - _VALID_URL = r"""(http://(?:www|m).worldstar(?:candy|hiphop)\.com.*)""" + _VALID_URL = r'http://(?:www|m)\.worldstar(?:candy|hiphop)\.com/videos/video\.php\?v=(?P.*)' IE_NAME = u'WorldStarHipHop' def _real_extract(self, url): @@ -3686,8 +3686,6 @@ class WorldStarHipHopIE(InfoExtractor): title = 'World Start Hip Hop - %s' % time.ctime() _thumbnail = r"""rel="image_src" href="(.*)" />""" - - print title mobj = re.search(_thumbnail, webpage_src) # Getting thumbnail and if not thumbnail sets correct title for WSHH candy video. @@ -3700,7 +3698,11 @@ class WorldStarHipHopIE(InfoExtractor): title = mobj.group(1) thumbnail = None + m = re.match(self._VALID_URL, url) + video_id = m.group('id') + results = [{ + 'id': video_id, 'url' : video_url, 'title' : title, 'thumbnail' : thumbnail, -- cgit v1.2.3 From 3b221c540640f7df9e4dc453a736dd25fe2505c4 Mon Sep 17 00:00:00 2001 From: Johny Mo Swag Date: Fri, 8 Mar 2013 22:39:45 -0800 Subject: removed str used for other project. 
--- youtube_dl/InfoExtractors.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'youtube_dl') diff --git a/youtube_dl/InfoExtractors.py b/youtube_dl/InfoExtractors.py index f69bad4f3..c2e3c8983 100755 --- a/youtube_dl/InfoExtractors.py +++ b/youtube_dl/InfoExtractors.py @@ -3661,7 +3661,7 @@ class WorldStarHipHopIE(InfoExtractor): def _real_extract(self, url): _src_url = r"""(http://hw-videos.*(?:mp4|flv))""" - webpage_src = compat_urllib_request.urlopen(str(url)).read() + webpage_src = compat_urllib_request.urlopen(url).read() webpage_src = webpage_src.decode('utf-8') mobj = re.search(_src_url, webpage_src) -- cgit v1.2.3 From 08ec0af7c69f5da0f8c75c84886694877b9b08bf Mon Sep 17 00:00:00 2001 From: Johny Mo Swag Date: Fri, 8 Mar 2013 22:48:05 -0800 Subject: catch fatal error --- youtube_dl/InfoExtractors.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'youtube_dl') diff --git a/youtube_dl/InfoExtractors.py b/youtube_dl/InfoExtractors.py index c2e3c8983..a31aa759e 100755 --- a/youtube_dl/InfoExtractors.py +++ b/youtube_dl/InfoExtractors.py @@ -3666,6 +3666,9 @@ class WorldStarHipHopIE(InfoExtractor): mobj = re.search(_src_url, webpage_src) + m = re.match(self._VALID_URL, url) + video_id = m.group('id') + if mobj is not None: video_url = mobj.group() if 'mp4' in video_url: @@ -3673,8 +3676,8 @@ class WorldStarHipHopIE(InfoExtractor): else: ext = 'flv' else: - video_url = None - ext = None + self._downloader.trouble(u'ERROR: Cannot find video url for %s' % video_id) + return _title = r"""(.*)""" @@ -3697,9 +3700,6 @@ class WorldStarHipHopIE(InfoExtractor): if mobj is not None: title = mobj.group(1) thumbnail = None - - m = re.match(self._VALID_URL, url) - video_id = m.group('id') results = [{ 'id': video_id, -- cgit v1.2.3 From 8cc83b8dbea6e4f34f483c4a209158307df566f0 Mon Sep 17 00:00:00 2001 From: Filippo Valsorda Date: Sat, 9 Mar 2013 10:05:43 +0100 Subject: Bubble up all the stack of exceptions and retry download tests on timeout errors --- youtube_dl/FileDownloader.py | 16 +++++++++++++--- youtube_dl/utils.py | 6 +++++- 2 files changed, 18 insertions(+), 4 deletions(-) (limited to 'youtube_dl') diff --git a/youtube_dl/FileDownloader.py b/youtube_dl/FileDownloader.py index 3b2adf84b..a13a5f9d7 100644 --- a/youtube_dl/FileDownloader.py +++ b/youtube_dl/FileDownloader.py @@ -227,11 +227,21 @@ class FileDownloader(object): self.to_stderr(message) if self.params.get('verbose'): if tb is None: - tb_data = traceback.format_list(traceback.extract_stack()) - tb = u''.join(tb_data) + if sys.exc_info()[0]: # if .trouble has been called from an except block + tb = u'' + if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]: + tb += u''.join(traceback.format_exception(*sys.exc_info()[1].exc_info)) + tb += compat_str(traceback.format_exc()) + else: + tb_data = traceback.format_list(traceback.extract_stack()) + tb = u''.join(tb_data) self.to_stderr(tb) if not self.params.get('ignoreerrors', False): - raise DownloadError(message) + if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]: + exc_info = sys.exc_info()[1].exc_info + else: + exc_info = sys.exc_info() + raise DownloadError(message, exc_info) self._download_retcode = 1 def report_warning(self, message): diff --git a/youtube_dl/utils.py b/youtube_dl/utils.py index 95bd94843..88d4ece13 100644 --- a/youtube_dl/utils.py +++ b/youtube_dl/utils.py @@ -435,6 +435,7 @@ class ExtractorError(Exception): """ tb, if given, is the original 
traceback (so that it can be printed out). """ super(ExtractorError, self).__init__(msg) self.traceback = tb + self.exc_info = sys.exc_info() # preserve original exception def format_traceback(self): if self.traceback is None: @@ -449,7 +450,10 @@ class DownloadError(Exception): configured to continue on errors. They will contain the appropriate error message. """ - pass + def __init__(self, msg, exc_info=None): + """ exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """ + super(DownloadError, self).__init__(msg) + self.exc_info = exc_info class SameFileError(Exception): -- cgit v1.2.3 From c3971870616fb24c298b8f6f1bf1ec7c16c75470 Mon Sep 17 00:00:00 2001 From: Philipp Hagemeister Date: Sat, 16 Mar 2013 23:52:17 +0100 Subject: Spiegel: Support hash at end of URL --- youtube_dl/InfoExtractors.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'youtube_dl') diff --git a/youtube_dl/InfoExtractors.py b/youtube_dl/InfoExtractors.py index 44b4c4376..5339bc0cd 100755 --- a/youtube_dl/InfoExtractors.py +++ b/youtube_dl/InfoExtractors.py @@ -4097,7 +4097,7 @@ class MySpassIE(InfoExtractor): return [info] class SpiegelIE(InfoExtractor): - _VALID_URL = r'https?://(?:www\.)?spiegel\.de/video/[^/]*-(?P[0-9]+)(?:\.html)?$' + _VALID_URL = r'https?://(?:www\.)?spiegel\.de/video/[^/]*-(?P[0-9]+)(?:\.html)?(?:#.*)$' def _real_extract(self, url): m = re.match(self._VALID_URL, url) -- cgit v1.2.3 From 5011cded16d15bb03c2f172ddae81499d764e28a Mon Sep 17 00:00:00 2001 From: dodo Date: Sun, 24 Mar 2013 02:24:07 +0100 Subject: SoundcloudSetIE info extractor for soundcloud sets --- youtube_dl/InfoExtractors.py | 82 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 82 insertions(+) (limited to 'youtube_dl') diff --git a/youtube_dl/InfoExtractors.py b/youtube_dl/InfoExtractors.py index 835428f32..87a926068 100755 --- a/youtube_dl/InfoExtractors.py +++ b/youtube_dl/InfoExtractors.py @@ -2802,6 +2802,87 @@ class SoundcloudIE(InfoExtractor): 'description': info['description'], }] +class SoundcloudSetIE(InfoExtractor): + """Information extractor for soundcloud.com sets + To access the media, the uid of the song and a stream token + must be extracted from the page source and the script must make + a request to media.soundcloud.com/crossdomain.xml. 
Then + the media can be grabbed by requesting from an url composed + of the stream token and uid + """ + + _VALID_URL = r'^(?:https?://)?(?:www\.)?soundcloud\.com/([\w\d-]+)/sets/([\w\d-]+)' + IE_NAME = u'soundcloud' + + def __init__(self, downloader=None): + InfoExtractor.__init__(self, downloader) + + def report_resolve(self, video_id): + """Report information extraction.""" + self._downloader.to_screen(u'[%s] %s: Resolving id' % (self.IE_NAME, video_id)) + + def report_extraction(self, video_id): + """Report information extraction.""" + self._downloader.to_screen(u'[%s] %s: Retrieving stream' % (self.IE_NAME, video_id)) + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + if mobj is None: + self._downloader.trouble(u'ERROR: invalid URL: %s' % url) + return + + # extract uploader (which is in the url) + uploader = mobj.group(1) + # extract simple title (uploader + slug of song title) + slug_title = mobj.group(2) + simple_title = uploader + u'-' + slug_title + + self.report_resolve('%s/sets/%s' % (uploader, slug_title)) + + url = 'http://soundcloud.com/%s/sets/%s' % (uploader, slug_title) + resolv_url = 'http://api.soundcloud.com/resolve.json?url=' + url + '&client_id=b45b1aa10f1ac2941910a7f0d10f8e28' + request = compat_urllib_request.Request(resolv_url) + try: + info_json_bytes = compat_urllib_request.urlopen(request).read() + info_json = info_json_bytes.decode('utf-8') + except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: + self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err)) + return + + videos = [] + info = json.loads(info_json) + if 'errors' in info: + for err in info['errors']: + self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err['error_message'])) + return + + for track in info['tracks']: + video_id = track['id'] + self.report_extraction('%s/sets/%s' % (uploader, slug_title)) + + streams_url = 'https://api.sndcdn.com/i1/tracks/' + str(video_id) + '/streams?client_id=b45b1aa10f1ac2941910a7f0d10f8e28' + request = compat_urllib_request.Request(streams_url) + try: + stream_json_bytes = compat_urllib_request.urlopen(request).read() + stream_json = stream_json_bytes.decode('utf-8') + except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: + self._downloader.trouble(u'ERROR: unable to download stream definitions: %s' % compat_str(err)) + return + + streams = json.loads(stream_json) + mediaURL = streams['http_mp3_128_url'] + + videos.append({ + 'id': video_id, + 'url': mediaURL, + 'uploader': track['user']['username'], + 'upload_date': track['created_at'], + 'title': track['title'], + 'ext': u'mp3', + 'description': track['description'], + }) + return videos + class InfoQIE(InfoExtractor): """Information extractor for infoq.com""" @@ -4187,6 +4268,7 @@ def gen_extractors(): EscapistIE(), CollegeHumorIE(), XVideosIE(), + SoundcloudSetIE(), SoundcloudIE(), InfoQIE(), MixcloudIE(), -- cgit v1.2.3 From 1ee97784052d9f57ec618164a2a4c502186d93b2 Mon Sep 17 00:00:00 2001 From: Chirantan Ekbote Date: Wed, 27 Mar 2013 15:57:11 -0400 Subject: Use sys.stdout.buffer instead of sys.stdout sys.stdout defaults to text mode, we need to use the underlying buffer instead when writing binary data. 
Signed-off-by: Chirantan Ekbote --- youtube_dl/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'youtube_dl') diff --git a/youtube_dl/utils.py b/youtube_dl/utils.py index 95bd94843..901b5b5ad 100644 --- a/youtube_dl/utils.py +++ b/youtube_dl/utils.py @@ -329,7 +329,7 @@ def sanitize_open(filename, open_mode): if sys.platform == 'win32': import msvcrt msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY) - return (sys.stdout, filename) + return (sys.stdout.buffer, filename) stream = open(encodeFilename(filename), open_mode) return (stream, filename) except (IOError, OSError) as err: -- cgit v1.2.3 From 898280a056b577c64005647cae68caf8f16ca059 Mon Sep 17 00:00:00 2001 From: Filippo Valsorda Date: Thu, 28 Mar 2013 13:13:03 +0100 Subject: use sys.stdout.buffer only on Python3 --- youtube_dl/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'youtube_dl') diff --git a/youtube_dl/utils.py b/youtube_dl/utils.py index 901b5b5ad..49af7d7c0 100644 --- a/youtube_dl/utils.py +++ b/youtube_dl/utils.py @@ -329,7 +329,7 @@ def sanitize_open(filename, open_mode): if sys.platform == 'win32': import msvcrt msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY) - return (sys.stdout.buffer, filename) + return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename) stream = open(encodeFilename(filename), open_mode) return (stream, filename) except (IOError, OSError) as err: -- cgit v1.2.3 From a91556fd74adf8ccfa4f923e21a0150e97d38bde Mon Sep 17 00:00:00 2001 From: Philipp Hagemeister Date: Fri, 29 Mar 2013 00:19:58 +0100 Subject: Add a note on MaxDownloadsReached (#732, thanks to CBGoodBuddy) --- youtube_dl/FileDownloader.py | 3 +++ 1 file changed, 3 insertions(+) (limited to 'youtube_dl') diff --git a/youtube_dl/FileDownloader.py b/youtube_dl/FileDownloader.py index 725d4a016..96130152d 100644 --- a/youtube_dl/FileDownloader.py +++ b/youtube_dl/FileDownloader.py @@ -548,6 +548,9 @@ class FileDownloader(object): except ExtractorError as de: # An error we somewhat expected self.trouble(u'ERROR: ' + compat_str(de), de.format_traceback()) break + except MaxDownloadsReached: + self.to_screen(u'[info] Maximum number of downloaded files reached.') + raise except Exception as e: if self.params.get('ignoreerrors', False): self.trouble(u'ERROR: ' + compat_str(e), tb=compat_str(traceback.format_exc())) -- cgit v1.2.3 From 43113d92cc89cb6c9ff98a1b45512a92c71abb23 Mon Sep 17 00:00:00 2001 From: kkalpakloglou Date: Tue, 26 Mar 2013 22:37:08 +0200 Subject: Update InfoExtractors.py --- youtube_dl/InfoExtractors.py | 43 +++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 41 insertions(+), 2 deletions(-) (limited to 'youtube_dl') diff --git a/youtube_dl/InfoExtractors.py b/youtube_dl/InfoExtractors.py index 8e164760b..eb1f32480 100755 --- a/youtube_dl/InfoExtractors.py +++ b/youtube_dl/InfoExtractors.py @@ -4160,6 +4160,46 @@ class SpiegelIE(InfoExtractor): } return [info] +class liveleakIE(InfoExtractor): + + _VALID_URL = r'^(?:http?://)?(?:\w+\.)?liveleak\.com/view\?(?:.*?)i=(?P\d+)(?:.*)' + IE_NAME = u'liveleak' + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + if mobj is None: + self._downloader.trouble(u'ERROR: invalid URL: %s' % url) + return + + video_id = mobj.group(1) + if video_id.endswith('/index.html'): + video_id = video_id[:-len('/index.html')] + + webpage = self._download_webpage(url, video_id) + + video_url = 
u'http://edge.liveleak.com/80281E/u/u/ll2_player_files/mp55/player.swf?config=http://www.liveleak.com/player?a=config%26item_token=' + video_id + m = re.search(r' Date: Fri, 29 Mar 2013 15:13:24 +0100 Subject: Rebased, fixed and extended LiveLeak.com support close #757 - close #761 --- youtube_dl/InfoExtractors.py | 27 ++++++++++++++++++--------- 1 file changed, 18 insertions(+), 9 deletions(-) (limited to 'youtube_dl') diff --git a/youtube_dl/InfoExtractors.py b/youtube_dl/InfoExtractors.py index eb1f32480..45a23989a 100755 --- a/youtube_dl/InfoExtractors.py +++ b/youtube_dl/InfoExtractors.py @@ -4160,9 +4160,9 @@ class SpiegelIE(InfoExtractor): } return [info] -class liveleakIE(InfoExtractor): +class LiveLeakIE(InfoExtractor): - _VALID_URL = r'^(?:http?://)?(?:\w+\.)?liveleak\.com/view\?(?:.*?)i=(?P\d+)(?:.*)' + _VALID_URL = r'^(?:http?://)?(?:\w+\.)?liveleak\.com/view\?(?:.*?)i=(?P[\w_]+)(?:.*)' IE_NAME = u'liveleak' def _real_extract(self, url): @@ -4171,17 +4171,20 @@ class liveleakIE(InfoExtractor): self._downloader.trouble(u'ERROR: invalid URL: %s' % url) return - video_id = mobj.group(1) - if video_id.endswith('/index.html'): - video_id = video_id[:-len('/index.html')] + video_id = mobj.group('video_id') webpage = self._download_webpage(url, video_id) - video_url = u'http://edge.liveleak.com/80281E/u/u/ll2_player_files/mp55/player.swf?config=http://www.liveleak.com/player?a=config%26item_token=' + video_id + m = re.search(r'file: "(.*?)",', webpage) + if not m: + self._downloader.report_error(u'unable to find video url') + return + video_url = m.group(1) + m = re.search(r'', webpage) + if m: + uploader = clean_html(m.group(1)) + else: + uploader = None info = { 'id': video_id, 'url': video_url, 'ext': 'mp4', 'title': title, - 'description': desc + 'description': desc, + 'uploader': uploader } return [info] @@ -4250,6 +4259,6 @@ def gen_extractors(): TEDIE(), MySpassIE(), SpiegelIE(), - liveleakIE(), + LiveLeakIE(), GenericIE() ] -- cgit v1.2.3 From 1f46c152628bdd6b97212ced758b9f83063b5820 Mon Sep 17 00:00:00 2001 From: Filippo Valsorda Date: Fri, 29 Mar 2013 15:31:38 +0100 Subject: fix SpiegelIE --- youtube_dl/InfoExtractors.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'youtube_dl') diff --git a/youtube_dl/InfoExtractors.py b/youtube_dl/InfoExtractors.py index 45a23989a..83cb32196 100755 --- a/youtube_dl/InfoExtractors.py +++ b/youtube_dl/InfoExtractors.py @@ -4128,7 +4128,7 @@ class MySpassIE(InfoExtractor): return [info] class SpiegelIE(InfoExtractor): - _VALID_URL = r'https?://(?:www\.)?spiegel\.de/video/[^/]*-(?P[0-9]+)(?:\.html)?(?:#.*)$' + _VALID_URL = r'https?://(?:www\.)?spiegel\.de/video/[^/]*-(?P[0-9]+)(?:\.html)?(?:#.*)?$' def _real_extract(self, url): m = re.match(self._VALID_URL, url) -- cgit v1.2.3 From 7decf8951cd500acc6ed7c9ad049996957e26d73 Mon Sep 17 00:00:00 2001 From: Filippo Valsorda Date: Fri, 29 Mar 2013 15:59:13 +0100 Subject: fix FunnyOrDieIE, MyVideoIE, TEDIE --- youtube_dl/InfoExtractors.py | 8 ++++---- youtube_dl/utils.py | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) (limited to 'youtube_dl') diff --git a/youtube_dl/InfoExtractors.py b/youtube_dl/InfoExtractors.py index 83cb32196..b3c3dbb43 100755 --- a/youtube_dl/InfoExtractors.py +++ b/youtube_dl/InfoExtractors.py @@ -2305,7 +2305,7 @@ class MyVideoIE(InfoExtractor): webpage = self._download_webpage(webpage_url, video_id) self.report_extraction(video_id) - mobj = re.search(r'', + mobj = re.search(r'\s+(?P.*?)</a>", webpage) + m = re.search(r"<h1 
class='player_page_h1'.*?>(?P<title>.*?)</h1>", webpage, flags=re.DOTALL) if not m: self._downloader.trouble(u'Cannot find video title') - title = unescapeHTML(m.group('title')) + title = clean_html(m.group('title')) m = re.search(r'<meta property="og:description" content="(?P<desc>.*?)"', webpage) if m: @@ -4051,7 +4051,7 @@ class TEDIE(InfoExtractor): videoName=m.group('name') webpage=self._download_webpage(url, video_id, 'Downloading \"%s\" page' % videoName) # If the url includes the language we get the title translated - title_RE=r'<h1><span id="altHeadline" >(?P<title>.*)</span></h1>' + title_RE=r'<span id="altHeadline" >(?P<title>.*)</span>' title=re.search(title_RE, webpage).group('title') info_RE=r'''<script\ type="text/javascript">var\ talkDetails\ =(.*?) "id":(?P<videoID>[\d]+).*? diff --git a/youtube_dl/utils.py b/youtube_dl/utils.py index 49af7d7c0..d366c4173 100644 --- a/youtube_dl/utils.py +++ b/youtube_dl/utils.py @@ -311,7 +311,7 @@ def clean_html(html): html = re.sub('<.*?>', '', html) # Replace html entities html = unescapeHTML(html) - return html + return html.strip() def sanitize_open(filename, open_mode): -- cgit v1.2.3 From 1bf2801e6a6b76976de6651478893ea1619cf869 Mon Sep 17 00:00:00 2001 From: Philipp Hagemeister <phihag@phihag.de> Date: Fri, 29 Mar 2013 21:22:57 +0100 Subject: release 2013.03.29 --- youtube_dl/version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'youtube_dl') diff --git a/youtube_dl/version.py b/youtube_dl/version.py index ce8f6ca23..cb2270001 100644 --- a/youtube_dl/version.py +++ b/youtube_dl/version.py @@ -1,2 +1,2 @@ -__version__ = '2013.02.25' +__version__ = '2013.03.29' -- cgit v1.2.3 From 0fb375640990d5f1038000dc7937cd6cba6dfeb2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jaime=20Marqui=CC=81nez=20Ferra=CC=81ndiz?= <jaimemf93@gmail.com> Date: Sat, 30 Mar 2013 14:11:33 +0100 Subject: Fix crash when subtitles are not found --- youtube_dl/InfoExtractors.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'youtube_dl') diff --git a/youtube_dl/InfoExtractors.py b/youtube_dl/InfoExtractors.py index 2881ae67c..71f57b7c9 100755 --- a/youtube_dl/InfoExtractors.py +++ b/youtube_dl/InfoExtractors.py @@ -282,8 +282,14 @@ class YoutubeIE(InfoExtractor): return (None, sub_lang, sub) def _extract_subtitle(self, video_id): + """ + Return a list with a tuple: + [(error_message, sub_lang, sub)] + """ sub_lang_list = self._get_available_subtitles(video_id) sub_format = self._downloader.params.get('subtitlesformat') + if isinstance(sub_lang_list,tuple): #There was some error, it didn't get the available subtitles + return [(sub_lang_list[0], None, None)] if self._downloader.params.get('subtitleslang', False): sub_lang = self._downloader.params.get('subtitleslang') elif 'en' in sub_lang_list: @@ -291,7 +297,7 @@ class YoutubeIE(InfoExtractor): else: sub_lang = list(sub_lang_list.keys())[0] if not sub_lang in sub_lang_list: - return (u'WARNING: no closed captions found in the specified language "%s"' % sub_lang, None) + return [(u'WARNING: no closed captions found in the specified language "%s"' % sub_lang, None, None)] subtitle = self._request_subtitle(sub_lang, sub_lang_list[sub_lang].encode('utf-8'), video_id, sub_format) return [subtitle] -- cgit v1.2.3 From 6a205c8876eda3b34bd3b1f1f875bbd1b4ebdcbc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jaime=20Marqui=CC=81nez=20Ferra=CC=81ndiz?= <jaimemf93@gmail.com> Date: Sat, 30 Mar 2013 14:17:12 +0100 Subject: More fixes on subtitles errors handling --- 
youtube_dl/InfoExtractors.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'youtube_dl') diff --git a/youtube_dl/InfoExtractors.py b/youtube_dl/InfoExtractors.py index 71f57b7c9..8caace3af 100755 --- a/youtube_dl/InfoExtractors.py +++ b/youtube_dl/InfoExtractors.py @@ -265,6 +265,10 @@ class YoutubeIE(InfoExtractor): self.report_video_subtitles_available(video_id, sub_lang_list) def _request_subtitle(self, sub_lang, sub_name, video_id, format): + """ + Return tuple: + (error_message, sub_lang, sub) + """ self.report_video_subtitles_request(video_id, sub_lang, format) params = compat_urllib_parse.urlencode({ 'lang': sub_lang, @@ -276,9 +280,9 @@ class YoutubeIE(InfoExtractor): try: sub = compat_urllib_request.urlopen(url).read().decode('utf-8') except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: - return (u'WARNING: unable to download video subtitles: %s' % compat_str(err), None) + return (u'WARNING: unable to download video subtitles: %s' % compat_str(err), None, None) if not sub: - return (u'WARNING: Did not fetch video subtitles', None) + return (u'WARNING: Did not fetch video subtitles', None, None) return (None, sub_lang, sub) def _extract_subtitle(self, video_id): -- cgit v1.2.3 From fa41fbd3189b36300a4558b722dea5857a7e4214 Mon Sep 17 00:00:00 2001 From: Filippo Valsorda <filippo.valsorda@gmail.com> Date: Sun, 31 Mar 2013 03:02:05 +0200 Subject: don't catch YT user URLs in YoutubePlaylistIE (fix #754, fix #763) --- youtube_dl/InfoExtractors.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'youtube_dl') diff --git a/youtube_dl/InfoExtractors.py b/youtube_dl/InfoExtractors.py index 2881ae67c..cca7e1b54 100755 --- a/youtube_dl/InfoExtractors.py +++ b/youtube_dl/InfoExtractors.py @@ -1710,9 +1710,7 @@ class YoutubePlaylistIE(InfoExtractor): (?: (?:course|view_play_list|my_playlists|artist|playlist|watch) \? (?:.*?&)*? 
(?:p|a|list)= - | user/.*?/user/ | p/ - | user/.*?#[pg]/c/ ) ((?:PL|EC|UU)?[0-9A-Za-z-_]{10,}) .* @@ -3796,7 +3794,7 @@ class WorldStarHipHopIE(InfoExtractor): _title = r"""<title>(.*)""" mobj = re.search(_title, webpage_src) - + if mobj is not None: title = mobj.group(1) else: @@ -3814,7 +3812,7 @@ class WorldStarHipHopIE(InfoExtractor): if mobj is not None: title = mobj.group(1) thumbnail = None - + results = [{ 'id': video_id, 'url' : video_url, -- cgit v1.2.3 From bc97f6d60ceacdaffe6a6dbfd403a08ce06229eb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jaime=20Marqui=CC=81nez=20Ferra=CC=81ndiz?= Date: Sun, 31 Mar 2013 12:10:12 +0200 Subject: Use report_error in subtitles error handling --- youtube_dl/InfoExtractors.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) (limited to 'youtube_dl') diff --git a/youtube_dl/InfoExtractors.py b/youtube_dl/InfoExtractors.py index 8caace3af..13b1f99b5 100755 --- a/youtube_dl/InfoExtractors.py +++ b/youtube_dl/InfoExtractors.py @@ -253,11 +253,11 @@ class YoutubeIE(InfoExtractor): try: sub_list = compat_urllib_request.urlopen(request).read().decode('utf-8') except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: - return (u'WARNING: unable to download video subtitles: %s' % compat_str(err), None) + return (u'unable to download video subtitles: %s' % compat_str(err), None) sub_lang_list = re.findall(r'name="([^"]*)"[^>]+lang_code="([\w\-]+)"', sub_list) sub_lang_list = dict((l[1], l[0]) for l in sub_lang_list) if not sub_lang_list: - return (u'WARNING: video doesn\'t have subtitles', None) + return (u'video doesn\'t have subtitles', None) return sub_lang_list def _list_available_subtitles(self, video_id): @@ -280,9 +280,9 @@ class YoutubeIE(InfoExtractor): try: sub = compat_urllib_request.urlopen(url).read().decode('utf-8') except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: - return (u'WARNING: unable to download video subtitles: %s' % compat_str(err), None, None) + return (u'unable to download video subtitles: %s' % compat_str(err), None, None) if not sub: - return (u'WARNING: Did not fetch video subtitles', None, None) + return (u'Did not fetch video subtitles', None, None) return (None, sub_lang, sub) def _extract_subtitle(self, video_id): @@ -301,7 +301,7 @@ class YoutubeIE(InfoExtractor): else: sub_lang = list(sub_lang_list.keys())[0] if not sub_lang in sub_lang_list: - return [(u'WARNING: no closed captions found in the specified language "%s"' % sub_lang, None, None)] + return [(u'no closed captions found in the specified language "%s"' % sub_lang, None, None)] subtitle = self._request_subtitle(sub_lang, sub_lang_list[sub_lang].encode('utf-8'), video_id, sub_format) return [subtitle] @@ -542,14 +542,14 @@ class YoutubeIE(InfoExtractor): if video_subtitles: (sub_error, sub_lang, sub) = video_subtitles[0] if sub_error: - self._downloader.trouble(sub_error) + self._downloader.report_error(sub_error) if self._downloader.params.get('allsubtitles', False): video_subtitles = self._extract_all_subtitles(video_id) for video_subtitle in video_subtitles: (sub_error, sub_lang, sub) = video_subtitle if sub_error: - self._downloader.trouble(sub_error) + self._downloader.report_error(sub_error) if self._downloader.params.get('listsubtitles', False): sub_lang_list = self._list_available_subtitles(video_id) -- cgit v1.2.3 From ef767f9fd5e852940de999da4962657bca452c0d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jaime=20Marqui=CC=81nez=20Ferra=CC=81ndiz?= Date: Sun, 31 Mar 
2013 12:19:13 +0200 Subject: Fix crash when subtitles are not found and the option --all-subs is given --- youtube_dl/InfoExtractors.py | 2 ++ 1 file changed, 2 insertions(+) (limited to 'youtube_dl') diff --git a/youtube_dl/InfoExtractors.py b/youtube_dl/InfoExtractors.py index 13b1f99b5..1bd9e25c4 100755 --- a/youtube_dl/InfoExtractors.py +++ b/youtube_dl/InfoExtractors.py @@ -309,6 +309,8 @@ class YoutubeIE(InfoExtractor): def _extract_all_subtitles(self, video_id): sub_lang_list = self._get_available_subtitles(video_id) sub_format = self._downloader.params.get('subtitlesformat') + if isinstance(sub_lang_list,tuple): #There was some error, it didn't get the available subtitles + return [(sub_lang_list[0], None, None)] subtitles = [] for sub_lang in sub_lang_list: subtitle = self._request_subtitle(sub_lang, sub_lang_list[sub_lang].encode('utf-8'), video_id, sub_format) -- cgit v1.2.3 From bafeed9f5dd4613c6b0597f1328968658abb7cb9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jaime=20Marqui=CC=81nez=20Ferra=CC=81ndiz?= Date: Sun, 31 Mar 2013 12:21:35 +0200 Subject: Don't crash in FileDownloader if subtitles couldn't be found and errors are ignored --- youtube_dl/FileDownloader.py | 38 ++++++++++++++++++++++---------------- 1 file changed, 22 insertions(+), 16 deletions(-) (limited to 'youtube_dl') diff --git a/youtube_dl/FileDownloader.py b/youtube_dl/FileDownloader.py index d82aa2d83..7c5a52be1 100644 --- a/youtube_dl/FileDownloader.py +++ b/youtube_dl/FileDownloader.py @@ -485,14 +485,17 @@ class FileDownloader(object): subtitle = info_dict['subtitles'][0] (sub_error, sub_lang, sub) = subtitle sub_format = self.params.get('subtitlesformat') - try: - sub_filename = filename.rsplit('.', 1)[0] + u'.' + sub_lang + u'.' + sub_format - self.report_writesubtitles(sub_filename) - with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8') as subfile: - subfile.write(sub) - except (OSError, IOError): - self.report_error(u'Cannot write subtitles file ' + descfn) - return + if sub_error: + self.report_warning("Some error while getting the subtitles") + else: + try: + sub_filename = filename.rsplit('.', 1)[0] + u'.' + sub_lang + u'.' + sub_format + self.report_writesubtitles(sub_filename) + with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8') as subfile: + subfile.write(sub) + except (OSError, IOError): + self.report_error(u'Cannot write subtitles file ' + descfn) + return if self.params.get('onlysubtitles', False): return @@ -501,14 +504,17 @@ class FileDownloader(object): sub_format = self.params.get('subtitlesformat') for subtitle in subtitles: (sub_error, sub_lang, sub) = subtitle - try: - sub_filename = filename.rsplit('.', 1)[0] + u'.' + sub_lang + u'.' + sub_format - self.report_writesubtitles(sub_filename) - with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8') as subfile: - subfile.write(sub) - except (OSError, IOError): - self.trouble(u'ERROR: Cannot write subtitles file ' + descfn) - return + if sub_error: + self.report_warning("Some error while getting the subtitles") + else: + try: + sub_filename = filename.rsplit('.', 1)[0] + u'.' + sub_lang + u'.' 
+ sub_format + self.report_writesubtitles(sub_filename) + with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8') as subfile: + subfile.write(sub) + except (OSError, IOError): + self.trouble(u'ERROR: Cannot write subtitles file ' + descfn) + return if self.params.get('onlysubtitles', False): return -- cgit v1.2.3 From c2b293ba3021d323a3d8ccbabeb3ebb993b276aa Mon Sep 17 00:00:00 2001 From: Philipp Hagemeister <phihag@phihag.de> Date: Wed, 3 Apr 2013 19:43:53 +0200 Subject: release 2013.04.03 --- youtube_dl/version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'youtube_dl') diff --git a/youtube_dl/version.py b/youtube_dl/version.py index cb2270001..c433e2eaa 100644 --- a/youtube_dl/version.py +++ b/youtube_dl/version.py @@ -1,2 +1,2 @@ -__version__ = '2013.03.29' +__version__ = '2013.04.03' -- cgit v1.2.3
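
A note on the _VALID_URL changes in the WorldStarHipHop and LiveLeak patches above: the video id is captured with a named group and read back via mobj.group('id') / mobj.group('video_id'). A minimal standalone sketch of that pattern, using a made-up pattern and URL rather than the real extractors' values:

    import re

    # Illustrative only: the real patterns live in youtube_dl/InfoExtractors.py.
    _VALID_URL = r'^https?://(?:www\.)?example\.com/videos/video\.php\?v=(?P<id>[\w-]+)'

    mobj = re.match(_VALID_URL, 'http://www.example.com/videos/video.php?v=abc123')
    if mobj is None:
        raise ValueError('invalid URL')
    video_id = mobj.group('id')  # the named group keeps the id extraction readable
    assert video_id == 'abc123'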
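
The "Bubble up all the stack of exceptions" patch above works by stashing sys.exc_info() on the wrapping exception so the original failure stays inspectable after the re-raise. A short self-contained sketch of that idea, with a hypothetical WrappedError standing in for DownloadError/ExtractorError:

    import sys

    class WrappedError(Exception):
        def __init__(self, msg, exc_info=None):
            # exc_info, if given, is the (type, value, traceback) triple of the root failure
            super(WrappedError, self).__init__(msg)
            self.exc_info = exc_info

    try:
        try:
            raise ValueError('root cause')
        except ValueError:
            # re-raise as the wrapper, carrying the original exception triple along
            raise WrappedError('download failed', sys.exc_info())
    except WrappedError as err:
        assert err.exc_info[0] is ValueError  # the original exception type is still available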
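
The sanitize_open() patches above switch binary writes to sys.stdout.buffer because Python 3's sys.stdout is a text stream that rejects bytes, while Python 2's sys.stdout has no .buffer attribute at all, hence the hasattr() guard. A small standalone sketch of the same fallback (binary_stdout is an illustrative helper name, not youtube-dl code):

    import sys

    def binary_stdout():
        # Python 3 exposes the underlying byte stream as sys.stdout.buffer;
        # Python 2's sys.stdout accepts bytes directly and lacks that attribute.
        return sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout

    binary_stdout().write(b'raw downloaded bytes\n')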