Diffstat (limited to 'youtube_dl')
-rw-r--r--  youtube_dl/FileDownloader.py  |  99
-rwxr-xr-x  youtube_dl/InfoExtractors.py  | 550
-rw-r--r--  youtube_dl/__init__.py        |  38
-rw-r--r--  youtube_dl/utils.py           |   4
4 files changed, 439 insertions, 252 deletions
diff --git a/youtube_dl/FileDownloader.py b/youtube_dl/FileDownloader.py
index 57f741c30..5087b4cc8 100644
--- a/youtube_dl/FileDownloader.py
+++ b/youtube_dl/FileDownloader.py
@@ -78,7 +78,11 @@ class FileDownloader(object):
updatetime: Use the Last-modified header to set output file timestamps.
writedescription: Write the video description to a .description file
writeinfojson: Write the video description to a .info.json file
- writesubtitles: Write the video subtitles to a .srt file
+ writesubtitles: Write the video subtitles to a file
+ onlysubtitles: Downloads only the subtitles of the video
+ allsubtitles: Downloads all the subtitles of the video
+ listsubtitles: Lists all available subtitles for the video
+ subtitlesformat: Subtitle format [sbv/srt] (default=srt)
subtitleslang: Language of the subtitles to download
test: Download only first bytes to test the downloader.
keepvideo: Keep the video file after post-processing
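[editor's note] For context, a minimal sketch (not part of the patch) of a params dict exercising the new subtitle options, assuming the usual FileDownloader(params) construction; the keys mirror the docstring above:

    from youtube_dl.FileDownloader import FileDownloader

    params = {
        'outtmpl': u'%(title)s-%(id)s.%(ext)s',  # pre-existing output template option
        'writesubtitles': True,    # write a subtitle file next to the video
        'subtitlesformat': 'srt',  # 'srt' (default) or 'sbv'
        'subtitleslang': 'en',     # preferred subtitle language
        'onlysubtitles': False,    # True: stop once the subtitle file is written
        'allsubtitles': False,     # True: fetch every available language
        'listsubtitles': False,    # True: only print the available languages
    }
    fd = FileDownloader(params)    # sketch only; exact construction may differ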
@@ -246,6 +250,18 @@ class FileDownloader(object):
warning_message=u'%s %s' % (_msg_header,message)
self.to_stderr(warning_message)
+ def report_error(self, message, tb=None):
+ '''
+ Do the same as trouble, but prefixes the message with 'ERROR:', colored
+ in red if stderr is a tty file.
+ '''
+ if sys.stderr.isatty():
+ _msg_header = u'\033[0;31mERROR:\033[0m'
+ else:
+ _msg_header = u'ERROR:'
+ error_message = u'%s %s' % (_msg_header, message)
+ self.trouble(error_message, tb)
+
def slow_down(self, start_time, byte_counter):
"""Sleep if the download speed is over the rate limit."""
rate_limit = self.params.get('ratelimit', None)
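[editor's note] A standalone sketch (not part of the patch) of the tty-dependent colouring rule that report_error applies above:

    import sys

    def error_prefix():
        # Red 'ERROR:' when stderr is a terminal, plain 'ERROR:' when redirected to a file.
        if sys.stderr.isatty():
            return u'\033[0;31mERROR:\033[0m'
        return u'ERROR:'

    sys.stderr.write(u'%s unable to rename file\n' % error_prefix())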
@@ -277,7 +293,7 @@ class FileDownloader(object):
return
os.rename(encodeFilename(old_filename), encodeFilename(new_filename))
except (IOError, OSError) as err:
- self.trouble(u'ERROR: unable to rename file')
+ self.report_error(u'unable to rename file')
def try_utime(self, filename, last_modified_hdr):
"""Try to set the last-modified time of the given file."""
@@ -301,9 +317,9 @@ class FileDownloader(object):
""" Report that the description file is being written """
self.to_screen(u'[info] Writing video description to: ' + descfn)
- def report_writesubtitles(self, srtfn):
+ def report_writesubtitles(self, sub_filename):
""" Report that the subtitles file is being written """
- self.to_screen(u'[info] Writing video subtitles to: ' + srtfn)
+ self.to_screen(u'[info] Writing video subtitles to: ' + sub_filename)
def report_writeinfojson(self, infofn):
""" Report that the metadata file has been written """
@@ -372,8 +388,11 @@ class FileDownloader(object):
filename = self.params['outtmpl'] % template_dict
return filename
- except (ValueError, KeyError) as err:
- self.trouble(u'ERROR: invalid system charset or erroneous output template')
+ except KeyError as err:
+ self.trouble(u'ERROR: Erroneous output template')
+ return None
+ except ValueError as err:
+ self.trouble(u'ERROR: Insufficient system charset ' + repr(preferredencoding()))
return None
def _match_entry(self, info_dict):
@@ -437,7 +456,7 @@ class FileDownloader(object):
if dn != '' and not os.path.exists(dn): # dn is already encoded
os.makedirs(dn)
except (OSError, IOError) as err:
- self.trouble(u'ERROR: unable to create directory ' + compat_str(err))
+ self.report_error(u'unable to create directory ' + compat_str(err))
return
if self.params.get('writedescription', False):
@@ -447,20 +466,41 @@ class FileDownloader(object):
with io.open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
descfile.write(info_dict['description'])
except (OSError, IOError):
- self.trouble(u'ERROR: Cannot write description file ' + descfn)
+ self.report_error(u'Cannot write description file ' + descfn)
return
if self.params.get('writesubtitles', False) and 'subtitles' in info_dict and info_dict['subtitles']:
# subtitles download errors are already managed as troubles in relevant IE
# that way it will silently go on when used with unsupporting IE
+ subtitle = info_dict['subtitles'][0]
+ (sub_error, sub_lang, sub) = subtitle
+ sub_format = self.params.get('subtitlesformat')
try:
- srtfn = filename.rsplit('.', 1)[0] + u'.srt'
- self.report_writesubtitles(srtfn)
- with io.open(encodeFilename(srtfn), 'w', encoding='utf-8') as srtfile:
- srtfile.write(info_dict['subtitles'])
+ sub_filename = filename.rsplit('.', 1)[0] + u'.' + sub_lang + u'.' + sub_format
+ self.report_writesubtitles(sub_filename)
+ with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8') as subfile:
+ subfile.write(sub)
except (OSError, IOError):
- self.trouble(u'ERROR: Cannot write subtitles file ' + descfn)
+ self.report_error(u'Cannot write subtitles file ' + sub_filename)
return
+ if self.params.get('onlysubtitles', False):
+ return
+
+ if self.params.get('allsubtitles', False) and 'subtitles' in info_dict and info_dict['subtitles']:
+ subtitles = info_dict['subtitles']
+ sub_format = self.params.get('subtitlesformat')
+ for subtitle in subtitles:
+ (sub_error, sub_lang, sub) = subtitle
+ try:
+ sub_filename = filename.rsplit('.', 1)[0] + u'.' + sub_lang + u'.' + sub_format
+ self.report_writesubtitles(sub_filename)
+ with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8') as subfile:
+ subfile.write(sub)
+ except (OSError, IOError):
+ self.report_error(u'Cannot write subtitles file ' + sub_filename)
+ return
+ if self.params.get('onlysubtitles', False):
+ return
if self.params.get('writeinfojson', False):
infofn = filename + u'.info.json'
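[editor's note] The subtitle filename derived above follows the pattern <video basename>.<lang>.<format>; a standalone sketch with hypothetical inputs:

    def subtitle_filename(video_filename, sub_lang, sub_format):
        # 'video.mp4', 'en', 'srt' -> 'video.en.srt' (same rsplit logic as the hunk above)
        return video_filename.rsplit('.', 1)[0] + u'.' + sub_lang + u'.' + sub_format

    assert subtitle_filename(u'video.mp4', u'en', u'srt') == u'video.en.srt'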
@@ -469,7 +509,7 @@ class FileDownloader(object):
json_info_dict = dict((k, v) for k,v in info_dict.items() if not k in ['urlhandle'])
write_json_file(json_info_dict, encodeFilename(infofn))
except (OSError, IOError):
- self.trouble(u'ERROR: Cannot write metadata to JSON file ' + infofn)
+ self.report_error(u'Cannot write metadata to JSON file ' + infofn)
return
if not self.params.get('skip_download', False):
@@ -481,17 +521,17 @@ class FileDownloader(object):
except (OSError, IOError) as err:
raise UnavailableVideoError()
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self.trouble(u'ERROR: unable to download video data: %s' % str(err))
+ self.report_error(u'unable to download video data: %s' % str(err))
return
except (ContentTooShortError, ) as err:
- self.trouble(u'ERROR: content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded))
+ self.report_error(u'content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded))
return
if success:
try:
self.post_process(filename, info_dict)
except (PostProcessingError) as err:
- self.trouble(u'ERROR: postprocessing: %s' % str(err))
+ self.report_error(u'postprocessing: %s' % str(err))
return
def download(self, url_list):
@@ -520,9 +560,12 @@ class FileDownloader(object):
except ExtractorError as de: # An error we somewhat expected
self.trouble(u'ERROR: ' + compat_str(de), de.format_traceback())
break
+ except MaxDownloadsReached:
+ self.to_screen(u'[info] Maximum number of downloaded files reached.')
+ raise
except Exception as e:
if self.params.get('ignoreerrors', False):
- self.trouble(u'ERROR: ' + compat_str(e), tb=compat_str(traceback.format_exc()))
+ self.report_error(u'' + compat_str(e), tb=compat_str(traceback.format_exc()))
break
else:
raise
@@ -536,13 +579,14 @@ class FileDownloader(object):
self.increment_downloads()
self.process_info(video)
except UnavailableVideoError:
- self.trouble(u'\nERROR: unable to download video')
+ self.to_stderr(u"\n")
+ self.report_error(u'unable to download video')
# Suitable InfoExtractor had been found; go to next URL
break
if not suitable_found:
- self.trouble(u'ERROR: no suitable InfoExtractor: %s' % url)
+ self.report_error(u'no suitable InfoExtractor: %s' % url)
return self._download_retcode
@@ -577,7 +621,7 @@ class FileDownloader(object):
try:
subprocess.call(['rtmpdump', '-h'], stdout=(open(os.path.devnull, 'w')), stderr=subprocess.STDOUT)
except (OSError, IOError):
- self.trouble(u'ERROR: RTMP download detected but "rtmpdump" could not be run')
+ self.report_error(u'RTMP download detected but "rtmpdump" could not be run')
return False
# Download using rtmpdump. rtmpdump returns exit code 2 when
@@ -622,7 +666,8 @@ class FileDownloader(object):
})
return True
else:
- self.trouble(u'\nERROR: rtmpdump exited with code %d' % retval)
+ self.to_stderr(u"\n")
+ self.report_error(u'rtmpdump exited with code %d' % retval)
return False
def _do_download(self, filename, info_dict):
@@ -722,7 +767,7 @@ class FileDownloader(object):
self.report_retry(count, retries)
if count > retries:
- self.trouble(u'ERROR: giving up after %s retries' % retries)
+ self.report_error(u'giving up after %s retries' % retries)
return False
data_len = data.info().get('Content-length', None)
@@ -758,12 +803,13 @@ class FileDownloader(object):
filename = self.undo_temp_name(tmpfilename)
self.report_destination(filename)
except (OSError, IOError) as err:
- self.trouble(u'ERROR: unable to open for writing: %s' % str(err))
+ self.report_error(u'unable to open for writing: %s' % str(err))
return False
try:
stream.write(data_block)
except (IOError, OSError) as err:
- self.trouble(u'\nERROR: unable to write data: %s' % str(err))
+ self.to_stderr(u"\n")
+ self.report_error(u'unable to write data: %s' % str(err))
return False
if not self.params.get('noresizebuffer', False):
block_size = self.best_block_size(after - before, len(data_block))
@@ -789,7 +835,8 @@ class FileDownloader(object):
self.slow_down(start, byte_counter - resume_len)
if stream is None:
- self.trouble(u'\nERROR: Did not get any data blocks')
+ self.to_stderr(u"\n")
+ self.report_error(u'Did not get any data blocks')
return False
stream.close()
self.report_finish()
diff --git a/youtube_dl/InfoExtractors.py b/youtube_dl/InfoExtractors.py
index a31aa759e..b4c86cfa3 100755
--- a/youtube_dl/InfoExtractors.py
+++ b/youtube_dl/InfoExtractors.py
@@ -48,7 +48,7 @@ class InfoExtractor(object):
uploader_id: Nickname or id of the video uploader.
location: Physical location of the video.
player_url: SWF Player URL (used for rtmpdump).
- subtitles: The .srt file contents.
+ subtitles: The subtitle file contents.
urlhandle: [internal] The urlHandle to be used to download the file,
like returned by urllib.request.urlopen
@@ -126,8 +126,14 @@ class InfoExtractor(object):
def _download_webpage(self, url_or_request, video_id, note=None, errnote=None):
""" Returns the data of the page as a string """
urlh = self._request_webpage(url_or_request, video_id, note, errnote)
+ content_type = urlh.headers.get('Content-Type', '')
+ m = re.match(r'[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+\s*;\s*charset=(.+)', content_type)
+ if m:
+ encoding = m.group(1)
+ else:
+ encoding = 'utf-8'
webpage_bytes = urlh.read()
- return webpage_bytes.decode('utf-8', 'replace')
+ return webpage_bytes.decode(encoding, 'replace')
class YoutubeIE(InfoExtractor):
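[editor's note] The charset sniffing added to _download_webpage boils down to one regular expression over the Content-Type header; a standalone sketch:

    import re

    def charset_from_content_type(content_type):
        # Same pattern as the hunk above; fall back to utf-8 when no charset is declared.
        m = re.match(r'[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+\s*;\s*charset=(.+)', content_type)
        return m.group(1) if m else 'utf-8'

    assert charset_from_content_type('text/html; charset=ISO-8859-1') == 'ISO-8859-1'
    assert charset_from_content_type('text/html') == 'utf-8'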
@@ -218,7 +224,16 @@ class YoutubeIE(InfoExtractor):
def report_video_subtitles_download(self, video_id):
"""Report attempt to download video info webpage."""
- self._downloader.to_screen(u'[youtube] %s: Downloading video subtitles' % video_id)
+ self._downloader.to_screen(u'[youtube] %s: Checking available subtitles' % video_id)
+
+ def report_video_subtitles_request(self, video_id, sub_lang, format):
+ """Report attempt to download video info webpage."""
+ self._downloader.to_screen(u'[youtube] %s: Downloading video subtitles for %s.%s' % (video_id, sub_lang, format))
+
+ def report_video_subtitles_available(self, video_id, sub_lang_list):
+ """Report available subtitles."""
+ sub_lang = ",".join(list(sub_lang_list.keys()))
+ self._downloader.to_screen(u'[youtube] %s: Available subtitles for video: %s' % (video_id, sub_lang))
def report_information_extraction(self, video_id):
"""Report attempt to extract video information."""
@@ -232,55 +247,63 @@ class YoutubeIE(InfoExtractor):
"""Indicate the download will use the RTMP protocol."""
self._downloader.to_screen(u'[youtube] RTMP download detected')
- def _closed_captions_xml_to_srt(self, xml_string):
- srt = ''
- texts = re.findall(r'<text start="([\d\.]+)"( dur="([\d\.]+)")?>([^<]+)</text>', xml_string, re.MULTILINE)
- # TODO parse xml instead of regex
- for n, (start, dur_tag, dur, caption) in enumerate(texts):
- if not dur: dur = '4'
- start = float(start)
- end = start + float(dur)
- start = "%02i:%02i:%02i,%03i" %(start/(60*60), start/60%60, start%60, start%1*1000)
- end = "%02i:%02i:%02i,%03i" %(end/(60*60), end/60%60, end%60, end%1*1000)
- caption = unescapeHTML(caption)
- caption = unescapeHTML(caption) # double cycle, intentional
- srt += str(n+1) + '\n'
- srt += start + ' --> ' + end + '\n'
- srt += caption + '\n\n'
- return srt
-
- def _extract_subtitles(self, video_id):
+ def _get_available_subtitles(self, video_id):
self.report_video_subtitles_download(video_id)
request = compat_urllib_request.Request('http://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id)
try:
- srt_list = compat_urllib_request.urlopen(request).read().decode('utf-8')
+ sub_list = compat_urllib_request.urlopen(request).read().decode('utf-8')
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
return (u'WARNING: unable to download video subtitles: %s' % compat_str(err), None)
- srt_lang_list = re.findall(r'name="([^"]*)"[^>]+lang_code="([\w\-]+)"', srt_list)
- srt_lang_list = dict((l[1], l[0]) for l in srt_lang_list)
- if not srt_lang_list:
- return (u'WARNING: video has no closed captions', None)
- if self._downloader.params.get('subtitleslang', False):
- srt_lang = self._downloader.params.get('subtitleslang')
- elif 'en' in srt_lang_list:
- srt_lang = 'en'
- else:
- srt_lang = list(srt_lang_list.keys())[0]
- if not srt_lang in srt_lang_list:
- return (u'WARNING: no closed captions found in the specified language', None)
+ sub_lang_list = re.findall(r'name="([^"]*)"[^>]+lang_code="([\w\-]+)"', sub_list)
+ sub_lang_list = dict((l[1], l[0]) for l in sub_lang_list)
+ if not sub_lang_list:
+ return (u'WARNING: video doesn\'t have subtitles', None)
+ return sub_lang_list
+
+ def _list_available_subtitles(self, video_id):
+ sub_lang_list = self._get_available_subtitles(video_id)
+ self.report_video_subtitles_available(video_id, sub_lang_list)
+
+ def _request_subtitle(self, sub_lang, sub_name, video_id, format):
+ self.report_video_subtitles_request(video_id, sub_lang, format)
params = compat_urllib_parse.urlencode({
- 'lang': srt_lang,
- 'name': srt_lang_list[srt_lang].encode('utf-8'),
+ 'lang': sub_lang,
+ 'name': sub_name,
'v': video_id,
+ 'fmt': format,
})
url = 'http://www.youtube.com/api/timedtext?' + params
try:
- srt_xml = compat_urllib_request.urlopen(url).read().decode('utf-8')
+ sub = compat_urllib_request.urlopen(url).read().decode('utf-8')
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
return (u'WARNING: unable to download video subtitles: %s' % compat_str(err), None)
- if not srt_xml:
+ if not sub:
return (u'WARNING: Did not fetch video subtitles', None)
- return (None, self._closed_captions_xml_to_srt(srt_xml))
+ return (None, sub_lang, sub)
+
+ def _extract_subtitle(self, video_id):
+ sub_lang_list = self._get_available_subtitles(video_id)
+ sub_format = self._downloader.params.get('subtitlesformat')
+ if self._downloader.params.get('subtitleslang', False):
+ sub_lang = self._downloader.params.get('subtitleslang')
+ elif 'en' in sub_lang_list:
+ sub_lang = 'en'
+ else:
+ sub_lang = list(sub_lang_list.keys())[0]
+ if not sub_lang in sub_lang_list:
+ return (u'WARNING: no closed captions found in the specified language "%s"' % sub_lang, None)
+
+ subtitle = self._request_subtitle(sub_lang, sub_lang_list[sub_lang].encode('utf-8'), video_id, sub_format)
+ return [subtitle]
+
+ def _extract_all_subtitles(self, video_id):
+ sub_lang_list = self._get_available_subtitles(video_id)
+ sub_format = self._downloader.params.get('subtitlesformat')
+ subtitles = []
+ for sub_lang in sub_lang_list:
+ subtitle = self._request_subtitle(sub_lang, sub_lang_list[sub_lang].encode('utf-8'), video_id, sub_format)
+ subtitles.append(subtitle)
+ return subtitles
def _print_formats(self, formats):
print('Available formats:')
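[editor's note] _request_subtitle issues a single GET request against YouTube's timedtext API; a standalone sketch of the URL construction (standard-library urlencode instead of the compat wrapper, hypothetical arguments):

    try:
        from urllib.parse import urlencode  # Python 3
    except ImportError:
        from urllib import urlencode        # Python 2

    def timedtext_url(sub_lang, sub_name, video_id, sub_format):
        # e.g. timedtext_url('en', 'English', 'VIDEO_ID', 'srt')
        params = urlencode({
            'lang': sub_lang,   # subtitle language code
            'name': sub_name,   # track name as reported by the listing endpoint
            'v': video_id,      # YouTube video id
            'fmt': sub_format,  # 'srt' or 'sbv'
        })
        return 'http://www.youtube.com/api/timedtext?' + params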
@@ -388,13 +411,13 @@ class YoutubeIE(InfoExtractor):
self.report_age_confirmation()
age_results = compat_urllib_request.urlopen(request).read().decode('utf-8')
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.trouble(u'ERROR: unable to confirm age: %s' % compat_str(err))
+ self._downloader.report_error(u'unable to confirm age: %s' % compat_str(err))
return
def _extract_id(self, url):
mobj = re.match(self._VALID_URL, url, re.VERBOSE)
if mobj is None:
- self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+ self._downloader.report_error(u'invalid URL: %s' % url)
return
video_id = mobj.group(2)
return video_id
@@ -413,7 +436,7 @@ class YoutubeIE(InfoExtractor):
try:
video_webpage_bytes = compat_urllib_request.urlopen(request).read()
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err))
+ self._downloader.report_error(u'unable to download video webpage: %s' % compat_str(err))
return
video_webpage = video_webpage_bytes.decode('utf-8', 'ignore')
@@ -438,18 +461,18 @@ class YoutubeIE(InfoExtractor):
if 'token' in video_info:
break
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.trouble(u'ERROR: unable to download video info webpage: %s' % compat_str(err))
+ self._downloader.report_error(u'unable to download video info webpage: %s' % compat_str(err))
return
if 'token' not in video_info:
if 'reason' in video_info:
- self._downloader.trouble(u'ERROR: YouTube said: %s' % video_info['reason'][0])
+ self._downloader.report_error(u'YouTube said: %s' % video_info['reason'][0])
else:
- self._downloader.trouble(u'ERROR: "token" parameter not in video info for unknown reason')
+ self._downloader.report_error(u'"token" parameter not in video info for unknown reason')
return
# Check for "rental" videos
if 'ypc_video_rental_bar_text' in video_info and 'author' not in video_info:
- self._downloader.trouble(u'ERROR: "rental" videos not supported')
+ self._downloader.report_error(u'"rental" videos not supported')
return
# Start extracting information
@@ -457,7 +480,7 @@ class YoutubeIE(InfoExtractor):
# uploader
if 'author' not in video_info:
- self._downloader.trouble(u'ERROR: unable to extract uploader name')
+ self._downloader.report_error(u'unable to extract uploader name')
return
video_uploader = compat_urllib_parse.unquote_plus(video_info['author'][0])
@@ -467,17 +490,17 @@ class YoutubeIE(InfoExtractor):
if mobj is not None:
video_uploader_id = mobj.group(1)
else:
- self._downloader.trouble(u'WARNING: unable to extract uploader nickname')
+ self._downloader.report_warning(u'unable to extract uploader nickname')
# title
if 'title' not in video_info:
- self._downloader.trouble(u'ERROR: unable to extract video title')
+ self._downloader.report_error(u'unable to extract video title')
return
video_title = compat_urllib_parse.unquote_plus(video_info['title'][0])
# thumbnail image
if 'thumbnail_url' not in video_info:
- self._downloader.trouble(u'WARNING: unable to extract video thumbnail')
+ self._downloader.report_warning(u'unable to extract video thumbnail')
video_thumbnail = ''
else: # don't panic if we can't find it
video_thumbnail = compat_urllib_parse.unquote_plus(video_info['thumbnail_url'][0])
@@ -501,15 +524,29 @@ class YoutubeIE(InfoExtractor):
else:
video_description = ''
- # closed captions
+ # subtitles
video_subtitles = None
+
if self._downloader.params.get('writesubtitles', False):
- (srt_error, video_subtitles) = self._extract_subtitles(video_id)
- if srt_error:
- self._downloader.trouble(srt_error)
+ video_subtitles = self._extract_subtitle(video_id)
+ if video_subtitles:
+ (sub_error, sub_lang, sub) = video_subtitles[0]
+ if sub_error:
+ self._downloader.trouble(sub_error)
+
+ if self._downloader.params.get('allsubtitles', False):
+ video_subtitles = self._extract_all_subtitles(video_id)
+ for video_subtitle in video_subtitles:
+ (sub_error, sub_lang, sub) = video_subtitle
+ if sub_error:
+ self._downloader.trouble(sub_error)
+
+ if self._downloader.params.get('listsubtitles', False):
+ sub_lang_list = self._list_available_subtitles(video_id)
+ return
if 'length_seconds' not in video_info:
- self._downloader.trouble(u'WARNING: unable to extract video duration')
+ self._downloader.report_warning(u'unable to extract video duration')
video_duration = ''
else:
video_duration = compat_urllib_parse.unquote_plus(video_info['length_seconds'][0])
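[editor's note] Each successfully fetched subtitle is handed back as a (sub_error, sub_lang, sub) tuple with sub_error set to None; a sketch with hypothetical data showing how a caller unpacks it, mirroring the loop in FileDownloader.process_info:

    video_subtitles = [(None, u'en', u'1\n00:00:00,000 --> 00:00:04,000\nHello world\n\n')]
    for subtitle in video_subtitles:
        (sub_error, sub_lang, sub) = subtitle
        if sub_error is None:
            print(sub_lang, len(sub))   # language code and raw subtitle text
        else:
            print(sub_error)            # warning message reported by the extractor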
@@ -537,7 +574,7 @@ class YoutubeIE(InfoExtractor):
format_list = available_formats
existing_formats = [x for x in format_list if x in url_map]
if len(existing_formats) == 0:
- self._downloader.trouble(u'ERROR: no known formats available for video')
+ self._downloader.report_error(u'no known formats available for video')
return
if self._downloader.params.get('listformats', None):
self._print_formats(existing_formats)
@@ -558,10 +595,10 @@ class YoutubeIE(InfoExtractor):
video_url_list = [(rf, url_map[rf])]
break
if video_url_list is None:
- self._downloader.trouble(u'ERROR: requested format not available')
+ self._downloader.report_error(u'requested format not available')
return
else:
- self._downloader.trouble(u'ERROR: no conn or url_encoded_fmt_stream_map information found in video info')
+ self._downloader.report_error(u'no conn or url_encoded_fmt_stream_map information found in video info')
return
results = []
@@ -624,7 +661,7 @@ class MetacafeIE(InfoExtractor):
self.report_disclaimer()
disclaimer = compat_urllib_request.urlopen(request).read()
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.trouble(u'ERROR: unable to retrieve disclaimer: %s' % compat_str(err))
+ self._downloader.report_error(u'unable to retrieve disclaimer: %s' % compat_str(err))
return
# Confirm age
@@ -637,14 +674,14 @@ class MetacafeIE(InfoExtractor):
self.report_age_confirmation()
disclaimer = compat_urllib_request.urlopen(request).read()
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.trouble(u'ERROR: unable to confirm age: %s' % compat_str(err))
+ self._downloader.report_error(u'unable to confirm age: %s' % compat_str(err))
return
def _real_extract(self, url):
# Extract id and simplified title from URL
mobj = re.match(self._VALID_URL, url)
if mobj is None:
- self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+ self._downloader.report_error(u'invalid URL: %s' % url)
return
video_id = mobj.group(1)
@@ -661,7 +698,7 @@ class MetacafeIE(InfoExtractor):
self.report_download_webpage(video_id)
webpage = compat_urllib_request.urlopen(request).read()
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.trouble(u'ERROR: unable retrieve video webpage: %s' % compat_str(err))
+ self._downloader.report_error(u'unable to retrieve video webpage: %s' % compat_str(err))
return
# Extract URL, uploader and title from webpage
@@ -681,15 +718,15 @@ class MetacafeIE(InfoExtractor):
else:
mobj = re.search(r' name="flashvars" value="(.*?)"', webpage)
if mobj is None:
- self._downloader.trouble(u'ERROR: unable to extract media URL')
+ self._downloader.report_error(u'unable to extract media URL')
return
vardict = compat_parse_qs(mobj.group(1))
if 'mediaData' not in vardict:
- self._downloader.trouble(u'ERROR: unable to extract media URL')
+ self._downloader.report_error(u'unable to extract media URL')
return
mobj = re.search(r'"mediaURL":"(http.*?)","key":"(.*?)"', vardict['mediaData'][0])
if mobj is None:
- self._downloader.trouble(u'ERROR: unable to extract media URL')
+ self._downloader.report_error(u'unable to extract media URL')
return
mediaURL = mobj.group(1).replace('\\/', '/')
video_extension = mediaURL[-3:]
@@ -697,13 +734,13 @@ class MetacafeIE(InfoExtractor):
mobj = re.search(r'(?im)<title>(.*) - Video</title>', webpage)
if mobj is None:
- self._downloader.trouble(u'ERROR: unable to extract title')
+ self._downloader.report_error(u'unable to extract title')
return
video_title = mobj.group(1).decode('utf-8')
mobj = re.search(r'submitter=(.*?);', webpage)
if mobj is None:
- self._downloader.trouble(u'ERROR: unable to extract uploader nickname')
+ self._downloader.report_error(u'unable to extract uploader nickname')
return
video_uploader = mobj.group(1)
@@ -735,7 +772,7 @@ class DailymotionIE(InfoExtractor):
# Extract id and simplified title from URL
mobj = re.match(self._VALID_URL, url)
if mobj is None:
- self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+ self._downloader.report_error(u'invalid URL: %s' % url)
return
video_id = mobj.group(1).split('_')[0].split('?')[0]
@@ -751,7 +788,7 @@ class DailymotionIE(InfoExtractor):
self.report_extraction(video_id)
mobj = re.search(r'\s*var flashvars = (.*)', webpage)
if mobj is None:
- self._downloader.trouble(u'ERROR: unable to extract media URL')
+ self._downloader.report_error(u'unable to extract media URL')
return
flashvars = compat_urllib_parse.unquote(mobj.group(1))
@@ -761,12 +798,12 @@ class DailymotionIE(InfoExtractor):
self._downloader.to_screen(u'[dailymotion] Using %s' % key)
break
else:
- self._downloader.trouble(u'ERROR: unable to extract video URL')
+ self._downloader.report_error(u'unable to extract video URL')
return
mobj = re.search(r'"' + max_quality + r'":"(.+?)"', flashvars)
if mobj is None:
- self._downloader.trouble(u'ERROR: unable to extract video URL')
+ self._downloader.report_error(u'unable to extract video URL')
return
video_url = compat_urllib_parse.unquote(mobj.group(1)).replace('\\/', '/')
@@ -775,7 +812,7 @@ class DailymotionIE(InfoExtractor):
mobj = re.search(r'<meta property="og:title" content="(?P<title>[^"]*)" />', webpage)
if mobj is None:
- self._downloader.trouble(u'ERROR: unable to extract title')
+ self._downloader.report_error(u'unable to extract title')
return
video_title = unescapeHTML(mobj.group('title'))
@@ -785,7 +822,7 @@ class DailymotionIE(InfoExtractor):
# lookin for official user
mobj_official = re.search(r'<span rel="author"[^>]+?>([^<]+?)</span>', webpage)
if mobj_official is None:
- self._downloader.trouble(u'WARNING: unable to extract uploader nickname')
+ self._downloader.report_warning(u'unable to extract uploader nickname')
else:
video_uploader = mobj_official.group(1)
else:
@@ -827,7 +864,7 @@ class PhotobucketIE(InfoExtractor):
# Extract id from URL
mobj = re.match(self._VALID_URL, url)
if mobj is None:
- self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
+ self._downloader.report_error(u'Invalid URL: %s' % url)
return
video_id = mobj.group(1)
@@ -840,14 +877,14 @@ class PhotobucketIE(InfoExtractor):
self.report_download_webpage(video_id)
webpage = compat_urllib_request.urlopen(request).read()
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
+ self._downloader.report_error(u'Unable to retrieve video webpage: %s' % compat_str(err))
return
# Extract URL, uploader, and title from webpage
self.report_extraction(video_id)
mobj = re.search(r'<link rel="video_src" href=".*\?file=([^"]+)" />', webpage)
if mobj is None:
- self._downloader.trouble(u'ERROR: unable to extract media URL')
+ self._downloader.report_error(u'unable to extract media URL')
return
mediaURL = compat_urllib_parse.unquote(mobj.group(1))
@@ -855,7 +892,7 @@ class PhotobucketIE(InfoExtractor):
mobj = re.search(r'<title>(.*) video by (.*) - Photobucket</title>', webpage)
if mobj is None:
- self._downloader.trouble(u'ERROR: unable to extract title')
+ self._downloader.report_error(u'unable to extract title')
return
video_title = mobj.group(1).decode('utf-8')
@@ -896,7 +933,7 @@ class YahooIE(InfoExtractor):
# Extract ID from URL
mobj = re.match(self._VALID_URL, url)
if mobj is None:
- self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
+ self._downloader.report_error(u'Invalid URL: %s' % url)
return
video_id = mobj.group(2)
@@ -909,18 +946,18 @@ class YahooIE(InfoExtractor):
try:
webpage = compat_urllib_request.urlopen(request).read()
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
+ self._downloader.report_error(u'Unable to retrieve video webpage: %s' % compat_str(err))
return
mobj = re.search(r'\("id", "([0-9]+)"\);', webpage)
if mobj is None:
- self._downloader.trouble(u'ERROR: Unable to extract id field')
+ self._downloader.report_error(u'Unable to extract id field')
return
yahoo_id = mobj.group(1)
mobj = re.search(r'\("vid", "([0-9]+)"\);', webpage)
if mobj is None:
- self._downloader.trouble(u'ERROR: Unable to extract vid field')
+ self._downloader.report_error(u'Unable to extract vid field')
return
yahoo_vid = mobj.group(1)
@@ -933,34 +970,34 @@ class YahooIE(InfoExtractor):
self.report_download_webpage(video_id)
webpage = compat_urllib_request.urlopen(request).read()
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
+ self._downloader.report_error(u'Unable to retrieve video webpage: %s' % compat_str(err))
return
# Extract uploader and title from webpage
self.report_extraction(video_id)
mobj = re.search(r'<meta name="title" content="(.*)" />', webpage)
if mobj is None:
- self._downloader.trouble(u'ERROR: unable to extract video title')
+ self._downloader.report_error(u'unable to extract video title')
return
video_title = mobj.group(1).decode('utf-8')
mobj = re.search(r'<h2 class="ti-5"><a href="http://video\.yahoo\.com/(people|profile)/[0-9]+" beacon=".*">(.*)</a></h2>', webpage)
if mobj is None:
- self._downloader.trouble(u'ERROR: unable to extract video uploader')
+ self._downloader.report_error(u'unable to extract video uploader')
return
video_uploader = mobj.group(1).decode('utf-8')
# Extract video thumbnail
mobj = re.search(r'<link rel="image_src" href="(.*)" />', webpage)
if mobj is None:
- self._downloader.trouble(u'ERROR: unable to extract video thumbnail')
+ self._downloader.report_error(u'unable to extract video thumbnail')
return
video_thumbnail = mobj.group(1).decode('utf-8')
# Extract video description
mobj = re.search(r'<meta name="description" content="(.*)" />', webpage)
if mobj is None:
- self._downloader.trouble(u'ERROR: unable to extract video description')
+ self._downloader.report_error(u'unable to extract video description')
return
video_description = mobj.group(1).decode('utf-8')
if not video_description:
@@ -969,13 +1006,13 @@ class YahooIE(InfoExtractor):
# Extract video height and width
mobj = re.search(r'<meta name="video_height" content="([0-9]+)" />', webpage)
if mobj is None:
- self._downloader.trouble(u'ERROR: unable to extract video height')
+ self._downloader.report_error(u'unable to extract video height')
return
yv_video_height = mobj.group(1)
mobj = re.search(r'<meta name="video_width" content="([0-9]+)" />', webpage)
if mobj is None:
- self._downloader.trouble(u'ERROR: unable to extract video width')
+ self._downloader.report_error(u'unable to extract video width')
return
yv_video_width = mobj.group(1)
@@ -991,13 +1028,13 @@ class YahooIE(InfoExtractor):
self.report_download_webpage(video_id)
webpage = compat_urllib_request.urlopen(request).read()
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
+ self._downloader.report_error(u'Unable to retrieve video webpage: %s' % compat_str(err))
return
# Extract media URL from playlist XML
mobj = re.search(r'<STREAM APP="(http://.*)" FULLPATH="/?(/.*\.flv\?[^"]*)"', webpage)
if mobj is None:
- self._downloader.trouble(u'ERROR: Unable to extract media URL')
+ self._downloader.report_error(u'Unable to extract media URL')
return
video_url = compat_urllib_parse.unquote(mobj.group(1) + mobj.group(2)).decode('utf-8')
video_url = unescapeHTML(video_url)
@@ -1036,7 +1073,7 @@ class VimeoIE(InfoExtractor):
# Extract ID from URL
mobj = re.match(self._VALID_URL, url)
if mobj is None:
- self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
+ self._downloader.report_error(u'Invalid URL: %s' % url)
return
video_id = mobj.group('id')
@@ -1052,7 +1089,7 @@ class VimeoIE(InfoExtractor):
webpage_bytes = compat_urllib_request.urlopen(request).read()
webpage = webpage_bytes.decode('utf-8')
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
+ self._downloader.report_error(u'Unable to retrieve video webpage: %s' % compat_str(err))
return
# Now we begin extracting as much information as we can from what we
@@ -1065,7 +1102,7 @@ class VimeoIE(InfoExtractor):
config = webpage.split(' = {config:')[1].split(',assets:')[0]
config = json.loads(config)
except:
- self._downloader.trouble(u'ERROR: unable to extract info section')
+ self._downloader.report_error(u'unable to extract info section')
return
# Extract title
@@ -1115,7 +1152,7 @@ class VimeoIE(InfoExtractor):
self._downloader.to_screen(u'[vimeo] %s: Downloading %s file at %s quality' % (video_id, video_codec.upper(), video_quality))
break
else:
- self._downloader.trouble(u'ERROR: no known codec found')
+ self._downloader.report_error(u'no known codec found')
return
video_url = "http://player.vimeo.com/play_redirect?clip_id=%s&sig=%s&time=%s&quality=%s&codecs=%s&type=moogaloop_local&embed_location=" \
@@ -1159,10 +1196,10 @@ class ArteTvIE(InfoExtractor):
self.report_download_webpage(url)
webpage = compat_urllib_request.urlopen(request).read()
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
+ self._downloader.report_error(u'Unable to retrieve video webpage: %s' % compat_str(err))
return
except ValueError as err:
- self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
+ self._downloader.report_error(u'Invalid URL: %s' % url)
return
return webpage
@@ -1172,7 +1209,7 @@ class ArteTvIE(InfoExtractor):
info = {}
if mobj is None:
- self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
+ self._downloader.report_error(u'Invalid URL: %s' % url)
return
for (i, key, err) in matchTuples:
@@ -1281,7 +1318,8 @@ class GenericIE(InfoExtractor):
def report_download_webpage(self, video_id):
"""Report webpage download."""
- self._downloader.to_screen(u'WARNING: Falling back on generic information extractor.')
+ if not self._downloader.params.get('test', False):
+ self._downloader.to_screen(u'WARNING: Falling back on generic information extractor.')
self._downloader.to_screen(u'[generic] %s: Downloading webpage' % video_id)
def report_extraction(self, video_id):
@@ -1351,17 +1389,12 @@ class GenericIE(InfoExtractor):
if self._test_redirect(url): return
video_id = url.split('/')[-1]
- request = compat_urllib_request.Request(url)
try:
- self.report_download_webpage(video_id)
- webpage = compat_urllib_request.urlopen(request).read()
- except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
- return
+ webpage = self._download_webpage(url, video_id)
except ValueError as err:
# since this is the last-resort InfoExtractor, if
# this error is thrown, it'll be thrown here
- self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
+ self._downloader.report_error(u'Invalid URL: %s' % url)
return
self.report_extraction(video_id)
@@ -1374,13 +1407,13 @@ class GenericIE(InfoExtractor):
# Broaden the search a little bit: JWPlayer JS loader
mobj = re.search(r'[^A-Za-z0-9]?file:\s*["\'](http[^\'"&]*)', webpage)
if mobj is None:
- self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
+ self._downloader.report_error(u'Invalid URL: %s' % url)
return
# It's possible that one of the regexes
# matched, but returned an empty group:
if mobj.group(1) is None:
- self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
+ self._downloader.report_error(u'Invalid URL: %s' % url)
return
video_url = compat_urllib_parse.unquote(mobj.group(1))
@@ -1398,14 +1431,14 @@ class GenericIE(InfoExtractor):
# and so on and so forth; it's just not practical
mobj = re.search(r'<title>(.*)</title>', webpage)
if mobj is None:
- self._downloader.trouble(u'ERROR: unable to extract title')
+ self._downloader.report_error(u'unable to extract title')
return
video_title = mobj.group(1)
# video uploader is domain name
mobj = re.match(r'(?:https?://)?([^/]*)/.*', url)
if mobj is None:
- self._downloader.trouble(u'ERROR: unable to extract title')
+ self._downloader.report_error(u'unable to extract title')
return
video_uploader = mobj.group(1)
@@ -1437,7 +1470,7 @@ class YoutubeSearchIE(InfoExtractor):
def _real_extract(self, query):
mobj = re.match(self._VALID_URL, query)
if mobj is None:
- self._downloader.trouble(u'ERROR: invalid search query "%s"' % query)
+ self._downloader.report_error(u'invalid search query "%s"' % query)
return
prefix, query = query.split(':')
@@ -1453,7 +1486,7 @@ class YoutubeSearchIE(InfoExtractor):
try:
n = int(prefix)
if n <= 0:
- self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query))
+ self._downloader.report_error(u'invalid download number %s for query "%s"' % (n, query))
return
elif n > self._max_youtube_results:
self._downloader.report_warning(u'ytsearch returns max %i results (you requested %i)' % (self._max_youtube_results, n))
@@ -1478,7 +1511,7 @@ class YoutubeSearchIE(InfoExtractor):
try:
data = compat_urllib_request.urlopen(request).read().decode('utf-8')
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.trouble(u'ERROR: unable to download API page: %s' % compat_str(err))
+ self._downloader.report_error(u'unable to download API page: %s' % compat_str(err))
return
api_response = json.loads(data)['data']
@@ -1519,7 +1552,7 @@ class GoogleSearchIE(InfoExtractor):
def _real_extract(self, query):
mobj = re.match(self._VALID_URL, query)
if mobj is None:
- self._downloader.trouble(u'ERROR: invalid search query "%s"' % query)
+ self._downloader.report_error(u'invalid search query "%s"' % query)
return
prefix, query = query.split(':')
@@ -1535,7 +1568,7 @@ class GoogleSearchIE(InfoExtractor):
try:
n = int(prefix)
if n <= 0:
- self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query))
+ self._downloader.report_error(u'invalid download number %s for query "%s"' % (n, query))
return
elif n > self._max_google_results:
self._downloader.report_warning(u'gvsearch returns max %i results (you requested %i)' % (self._max_google_results, n))
@@ -1559,7 +1592,7 @@ class GoogleSearchIE(InfoExtractor):
try:
page = compat_urllib_request.urlopen(request).read()
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
+ self._downloader.report_error(u'unable to download webpage: %s' % compat_str(err))
return
# Extract video identifiers
@@ -1603,7 +1636,7 @@ class YahooSearchIE(InfoExtractor):
def _real_extract(self, query):
mobj = re.match(self._VALID_URL, query)
if mobj is None:
- self._downloader.trouble(u'ERROR: invalid search query "%s"' % query)
+ self._downloader.report_error(u'invalid search query "%s"' % query)
return
prefix, query = query.split(':')
@@ -1619,7 +1652,7 @@ class YahooSearchIE(InfoExtractor):
try:
n = int(prefix)
if n <= 0:
- self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query))
+ self._downloader.report_error(u'invalid download number %s for query "%s"' % (n, query))
return
elif n > self._max_yahoo_results:
self._downloader.report_warning(u'yvsearch returns max %i results (you requested %i)' % (self._max_yahoo_results, n))
@@ -1644,7 +1677,7 @@ class YahooSearchIE(InfoExtractor):
try:
page = compat_urllib_request.urlopen(request).read()
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
+ self._downloader.report_error(u'unable to download webpage: %s' % compat_str(err))
return
# Extract video identifiers
@@ -1706,7 +1739,7 @@ class YoutubePlaylistIE(InfoExtractor):
# Extract playlist id
mobj = re.match(self._VALID_URL, url, re.VERBOSE)
if mobj is None:
- self._downloader.trouble(u'ERROR: invalid url: %s' % url)
+ self._downloader.report_error(u'invalid url: %s' % url)
return
# Download playlist videos from API
@@ -1721,17 +1754,17 @@ class YoutubePlaylistIE(InfoExtractor):
try:
page = compat_urllib_request.urlopen(url).read().decode('utf8')
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
+ self._downloader.report_error(u'unable to download webpage: %s' % compat_str(err))
return
try:
response = json.loads(page)
except ValueError as err:
- self._downloader.trouble(u'ERROR: Invalid JSON in API response: ' + compat_str(err))
+ self._downloader.report_error(u'Invalid JSON in API response: ' + compat_str(err))
return
if not 'feed' in response or not 'entry' in response['feed']:
- self._downloader.trouble(u'ERROR: Got a malformed response from YouTube API')
+ self._downloader.report_error(u'Got a malformed response from YouTube API')
return
videos += [ (entry['yt$position']['$t'], entry['content']['src'])
for entry in response['feed']['entry']
@@ -1777,7 +1810,7 @@ class YoutubeChannelIE(InfoExtractor):
# Extract channel id
mobj = re.match(self._VALID_URL, url)
if mobj is None:
- self._downloader.trouble(u'ERROR: invalid url: %s' % url)
+ self._downloader.report_error(u'invalid url: %s' % url)
return
# Download channel pages
@@ -1792,7 +1825,7 @@ class YoutubeChannelIE(InfoExtractor):
try:
page = compat_urllib_request.urlopen(request).read().decode('utf8')
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
+ self._downloader.report_error(u'unable to download webpage: %s' % compat_str(err))
return
# Extract video identifiers
@@ -1835,7 +1868,7 @@ class YoutubeUserIE(InfoExtractor):
# Extract username
mobj = re.match(self._VALID_URL, url)
if mobj is None:
- self._downloader.trouble(u'ERROR: invalid url: %s' % url)
+ self._downloader.report_error(u'invalid url: %s' % url)
return
username = mobj.group(1)
@@ -1857,7 +1890,7 @@ class YoutubeUserIE(InfoExtractor):
try:
page = compat_urllib_request.urlopen(request).read().decode('utf-8')
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
+ self._downloader.report_error(u'unable to download webpage: %s' % compat_str(err))
return
# Extract video identifiers
@@ -1915,7 +1948,7 @@ class BlipTVUserIE(InfoExtractor):
# Extract username
mobj = re.match(self._VALID_URL, url)
if mobj is None:
- self._downloader.trouble(u'ERROR: invalid url: %s' % url)
+ self._downloader.report_error(u'invalid url: %s' % url)
return
username = mobj.group(1)
@@ -1929,7 +1962,7 @@ class BlipTVUserIE(InfoExtractor):
mobj = re.search(r'data-users-id="([^"]+)"', page)
page_base = page_base % mobj.group(1)
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
+ self._downloader.report_error(u'unable to download webpage: %s' % compat_str(err))
return
@@ -1948,7 +1981,7 @@ class BlipTVUserIE(InfoExtractor):
try:
page = compat_urllib_request.urlopen(request).read().decode('utf-8')
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err))
+ self._downloader.report_error(u'unable to download webpage: %s' % str(err))
return
# Extract video identifiers
@@ -2012,7 +2045,7 @@ class DepositFilesIE(InfoExtractor):
self.report_download_webpage(file_id)
webpage = compat_urllib_request.urlopen(request).read()
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.trouble(u'ERROR: Unable to retrieve file webpage: %s' % compat_str(err))
+ self._downloader.report_error(u'Unable to retrieve file webpage: %s' % compat_str(err))
return
# Search for the real file URL
@@ -2022,9 +2055,9 @@ class DepositFilesIE(InfoExtractor):
mobj = re.search(r'<strong>(Attention.*?)</strong>', webpage, re.DOTALL)
if (mobj is not None) and (mobj.group(1) is not None):
restriction_message = re.sub('\s+', ' ', mobj.group(1)).strip()
- self._downloader.trouble(u'ERROR: %s' % restriction_message)
+ self._downloader.report_error(u'%s' % restriction_message)
else:
- self._downloader.trouble(u'ERROR: unable to extract download URL from: %s' % url)
+ self._downloader.report_error(u'unable to extract download URL from: %s' % url)
return
file_url = mobj.group(1)
@@ -2033,7 +2066,7 @@ class DepositFilesIE(InfoExtractor):
# Search for file title
mobj = re.search(r'<b title="(.*?)">', webpage)
if mobj is None:
- self._downloader.trouble(u'ERROR: unable to extract title')
+ self._downloader.report_error(u'unable to extract title')
return
file_title = mobj.group(1).decode('utf-8')
@@ -2106,7 +2139,7 @@ class FacebookIE(InfoExtractor):
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
if mobj is None:
- self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+ self._downloader.report_error(u'invalid URL: %s' % url)
return
video_id = mobj.group('ID')
@@ -2162,7 +2195,7 @@ class BlipTVIE(InfoExtractor):
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
if mobj is None:
- self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+ self._downloader.report_error(u'invalid URL: %s' % url)
return
urlp = compat_urllib_parse_urlparse(url)
@@ -2209,7 +2242,7 @@ class BlipTVIE(InfoExtractor):
json_code_bytes = urlh.read()
json_code = json_code_bytes.decode('utf-8')
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.trouble(u'ERROR: unable to read video info webpage: %s' % compat_str(err))
+ self._downloader.report_error(u'unable to read video info webpage: %s' % compat_str(err))
return
try:
@@ -2240,7 +2273,7 @@ class BlipTVIE(InfoExtractor):
'user_agent': 'iTunes/10.6.1',
}
except (ValueError,KeyError) as err:
- self._downloader.trouble(u'ERROR: unable to parse video information: %s' % repr(err))
+ self._downloader.report_error(u'unable to parse video information: %s' % repr(err))
return
return [info]
@@ -2262,7 +2295,7 @@ class MyVideoIE(InfoExtractor):
def _real_extract(self,url):
mobj = re.match(self._VALID_URL, url)
if mobj is None:
- self._download.trouble(u'ERROR: invalid URL: %s' % url)
+ self._downloader.report_error(u'invalid URL: %s' % url)
return
video_id = mobj.group(1)
@@ -2272,16 +2305,16 @@ class MyVideoIE(InfoExtractor):
webpage = self._download_webpage(webpage_url, video_id)
self.report_extraction(video_id)
- mobj = re.search(r'<link rel=\'image_src\' href=\'(http://is[0-9].myvideo\.de/de/movie[0-9]+/[a-f0-9]+)/thumbs/.*?\.jpg\' />',
+ mobj = re.search(r'<link rel=\'image_src\' href=\'(http://is[0-9].myvideo\.de/de/movie[0-9]+/[a-f0-9]+)/thumbs/.*?\.jpg\'',
webpage)
if mobj is None:
- self._downloader.trouble(u'ERROR: unable to extract media URL')
+ self._downloader.report_error(u'unable to extract media URL')
return
video_url = mobj.group(1) + ('/%s.flv' % video_id)
mobj = re.search('<title>([^<]+)</title>', webpage)
if mobj is None:
- self._downloader.trouble(u'ERROR: unable to extract title')
+ self._downloader.report_error(u'unable to extract title')
return
video_title = mobj.group(1)
@@ -2354,7 +2387,7 @@ class ComedyCentralIE(InfoExtractor):
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url, re.VERBOSE)
if mobj is None:
- self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+ self._downloader.report_error(u'invalid URL: %s' % url)
return
if mobj.group('shortname'):
@@ -2385,16 +2418,16 @@ class ComedyCentralIE(InfoExtractor):
html = htmlHandle.read()
webpage = html.decode('utf-8')
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
+ self._downloader.report_error(u'unable to download webpage: %s' % compat_str(err))
return
if dlNewest:
url = htmlHandle.geturl()
mobj = re.match(self._VALID_URL, url, re.VERBOSE)
if mobj is None:
- self._downloader.trouble(u'ERROR: Invalid redirected URL: ' + url)
+ self._downloader.report_error(u'Invalid redirected URL: ' + url)
return
if mobj.group('episode') == '':
- self._downloader.trouble(u'ERROR: Redirected URL is still not specific: ' + url)
+ self._downloader.report_error(u'Redirected URL is still not specific: ' + url)
return
epTitle = mobj.group('episode')
@@ -2407,7 +2440,7 @@ class ComedyCentralIE(InfoExtractor):
altMovieParams = re.findall('data-mgid="([^"]*(?:episode|video).*?:.*?)"', webpage)
if len(altMovieParams) == 0:
- self._downloader.trouble(u'ERROR: unable to find Flash URL in webpage ' + url)
+ self._downloader.report_error(u'unable to find Flash URL in webpage ' + url)
return
else:
mMovieParams = [("http://media.mtvnservices.com/" + altMovieParams[0], altMovieParams[0])]
@@ -2418,7 +2451,7 @@ class ComedyCentralIE(InfoExtractor):
try:
indexXml = compat_urllib_request.urlopen(indexUrl).read()
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.trouble(u'ERROR: unable to download episode index: ' + compat_str(err))
+ self._downloader.report_error(u'unable to download episode index: ' + compat_str(err))
return
results = []
@@ -2439,7 +2472,7 @@ class ComedyCentralIE(InfoExtractor):
try:
configXml = compat_urllib_request.urlopen(configReq).read()
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
+ self._downloader.report_error(u'unable to download webpage: %s' % compat_str(err))
return
cdoc = xml.etree.ElementTree.fromstring(configXml)
@@ -2449,7 +2482,7 @@ class ComedyCentralIE(InfoExtractor):
turls.append(finfo)
if len(turls) == 0:
- self._downloader.trouble(u'\nERROR: unable to download ' + mediaId + ': No videos found')
+ self._downloader.report_error(u'unable to download ' + mediaId + ': No videos found')
continue
if self._downloader.params.get('listformats', None):
@@ -2506,7 +2539,7 @@ class EscapistIE(InfoExtractor):
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
if mobj is None:
- self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+ self._downloader.report_error(u'invalid URL: %s' % url)
return
showName = mobj.group('showname')
videoId = mobj.group('episode')
@@ -2518,7 +2551,7 @@ class EscapistIE(InfoExtractor):
m = re.match(r'text/html; charset="?([^"]+)"?', webPage.headers['Content-Type'])
webPage = webPageBytes.decode(m.group(1) if m else 'utf-8')
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.trouble(u'ERROR: unable to download webpage: ' + compat_str(err))
+ self._downloader.report_error(u'unable to download webpage: ' + compat_str(err))
return
descMatch = re.search('<meta name="description" content="([^"]*)"', webPage)
@@ -2536,7 +2569,7 @@ class EscapistIE(InfoExtractor):
m = re.match(r'text/html; charset="?([^"]+)"?', configJSON.headers['Content-Type'])
configJSON = configJSON.read().decode(m.group(1) if m else 'utf-8')
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.trouble(u'ERROR: unable to download configuration: ' + compat_str(err))
+ self._downloader.report_error(u'unable to download configuration: ' + compat_str(err))
return
# Technically, it's JavaScript, not JSON
@@ -2545,7 +2578,7 @@ class EscapistIE(InfoExtractor):
try:
config = json.loads(configJSON)
except (ValueError,) as err:
- self._downloader.trouble(u'ERROR: Invalid JSON in configuration file: ' + compat_str(err))
+ self._downloader.report_error(u'Invalid JSON in configuration file: ' + compat_str(err))
return
playlist = config['playlist']
@@ -2583,7 +2616,7 @@ class CollegeHumorIE(InfoExtractor):
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
if mobj is None:
- self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+ self._downloader.report_error(u'invalid URL: %s' % url)
return
video_id = mobj.group('videoid')
@@ -2598,7 +2631,7 @@ class CollegeHumorIE(InfoExtractor):
try:
metaXml = compat_urllib_request.urlopen(xmlUrl).read()
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.trouble(u'ERROR: unable to download video info XML: %s' % compat_str(err))
+ self._downloader.report_error(u'unable to download video info XML: %s' % compat_str(err))
return
mdoc = xml.etree.ElementTree.fromstring(metaXml)
@@ -2609,7 +2642,7 @@ class CollegeHumorIE(InfoExtractor):
info['thumbnail'] = videoNode.findall('./thumbnail')[0].text
manifest_url = videoNode.findall('./file')[0].text
except IndexError:
- self._downloader.trouble(u'\nERROR: Invalid metadata XML file')
+ self._downloader.report_error(u'Invalid metadata XML file')
return
manifest_url += '?hdcore=2.10.3'
@@ -2617,7 +2650,7 @@ class CollegeHumorIE(InfoExtractor):
try:
manifestXml = compat_urllib_request.urlopen(manifest_url).read()
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.trouble(u'ERROR: unable to download video info XML: %s' % compat_str(err))
+ self._downloader.report_error(u'unable to download video info XML: %s' % compat_str(err))
return
adoc = xml.etree.ElementTree.fromstring(manifestXml)
@@ -2626,7 +2659,7 @@ class CollegeHumorIE(InfoExtractor):
node_id = media_node.attrib['url']
video_id = adoc.findall('./{http://ns.adobe.com/f4m/1.0}id')[0].text
except IndexError as err:
- self._downloader.trouble(u'\nERROR: Invalid manifest file')
+ self._downloader.report_error(u'Invalid manifest file')
return
url_pr = compat_urllib_parse_urlparse(manifest_url)
@@ -2650,7 +2683,7 @@ class XVideosIE(InfoExtractor):
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
if mobj is None:
- self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+ self._downloader.report_error(u'invalid URL: %s' % url)
return
video_id = mobj.group(1)
@@ -2662,7 +2695,7 @@ class XVideosIE(InfoExtractor):
# Extract video URL
mobj = re.search(r'flv_url=(.+?)&', webpage)
if mobj is None:
- self._downloader.trouble(u'ERROR: unable to extract video url')
+ self._downloader.report_error(u'unable to extract video url')
return
video_url = compat_urllib_parse.unquote(mobj.group(1))
@@ -2670,7 +2703,7 @@ class XVideosIE(InfoExtractor):
# Extract title
mobj = re.search(r'<title>(.*?)\s+-\s+XVID', webpage)
if mobj is None:
- self._downloader.trouble(u'ERROR: unable to extract video title')
+ self._downloader.report_error(u'unable to extract video title')
return
video_title = mobj.group(1)
@@ -2678,7 +2711,7 @@ class XVideosIE(InfoExtractor):
# Extract video thumbnail
mobj = re.search(r'http://(?:img.*?\.)xvideos.com/videos/thumbs/[a-fA-F0-9]+/[a-fA-F0-9]+/[a-fA-F0-9]+/[a-fA-F0-9]+/([a-fA-F0-9.]+jpg)', webpage)
if mobj is None:
- self._downloader.trouble(u'ERROR: unable to extract video thumbnail')
+ self._downloader.report_error(u'unable to extract video thumbnail')
return
video_thumbnail = mobj.group(0)
@@ -2722,7 +2755,7 @@ class SoundcloudIE(InfoExtractor):
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
if mobj is None:
- self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+ self._downloader.report_error(u'invalid URL: %s' % url)
return
# extract uploader (which is in the url)
@@ -2740,7 +2773,7 @@ class SoundcloudIE(InfoExtractor):
info_json_bytes = compat_urllib_request.urlopen(request).read()
info_json = info_json_bytes.decode('utf-8')
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err))
+ self._downloader.report_error(u'unable to download video webpage: %s' % compat_str(err))
return
info = json.loads(info_json)
@@ -2753,7 +2786,7 @@ class SoundcloudIE(InfoExtractor):
stream_json_bytes = compat_urllib_request.urlopen(request).read()
stream_json = stream_json_bytes.decode('utf-8')
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.trouble(u'ERROR: unable to download stream definitions: %s' % compat_str(err))
+ self._downloader.report_error(u'unable to download stream definitions: %s' % compat_str(err))
return
streams = json.loads(stream_json)
@@ -2781,7 +2814,7 @@ class InfoQIE(InfoExtractor):
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
if mobj is None:
- self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+ self._downloader.report_error(u'invalid URL: %s' % url)
return
webpage = self._download_webpage(url, video_id=url)
@@ -2790,7 +2823,7 @@ class InfoQIE(InfoExtractor):
# Extract video URL
mobj = re.search(r"jsclassref='([^']*)'", webpage)
if mobj is None:
- self._downloader.trouble(u'ERROR: unable to extract video url')
+ self._downloader.report_error(u'unable to extract video url')
return
real_id = compat_urllib_parse.unquote(base64.b64decode(mobj.group(1).encode('ascii')).decode('utf-8'))
video_url = 'rtmpe://video.infoq.com/cfx/st/' + real_id
@@ -2798,7 +2831,7 @@ class InfoQIE(InfoExtractor):
# Extract title
mobj = re.search(r'contentTitle = "(.*?)";', webpage)
if mobj is None:
- self._downloader.trouble(u'ERROR: unable to extract video title')
+ self._downloader.report_error(u'unable to extract video title')
return
video_title = mobj.group(1)
@@ -2881,7 +2914,7 @@ class MixcloudIE(InfoExtractor):
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
if mobj is None:
- self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+ self._downloader.report_error(u'invalid URL: %s' % url)
return
# extract uploader & filename from url
uploader = mobj.group(1).decode('utf-8')
@@ -2895,7 +2928,7 @@ class MixcloudIE(InfoExtractor):
self.report_download_json(file_url)
jsonData = compat_urllib_request.urlopen(request).read()
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.trouble(u'ERROR: Unable to retrieve file: %s' % compat_str(err))
+ self._downloader.report_error(u'Unable to retrieve file: %s' % compat_str(err))
return
# parse JSON
@@ -2919,7 +2952,7 @@ class MixcloudIE(InfoExtractor):
break # got it!
else:
if req_format not in formats:
- self._downloader.trouble(u'ERROR: format is not available')
+ self._downloader.report_error(u'format is not available')
return
url_list = self.get_urls(formats, req_format)
@@ -2973,14 +3006,14 @@ class StanfordOpenClassroomIE(InfoExtractor):
try:
metaXml = compat_urllib_request.urlopen(xmlUrl).read()
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.trouble(u'ERROR: unable to download video info XML: %s' % compat_str(err))
+ self._downloader.report_error(u'unable to download video info XML: %s' % compat_str(err))
return
mdoc = xml.etree.ElementTree.fromstring(metaXml)
try:
info['title'] = mdoc.findall('./title')[0].text
info['url'] = baseUrl + mdoc.findall('./videoFile')[0].text
except IndexError:
- self._downloader.trouble(u'\nERROR: Invalid metadata XML file')
+ self._downloader.report_error(u'Invalid metadata XML file')
return
info['ext'] = info['url'].rpartition('.')[2]
return [info]
@@ -3032,7 +3065,7 @@ class StanfordOpenClassroomIE(InfoExtractor):
try:
rootpage = compat_urllib_request.urlopen(rootURL).read()
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.trouble(u'ERROR: unable to download course info page: ' + compat_str(err))
+ self._downloader.report_error(u'unable to download course info page: ' + compat_str(err))
return
info['title'] = info['id']
@@ -3064,7 +3097,7 @@ class MTVIE(InfoExtractor):
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
if mobj is None:
- self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+ self._downloader.report_error(u'invalid URL: %s' % url)
return
if not mobj.group('proto'):
url = 'http://' + url
@@ -3074,25 +3107,25 @@ class MTVIE(InfoExtractor):
mobj = re.search(r'<meta name="mtv_vt" content="([^"]+)"/>', webpage)
if mobj is None:
- self._downloader.trouble(u'ERROR: unable to extract song name')
+ self._downloader.report_error(u'unable to extract song name')
return
song_name = unescapeHTML(mobj.group(1).decode('iso-8859-1'))
mobj = re.search(r'<meta name="mtv_an" content="([^"]+)"/>', webpage)
if mobj is None:
- self._downloader.trouble(u'ERROR: unable to extract performer')
+ self._downloader.report_error(u'unable to extract performer')
return
performer = unescapeHTML(mobj.group(1).decode('iso-8859-1'))
video_title = performer + ' - ' + song_name
mobj = re.search(r'<meta name="mtvn_uri" content="([^"]+)"/>', webpage)
if mobj is None:
- self._downloader.trouble(u'ERROR: unable to mtvn_uri')
+            self._downloader.report_error(u'unable to extract mtvn_uri')
return
mtvn_uri = mobj.group(1)
mobj = re.search(r'MTVN.Player.defaultPlaylistId = ([0-9]+);', webpage)
if mobj is None:
- self._downloader.trouble(u'ERROR: unable to extract content id')
+ self._downloader.report_error(u'unable to extract content id')
return
content_id = mobj.group(1)
@@ -3102,7 +3135,7 @@ class MTVIE(InfoExtractor):
try:
metadataXml = compat_urllib_request.urlopen(request).read()
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.trouble(u'ERROR: unable to download video metadata: %s' % compat_str(err))
+ self._downloader.report_error(u'unable to download video metadata: %s' % compat_str(err))
return
mdoc = xml.etree.ElementTree.fromstring(metadataXml)
@@ -3174,7 +3207,7 @@ class YoukuIE(InfoExtractor):
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
if mobj is None:
- self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+ self._downloader.report_error(u'invalid URL: %s' % url)
return
video_id = mobj.group('ID')
@@ -3185,7 +3218,7 @@ class YoukuIE(InfoExtractor):
self.report_download_webpage(video_id)
jsondata = compat_urllib_request.urlopen(request).read()
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
+ self._downloader.report_error(u'Unable to retrieve video webpage: %s' % compat_str(err))
return
self.report_extraction(video_id)
@@ -3216,7 +3249,7 @@ class YoukuIE(InfoExtractor):
fileid = config['data'][0]['streamfileids'][format]
keys = [s['k'] for s in config['data'][0]['segs'][format]]
except (UnicodeDecodeError, ValueError, KeyError):
- self._downloader.trouble(u'ERROR: unable to extract info section')
+ self._downloader.report_error(u'unable to extract info section')
return
files_info=[]
@@ -3263,7 +3296,7 @@ class XNXXIE(InfoExtractor):
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
if mobj is None:
- self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+ self._downloader.report_error(u'invalid URL: %s' % url)
return
video_id = mobj.group(1)
@@ -3274,24 +3307,24 @@ class XNXXIE(InfoExtractor):
webpage_bytes = compat_urllib_request.urlopen(url).read()
webpage = webpage_bytes.decode('utf-8')
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % err)
+ self._downloader.report_error(u'unable to download video webpage: %s' % err)
return
result = re.search(self.VIDEO_URL_RE, webpage)
if result is None:
- self._downloader.trouble(u'ERROR: unable to extract video url')
+ self._downloader.report_error(u'unable to extract video url')
return
video_url = compat_urllib_parse.unquote(result.group(1))
result = re.search(self.VIDEO_TITLE_RE, webpage)
if result is None:
- self._downloader.trouble(u'ERROR: unable to extract video title')
+ self._downloader.report_error(u'unable to extract video title')
return
video_title = result.group(1)
result = re.search(self.VIDEO_THUMB_RE, webpage)
if result is None:
- self._downloader.trouble(u'ERROR: unable to extract video thumbnail')
+ self._downloader.report_error(u'unable to extract video thumbnail')
return
video_thumbnail = result.group(1)
@@ -3340,7 +3373,7 @@ class GooglePlusIE(InfoExtractor):
# Extract id from URL
mobj = re.match(self._VALID_URL, url)
if mobj is None:
- self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
+ self._downloader.report_error(u'Invalid URL: %s' % url)
return
post_url = mobj.group(0)
@@ -3354,7 +3387,7 @@ class GooglePlusIE(InfoExtractor):
try:
webpage = compat_urllib_request.urlopen(request).read().decode('utf-8')
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.trouble(u'ERROR: Unable to retrieve entry webpage: %s' % compat_str(err))
+ self._downloader.report_error(u'Unable to retrieve entry webpage: %s' % compat_str(err))
return
# Extract update date
@@ -3389,14 +3422,14 @@ class GooglePlusIE(InfoExtractor):
pattern = '"(https\://plus\.google\.com/photos/.*?)",,"image/jpeg","video"\]'
mobj = re.search(pattern, webpage)
if mobj is None:
- self._downloader.trouble(u'ERROR: unable to extract video page URL')
+ self._downloader.report_error(u'unable to extract video page URL')
video_page = mobj.group(1)
request = compat_urllib_request.Request(video_page)
try:
webpage = compat_urllib_request.urlopen(request).read().decode('utf-8')
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
+ self._downloader.report_error(u'Unable to retrieve video webpage: %s' % compat_str(err))
return
self.report_extract_vid_page(video_page)
@@ -3406,7 +3439,7 @@ class GooglePlusIE(InfoExtractor):
pattern = '\d+,\d+,(\d+),"(http\://redirector\.googlevideo\.com.*?)"'
mobj = re.findall(pattern, webpage)
if len(mobj) == 0:
- self._downloader.trouble(u'ERROR: unable to extract video links')
+ self._downloader.report_error(u'unable to extract video links')
# Sort in resolution
links = sorted(mobj)
@@ -3438,7 +3471,7 @@ class NBAIE(InfoExtractor):
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
if mobj is None:
- self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+ self._downloader.report_error(u'invalid URL: %s' % url)
return
video_id = mobj.group(1)
@@ -3494,13 +3527,13 @@ class JustinTVIE(InfoExtractor):
webpage_bytes = urlh.read()
webpage = webpage_bytes.decode('utf-8', 'ignore')
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.trouble(u'ERROR: unable to download video info JSON: %s' % compat_str(err))
+ self._downloader.report_error(u'unable to download video info JSON: %s' % compat_str(err))
return
response = json.loads(webpage)
if type(response) != list:
error_text = response.get('error', 'unknown error')
- self._downloader.trouble(u'ERROR: Justin.tv API: %s' % error_text)
+ self._downloader.report_error(u'Justin.tv API: %s' % error_text)
return
info = []
for clip in response:
@@ -3525,7 +3558,7 @@ class JustinTVIE(InfoExtractor):
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
if mobj is None:
- self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+ self._downloader.report_error(u'invalid URL: %s' % url)
return
api = 'http://api.justin.tv'
@@ -3560,7 +3593,7 @@ class FunnyOrDieIE(InfoExtractor):
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
if mobj is None:
- self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+ self._downloader.report_error(u'invalid URL: %s' % url)
return
video_id = mobj.group('id')
@@ -3568,13 +3601,13 @@ class FunnyOrDieIE(InfoExtractor):
m = re.search(r'<video[^>]*>\s*<source[^>]*>\s*<source src="(?P<url>[^"]+)"', webpage, re.DOTALL)
if not m:
- self._downloader.trouble(u'ERROR: unable to find video information')
+ self._downloader.report_error(u'unable to find video information')
video_url = unescapeHTML(m.group('url'))
- m = re.search(r"class='player_page_h1'>\s+<a.*?>(?P<title>.*?)</a>", webpage)
+ m = re.search(r"<h1 class='player_page_h1'.*?>(?P<title>.*?)</h1>", webpage, flags=re.DOTALL)
if not m:
self._downloader.trouble(u'Cannot find video title')
- title = unescapeHTML(m.group('title'))
+ title = clean_html(m.group('title'))
m = re.search(r'<meta property="og:description" content="(?P<desc>.*?)"', webpage)
if m:
@@ -3621,7 +3654,7 @@ class SteamIE(InfoExtractor):
video_url = vid.group('videoURL')
video_thumb = thumb.group('thumbnail')
if not video_url:
- self._downloader.trouble(u'ERROR: Cannot find video url for %s' % video_id)
+ self._downloader.report_error(u'Cannot find video url for %s' % video_id)
info = {
'id':video_id,
'url':video_url,
@@ -3767,7 +3800,7 @@ class YouPornIE(InfoExtractor):
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
if mobj is None:
- self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+ self._downloader.report_error(u'invalid URL: %s' % url)
return
video_id = mobj.group('videoid')
@@ -3859,7 +3892,7 @@ class YouPornIE(InfoExtractor):
else:
format = self._specific( req_format, formats )
if result is None:
- self._downloader.trouble(u'ERROR: requested format not available')
+ self._downloader.report_error(u'requested format not available')
return
return [format]
@@ -3872,7 +3905,7 @@ class PornotubeIE(InfoExtractor):
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
if mobj is None:
- self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+ self._downloader.report_error(u'invalid URL: %s' % url)
return
video_id = mobj.group('videoid')
@@ -3885,7 +3918,7 @@ class PornotubeIE(InfoExtractor):
VIDEO_URL_RE = r'url: "(?P<url>http://video[0-9].pornotube.com/.+\.flv)",'
result = re.search(VIDEO_URL_RE, webpage)
if result is None:
- self._downloader.trouble(u'ERROR: unable to extract video url')
+ self._downloader.report_error(u'unable to extract video url')
return
video_url = compat_urllib_parse.unquote(result.group('url'))
@@ -3893,7 +3926,7 @@ class PornotubeIE(InfoExtractor):
VIDEO_UPLOADED_RE = r'<div class="video_added_by">Added (?P<date>[0-9\/]+) by'
result = re.search(VIDEO_UPLOADED_RE, webpage)
if result is None:
- self._downloader.trouble(u'ERROR: unable to extract video title')
+ self._downloader.report_error(u'unable to extract video title')
return
upload_date = result.group('date')
@@ -3914,7 +3947,7 @@ class YouJizzIE(InfoExtractor):
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
if mobj is None:
- self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+ self._downloader.report_error(u'invalid URL: %s' % url)
return
video_id = mobj.group('videoid')
@@ -4009,11 +4042,11 @@ class KeekIE(InfoExtractor):
webpage = self._download_webpage(url, video_id)
m = re.search(r'<meta property="og:title" content="(?P<title>.+)"', webpage)
title = unescapeHTML(m.group('title'))
- m = re.search(r'<div class="bio-names-and-report">[\s\n]+<h4>(?P<uploader>\w+)</h4>', webpage)
- uploader = unescapeHTML(m.group('uploader'))
+ m = re.search(r'<div class="user-name-and-bio">[\S\s]+?<h2>(?P<uploader>.+?)</h2>', webpage)
+ uploader = clean_html(m.group('uploader'))
info = {
- 'id':video_id,
- 'url':video_url,
+ 'id': video_id,
+ 'url': video_url,
'ext': 'mp4',
'title': title,
'thumbnail': thumbnail,
@@ -4074,7 +4107,7 @@ class TEDIE(InfoExtractor):
videoName=m.group('name')
webpage=self._download_webpage(url, video_id, 'Downloading \"%s\" page' % videoName)
# If the url includes the language we get the title translated
- title_RE=r'<h1><span id="altHeadline" >(?P<title>.*)</span></h1>'
+ title_RE=r'<span id="altHeadline" >(?P<title>.*)</span>'
title=re.search(title_RE, webpage).group('title')
info_RE=r'''<script\ type="text/javascript">var\ talkDetails\ =(.*?)
"id":(?P<videoID>[\d]+).*?
@@ -4115,13 +4148,13 @@ class MySpassIE(InfoExtractor):
# extract values from metadata
url_flv_el = metadata.find('url_flv')
if url_flv_el is None:
- self._downloader.trouble(u'ERROR: unable to extract download url')
+ self._downloader.report_error(u'unable to extract download url')
return
video_url = url_flv_el.text
extension = os.path.splitext(video_url)[1][1:]
title_el = metadata.find('title')
if title_el is None:
- self._downloader.trouble(u'ERROR: unable to extract title')
+ self._downloader.report_error(u'unable to extract title')
return
title = title_el.text
format_id_el = metadata.find('format_id')
@@ -4150,6 +4183,89 @@ class MySpassIE(InfoExtractor):
}
return [info]
+class SpiegelIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:www\.)?spiegel\.de/video/[^/]*-(?P<videoID>[0-9]+)(?:\.html)?(?:#.*)?$'
+
+ def _real_extract(self, url):
+ m = re.match(self._VALID_URL, url)
+ video_id = m.group('videoID')
+
+ webpage = self._download_webpage(url, video_id)
+ m = re.search(r'<div class="spVideoTitle">(.*?)</div>', webpage)
+ if not m:
+ raise ExtractorError(u'Cannot find title')
+ video_title = unescapeHTML(m.group(1))
+
+ xml_url = u'http://video2.spiegel.de/flash/' + video_id + u'.xml'
+ xml_code = self._download_webpage(xml_url, video_id,
+ note=u'Downloading XML', errnote=u'Failed to download XML')
+
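+        # The playlist XML appears to list one entry per available format; the last
+        # entry is used here, with filename and duration read from its child elements.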
+ idoc = xml.etree.ElementTree.fromstring(xml_code)
+ last_type = idoc[-1]
+ filename = last_type.findall('./filename')[0].text
+ duration = float(last_type.findall('./duration')[0].text)
+
+ video_url = 'http://video2.spiegel.de/flash/' + filename
+ video_ext = filename.rpartition('.')[2]
+ info = {
+ 'id': video_id,
+ 'url': video_url,
+ 'ext': video_ext,
+ 'title': video_title,
+ 'duration': duration,
+ }
+ return [info]
+
+class LiveLeakIE(InfoExtractor):
+
+    _VALID_URL = r'^(?:https?://)?(?:\w+\.)?liveleak\.com/view\?(?:.*?)i=(?P<video_id>[\w_]+)(?:.*)'
+ IE_NAME = u'liveleak'
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ if mobj is None:
+            self._downloader.report_error(u'invalid URL: %s' % url)
+ return
+
+ video_id = mobj.group('video_id')
+
+ webpage = self._download_webpage(url, video_id)
+
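+        # The page embeds the direct video URL as a `file: "..."` value, which the
+        # regex below pulls out; title, description and uploader are then taken from
+        # the og: meta tags and the "By:" credit line.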
+ m = re.search(r'file: "(.*?)",', webpage)
+ if not m:
+ self._downloader.report_error(u'unable to find video url')
+ return
+ video_url = m.group(1)
+
+ m = re.search(r'<meta property="og:title" content="(?P<title>.*?)"', webpage)
+ if not m:
+ self._downloader.trouble(u'Cannot find video title')
+ title = unescapeHTML(m.group('title')).replace('LiveLeak.com -', '').strip()
+
+ m = re.search(r'<meta property="og:description" content="(?P<desc>.*?)"', webpage)
+ if m:
+ desc = unescapeHTML(m.group('desc'))
+ else:
+ desc = None
+
+ m = re.search(r'By:.*?(\w+)</a>', webpage)
+ if m:
+ uploader = clean_html(m.group(1))
+ else:
+ uploader = None
+
+ info = {
+ 'id': video_id,
+ 'url': video_url,
+ 'ext': 'mp4',
+ 'title': title,
+ 'description': desc,
+ 'uploader': uploader
+ }
+
+ return [info]
+
+
def gen_extractors():
""" Return a list of an instance of every supported extractor.
The order does matter; the first extractor matched is the one handling the URL.
@@ -4199,7 +4315,7 @@ def gen_extractors():
KeekIE(),
TEDIE(),
MySpassIE(),
+ SpiegelIE(),
+ LiveLeakIE(),
GenericIE()
]
-
-
diff --git a/youtube_dl/__init__.py b/youtube_dl/__init__.py
index 3983e2f0e..807b73541 100644
--- a/youtube_dl/__init__.py
+++ b/youtube_dl/__init__.py
@@ -173,12 +173,24 @@ def parseOpts():
action='store', dest='format_limit', metavar='FORMAT', help='highest quality format to download')
video_format.add_option('-F', '--list-formats',
action='store_true', dest='listformats', help='list all available formats (currently youtube only)')
- video_format.add_option('--write-srt',
+ video_format.add_option('--write-sub', '--write-srt',
action='store_true', dest='writesubtitles',
- help='write video closed captions to a .srt file (currently youtube only)', default=False)
- video_format.add_option('--srt-lang',
+ help='write subtitle file (currently youtube only)', default=False)
+ video_format.add_option('--only-sub',
+ action='store_true', dest='onlysubtitles',
+ help='downloads only the subtitles (no video)', default=False)
+ video_format.add_option('--all-subs',
+ action='store_true', dest='allsubtitles',
+ help='downloads all the available subtitles of the video (currently youtube only)', default=False)
+ video_format.add_option('--list-subs',
+ action='store_true', dest='listsubtitles',
+ help='lists all available subtitles for the video (currently youtube only)', default=False)
+ video_format.add_option('--sub-format',
+            action='store', dest='subtitlesformat', metavar='FORMAT',
+ help='subtitle format [srt/sbv] (default=srt) (currently youtube only)', default='srt')
+ video_format.add_option('--sub-lang', '--srt-lang',
action='store', dest='subtitleslang', metavar='LANG',
- help='language of the closed captions to download (optional) use IETF language tags like \'en\'')
+            help='language of the subtitles to download (optional); use IETF language tags like \'en\'')
verbosity.add_option('-q', '--quiet',
action='store_true', dest='quiet', help='activates quiet mode', default=False)
@@ -274,12 +286,20 @@ def parseOpts():
xdg_config_home = os.environ.get('XDG_CONFIG_HOME')
if xdg_config_home:
- userConf = os.path.join(xdg_config_home, 'youtube-dl.conf')
+ userConfFile = os.path.join(xdg_config_home, 'youtube-dl.conf')
else:
- userConf = os.path.join(os.path.expanduser('~'), '.config', 'youtube-dl.conf')
- argv = _readOptions('/etc/youtube-dl.conf') + _readOptions(userConf) + sys.argv[1:]
+ userConfFile = os.path.join(os.path.expanduser('~'), '.config', 'youtube-dl.conf')
+ systemConf = _readOptions('/etc/youtube-dl.conf')
+ userConf = _readOptions(userConfFile)
+ commandLineConf = sys.argv[1:]
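+    # optparse applies later options over earlier ones, so command-line arguments
+    # take precedence over the user config, which in turn overrides /etc/youtube-dl.conf.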
+ argv = systemConf + userConf + commandLineConf
opts, args = parser.parse_args(argv)
+ if opts.verbose:
+ print(u'[debug] System config: ' + repr(systemConf))
+ print(u'[debug] User config: ' + repr(userConf))
+ print(u'[debug] Command-line args: ' + repr(commandLineConf))
+
return parser, opts, args
def _real_main():
@@ -450,6 +470,10 @@ def _real_main():
'writedescription': opts.writedescription,
'writeinfojson': opts.writeinfojson,
'writesubtitles': opts.writesubtitles,
+ 'onlysubtitles': opts.onlysubtitles,
+ 'allsubtitles': opts.allsubtitles,
+ 'listsubtitles': opts.listsubtitles,
+ 'subtitlesformat': opts.subtitlesformat,
'subtitleslang': opts.subtitleslang,
'matchtitle': decodeOption(opts.matchtitle),
'rejecttitle': decodeOption(opts.rejecttitle),
diff --git a/youtube_dl/utils.py b/youtube_dl/utils.py
index 95bd94843..d366c4173 100644
--- a/youtube_dl/utils.py
+++ b/youtube_dl/utils.py
@@ -311,7 +311,7 @@ def clean_html(html):
html = re.sub('<.*?>', '', html)
# Replace html entities
html = unescapeHTML(html)
- return html
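+    # Tag removal and entity unescaping can leave stray leading/trailing whitespace.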
+ return html.strip()
def sanitize_open(filename, open_mode):
@@ -329,7 +329,7 @@ def sanitize_open(filename, open_mode):
if sys.platform == 'win32':
import msvcrt
msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
- return (sys.stdout, filename)
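+            # On Python 3, sys.stdout is a text stream, so binary data must go to its
+            # underlying .buffer; Python 2 file objects lack it, hence the hasattr check.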
+ return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename)
stream = open(encodeFilename(filename), open_mode)
return (stream, filename)
except (IOError, OSError) as err: