author     Jaime Marquínez Ferrándiz <jaime.marquinez.ferrandiz@gmail.com>  2013-04-19 21:57:08 +0200
committer  Jaime Marquínez Ferrándiz <jaime.marquinez.ferrandiz@gmail.com>  2013-04-19 21:57:08 +0200
commit     dce9027045f061649bc20f6ef48cbbce8cec87df (patch)
tree       d391525957879dd547da18c98299f9a187af4e3e /youtube_dl
parent     feba604e9256b48972dfe4b5658ada4b300581cd (diff)
parent     d281274bf250065f876bb4f75fb6f711e1a26eba (diff)
Merge branch 'extract_info_rewrite'
Diffstat (limited to 'youtube_dl')
 youtube_dl/FileDownloader.py (-rw-r--r--) | 157
 youtube_dl/InfoExtractors.py (-rwxr-xr-x) |  88
 2 files changed, 148 insertions(+), 97 deletions(-)
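
The heart of the rewrite is that InfoExtractors now hand plain dictionaries back to the FileDownloader, distinguished by a '_type' key ('video', 'url' or 'playlist') that the new process_ie_result() dispatches on. A minimal sketch of the three shapes, using the key names that appear in the diff below (the concrete values are invented for illustration):

# Illustrative only: key names come from the diff below, values are made up.
video = {
    '_type': 'video',        # also assumed when '_type' is missing
    'id': 'abc123',
    'title': 'Some video',
    'url': 'http://media.example.com/abc123.mp4',
}
url_pointer = {
    '_type': 'url',          # a page that a suitable InfoExtractor should resolve
    'url': 'http://www.youtube.com/watch?v=abc123',
}
playlist = {
    '_type': 'playlist',     # entries are processed one by one by process_ie_result()
    'id': 'PL0000000000',
    'title': 'Some playlist',
    'entries': [url_pointer],
}
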
diff --git a/youtube_dl/FileDownloader.py b/youtube_dl/FileDownloader.py
index 96da754fb..03346ab04 100644
--- a/youtube_dl/FileDownloader.py
+++ b/youtube_dl/FileDownloader.py
@@ -393,6 +393,8 @@ class FileDownloader(object):
autonumber_size = 5
autonumber_templ = u'%0' + str(autonumber_size) + u'd'
template_dict['autonumber'] = autonumber_templ % self._num_downloads
+ if template_dict['playlist_index'] is not None:
+ template_dict['playlist_index'] = u'%05d' % template_dict['playlist_index']
sanitize = lambda k,v: sanitize_filename(
u'NA' if v is None else compat_str(v),
@@ -422,10 +424,110 @@ class FileDownloader(object):
if re.search(rejecttitle, title, re.IGNORECASE):
return u'"' + title + '" title matched reject pattern "' + rejecttitle + '"'
return None
+
+ def extract_info(self, url, download = True):
+ '''
+ Returns a list with a dictionary for each video we find.
+ If 'download', also downloads the videos.
+ '''
+ suitable_found = False
+ for ie in self._ies:
+ # Go to next InfoExtractor if not suitable
+ if not ie.suitable(url):
+ continue
+
+ # Warn if the _WORKING attribute is False
+ if not ie.working():
+ self.to_stderr(u'WARNING: the program functionality for this site has been marked as broken, '
+ u'and will probably not work. If you want to go on, use the -i option.')
+
+ # Suitable InfoExtractor found
+ suitable_found = True
+
+ # Extract information from URL and process it
+ try:
+ ie_results = ie.extract(url)
+ results = []
+ for ie_result in ie_results:
+ if 'extractor' not in ie_result:
+ #Set it only if the extractor has not already been set somewhere else
+ ie_result['extractor'] = ie.IE_NAME
+ results.append(self.process_ie_result(ie_result, download))
+ return results
+ except ExtractorError as de: # An error we somewhat expected
+ self.trouble(u'ERROR: ' + compat_str(de), de.format_traceback())
+ break
+ except Exception as e:
+ if self.params.get('ignoreerrors', False):
+ self.trouble(u'ERROR: ' + compat_str(e), tb=compat_str(traceback.format_exc()))
+ break
+ else:
+ raise
+ if not suitable_found:
+ self.trouble(u'ERROR: no suitable InfoExtractor: %s' % url)
+
+ def process_ie_result(self, ie_result, download = True):
+ """
+ Take the result of the ie and return the processed result.
+ For url elements it will search for a suitable ie and get the video from it.
+ For playlist elements it will process each of the elements of the 'entries' key.
+
+ It will also download the videos if 'download' is true.
+ """
+ result_type = ie_result.get('_type', 'video') #If not given we assume it's a video, to support the default old system
+ if result_type == 'video':
+ if 'playlist' not in ie_result:
+ #It isn't part of a playlist
+ ie_result['playlist'] = None
+ ie_result['playlist_index'] = None
+ if download:
+ #Do the download:
+ self.process_info(ie_result)
+ return ie_result
+ elif result_type == 'url':
+ #We get the video pointed to by the url
+ result = self.extract_info(ie_result['url'], download)[0]
+ return result
+ elif result_type == 'playlist':
+ #We process each entry in the playlist
+ playlist = ie_result.get('title', None) or ie_result.get('id', None)
+ self.to_screen(u'[download] Downloading playlist: %s' % playlist)
+
+ playlist_results = []
+
+ n_all_entries = len(ie_result['entries'])
+ playliststart = self.params.get('playliststart', 1) - 1
+ playlistend = self.params.get('playlistend', -1)
+
+ if playlistend == -1:
+ entries = ie_result['entries'][playliststart:]
+ else:
+ entries = ie_result['entries'][playliststart:playlistend]
+
+ n_entries = len(entries)
+
+ self.to_screen(u"[%s] playlist '%s': Collected %d video ids (downloading %d of them)" %
+ (ie_result['extractor'], playlist, n_all_entries, n_entries))
+
+ for i, entry in enumerate(entries, 1):
+ self.to_screen(u'[download] Downloading video #%s of %s' % (i, n_entries))
+ entry_result = self.process_ie_result(entry, False)
+ entry_result['playlist'] = playlist
+ entry_result['playlist_index'] = i + playliststart
+ #We must do the download here to correctly set the 'playlist' key
+ if download:
+ self.process_info(entry_result)
+ playlist_results.append(entry_result)
+ result = ie_result.copy()
+ result['entries'] = playlist_results
+ return result
def process_info(self, info_dict):
"""Process a single dictionary returned by an InfoExtractor."""
+ #We increment the download count here to match the previous behaviour.
+ self.increment_downloads()
+
info_dict['fulltitle'] = info_dict['title']
if len(info_dict['title']) > 200:
info_dict['title'] = info_dict['title'][:197] + u'...'
@@ -564,53 +666,14 @@ class FileDownloader(object):
raise SameFileError(self.params['outtmpl'])
for url in url_list:
- suitable_found = False
- for ie in self._ies:
- # Go to next InfoExtractor if not suitable
- if not ie.suitable(url):
- continue
-
- # Warn if the _WORKING attribute is False
- if not ie.working():
- self.report_warning(u'the program functionality for this site has been marked as broken, '
- u'and will probably not work. If you want to go on, use the -i option.')
-
- # Suitable InfoExtractor found
- suitable_found = True
-
- # Extract information from URL and process it
- try:
- videos = ie.extract(url)
- except ExtractorError as de: # An error we somewhat expected
- self.trouble(u'ERROR: ' + compat_str(de), de.format_traceback())
- break
- except MaxDownloadsReached:
- self.to_screen(u'[info] Maximum number of downloaded files reached.')
- raise
- except Exception as e:
- if self.params.get('ignoreerrors', False):
- self.report_error(u'' + compat_str(e), tb=compat_str(traceback.format_exc()))
- break
- else:
- raise
-
- if len(videos or []) > 1 and self.fixed_template():
- raise SameFileError(self.params['outtmpl'])
-
- for video in videos or []:
- video['extractor'] = ie.IE_NAME
- try:
- self.increment_downloads()
- self.process_info(video)
- except UnavailableVideoError:
- self.to_stderr(u"\n")
- self.report_error(u'unable to download video')
-
- # Suitable InfoExtractor had been found; go to next URL
- break
-
- if not suitable_found:
- self.report_error(u'no suitable InfoExtractor: %s' % url)
+ try:
+ #It also downloads the videos
+ videos = self.extract_info(url)
+ except UnavailableVideoError:
+ self.trouble(u'\nERROR: unable to download video')
+ except MaxDownloadsReached:
+ self.to_screen(u'[info] Maximum number of downloaded files reached.')
+ raise
return self._download_retcode
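
With this change FileDownloader.download() no longer loops over the InfoExtractors itself; it calls extract_info() once per URL and lets process_ie_result() take care of the actual downloading. A rough usage sketch under that assumption (the option values and the playlist URL are placeholders, not taken from the diff):

# Hypothetical driver code; the options and the URL are placeholders.
fd = FileDownloader({'outtmpl': u'%(title)s-%(id)s.%(ext)s',
                     'playliststart': 1,
                     'playlistend': -1,
                     'ignoreerrors': False})
fd.add_info_extractor(YoutubePlaylistIE())
fd.add_info_extractor(YoutubeIE())
# Each playlist entry is resolved and downloaded via
# extract_info() -> process_ie_result() -> process_info().
retcode = fd.download([u'http://www.youtube.com/playlist?list=PL0000000000'])
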
diff --git a/youtube_dl/InfoExtractors.py b/youtube_dl/InfoExtractors.py
index bac3a747d..ae36558d7 100755
--- a/youtube_dl/InfoExtractors.py
+++ b/youtube_dl/InfoExtractors.py
@@ -143,6 +143,28 @@ class InfoExtractor(object):
dump = base64.b64encode(webpage_bytes).decode('ascii')
self._downloader.to_screen(dump)
return webpage_bytes.decode(encoding, 'replace')
+
+ #Methods added following #608
+ #They set the correct value of the '_type' key
+ def video_result(self, video_info):
+ """Returns a video"""
+ video_info['_type'] = 'video'
+ return video_info
+ def url_result(self, url, ie=None):
+ """Returns a url that points to a page that should be processed"""
+ #TODO: ie should be the class used for getting the info
+ video_info = {'_type': 'url',
+ 'url': url}
+ return video_info
+ def playlist_result(self, entries, playlist_id=None, playlist_title=None):
+ """Returns a playlist"""
+ video_info = {'_type': 'playlist',
+ 'entries': entries}
+ if playlist_id:
+ video_info['id'] = playlist_id
+ if playlist_title:
+ video_info['title'] = playlist_title
+ return video_info
class YoutubeIE(InfoExtractor):
@@ -706,8 +728,7 @@ class MetacafeIE(InfoExtractor):
# Check if video comes from YouTube
mobj2 = re.match(r'^yt-(.*)$', video_id)
if mobj2 is not None:
- self._downloader.download(['http://www.youtube.com/watch?v=%s' % mobj2.group(1)])
- return
+ return [self.url_result('http://www.youtube.com/watch?v=%s' % mobj2.group(1))]
# Retrieve video webpage to extract further information
request = compat_urllib_request.Request('http://www.metacafe.com/watch/%s/' % video_id)
@@ -1348,7 +1369,7 @@ class GenericIE(InfoExtractor):
self._downloader.to_screen(u'[redirect] Following redirect to %s' % new_url)
def _test_redirect(self, url):
- """Check if it is a redirect, like url shorteners, in case restart chain."""
+ """Check if it is a redirect, like url shorteners, in case return the new url."""
class HeadRequest(compat_urllib_request.Request):
def get_method(self):
return "HEAD"
@@ -1399,11 +1420,11 @@ class GenericIE(InfoExtractor):
return False
self.report_following_redirect(new_url)
- self._downloader.download([new_url])
- return True
+ return new_url
def _real_extract(self, url):
- if self._test_redirect(url): return
+ new_url = self._test_redirect(url)
+ if new_url: return [self.url_result(new_url)]
video_id = url.split('/')[-1]
try:
@@ -1794,23 +1815,9 @@ class YoutubePlaylistIE(InfoExtractor):
page_num += 1
videos = [v[1] for v in sorted(videos)]
- total = len(videos)
-
- playliststart = self._downloader.params.get('playliststart', 1) - 1
- playlistend = self._downloader.params.get('playlistend', -1)
- if playlistend == -1:
- videos = videos[playliststart:]
- else:
- videos = videos[playliststart:playlistend]
-
- if len(videos) == total:
- self._downloader.to_screen(u'[youtube] PL %s: Found %i videos' % (playlist_id, total))
- else:
- self._downloader.to_screen(u'[youtube] PL %s: Found %i videos, downloading %i' % (playlist_id, total, len(videos)))
- for video in videos:
- self._downloader.download([video])
- return
+ url_results = [self.url_result(url) for url in videos]
+ return [self.playlist_result(url_results, playlist_id)]
class YoutubeChannelIE(InfoExtractor):
@@ -1860,9 +1867,9 @@ class YoutubeChannelIE(InfoExtractor):
self._downloader.to_screen(u'[youtube] Channel %s: Found %i videos' % (channel_id, len(video_ids)))
- for id in video_ids:
- self._downloader.download(['http://www.youtube.com/watch?v=%s' % id])
- return
+ urls = ['http://www.youtube.com/watch?v=%s' % id for id in video_ids]
+ url_entries = [self.url_result(url) for url in urls]
+ return [self.playlist_result(url_entries, channel_id)]
class YoutubeUserIE(InfoExtractor):
@@ -1932,20 +1939,9 @@ class YoutubeUserIE(InfoExtractor):
pagenum += 1
- all_ids_count = len(video_ids)
- playliststart = self._downloader.params.get('playliststart', 1) - 1
- playlistend = self._downloader.params.get('playlistend', -1)
-
- if playlistend == -1:
- video_ids = video_ids[playliststart:]
- else:
- video_ids = video_ids[playliststart:playlistend]
-
- self._downloader.to_screen(u"[youtube] user %s: Collected %d video ids (downloading %d of them)" %
- (username, all_ids_count, len(video_ids)))
-
- for video_id in video_ids:
- self._downloader.download(['http://www.youtube.com/watch?v=%s' % video_id])
+ urls = ['http://www.youtube.com/watch?v=%s' % video_id for video_id in video_ids]
+ url_results = [self.url_result(url) for url in urls]
+ return [self.playlist_result(url_results, playlist_title = username)]
class BlipTVUserIE(InfoExtractor):
@@ -2023,20 +2019,12 @@ class BlipTVUserIE(InfoExtractor):
pagenum += 1
- all_ids_count = len(video_ids)
- playliststart = self._downloader.params.get('playliststart', 1) - 1
- playlistend = self._downloader.params.get('playlistend', -1)
-
- if playlistend == -1:
- video_ids = video_ids[playliststart:]
- else:
- video_ids = video_ids[playliststart:playlistend]
-
self._downloader.to_screen(u"[%s] user %s: Collected %d video ids (downloading %d of them)" %
(self.IE_NAME, username, all_ids_count, len(video_ids)))
- for video_id in video_ids:
- self._downloader.download([u'http://blip.tv/'+video_id])
+ urls = [u'http://blip.tv/%s' % video_id for video_id in video_ids]
+ url_entries = [self.url_result(url) for url in urls]
+ return [self.playlist_result(url_entries, playlist_title = username)]
class DepositFilesIE(InfoExtractor):
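
Under the new scheme an extractor no longer calls self._downloader.download() on the URLs it discovers; it returns url/playlist results and lets the FileDownloader resolve them, as MetacafeIE and the YouTube playlist/channel/user extractors above now do. A hypothetical extractor sketching that pattern (class name, IE_NAME, _VALID_URL and URLs are invented; it assumes the usual imports of InfoExtractors.py such as re):

class ExamplePlaylistIE(InfoExtractor):
    """Hypothetical extractor illustrating url_result()/playlist_result()."""
    _VALID_URL = r'(?:http://)?(?:www\.)?example\.com/playlist/(\w+)'
    IE_NAME = u'example:playlist'

    def _real_extract(self, url):
        playlist_id = re.match(self._VALID_URL, url).group(1)
        # Pretend these video page URLs were scraped from the playlist page.
        video_urls = [u'http://www.example.com/watch/1',
                      u'http://www.example.com/watch/2']
        url_entries = [self.url_result(video_url) for video_url in video_urls]
        # FileDownloader.process_ie_result() picks a suitable InfoExtractor for
        # each entry and applies --playlist-start/--playlist-end.
        return [self.playlist_result(url_entries, playlist_id)]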