Diffstat (limited to 'youtube_dl')
 youtube_dl/FileDownloader.py  (-rw-r--r--)  | 174
 youtube_dl/InfoExtractors.py  (-rwxr-xr-x)  | 495
 youtube_dl/__init__.py        (-rw-r--r--)  |  11
 youtube_dl/version.py         (-rw-r--r--)  |   2
 4 files changed, 368 insertions(+), 314 deletions(-)
diff --git a/youtube_dl/FileDownloader.py b/youtube_dl/FileDownloader.py
index d4f9cc621..7139adf6b 100644
--- a/youtube_dl/FileDownloader.py
+++ b/youtube_dl/FileDownloader.py
@@ -17,6 +17,7 @@ if os.name == 'nt':
import ctypes
from .utils import *
+from .InfoExtractors import get_info_extractor
class FileDownloader(object):
@@ -393,6 +394,8 @@ class FileDownloader(object):
autonumber_size = 5
autonumber_templ = u'%0' + str(autonumber_size) + u'd'
template_dict['autonumber'] = autonumber_templ % self._num_downloads
+ if template_dict['playlist_index'] is not None:
+ template_dict['playlist_index'] = u'%05d' % template_dict['playlist_index']
sanitize = lambda k,v: sanitize_filename(
u'NA' if v is None else compat_str(v),
@@ -422,10 +425,126 @@ class FileDownloader(object):
if re.search(rejecttitle, title, re.IGNORECASE):
return u'"' + title + '" title matched reject pattern "' + rejecttitle + '"'
return None
+
+ def extract_info(self, url, download = True, ie_name = None):
+ '''
+ Returns a list with a dictionary for each video we find.
+ If 'download', also downloads the videos.
+ '''
+ suitable_found = False
+
+ #We copy the original list
+ ies = list(self._ies)
+
+ if ie_name is not None:
+ #We put in the first place the given info extractor
+ first_ie = get_info_extractor(ie_name)()
+ first_ie.set_downloader(self)
+ ies.insert(0, first_ie)
+
+ for ie in ies:
+ # Go to next InfoExtractor if not suitable
+ if not ie.suitable(url):
+ continue
+
+ # Warn if the _WORKING attribute is False
+ if not ie.working():
+ self.to_stderr(u'WARNING: the program functionality for this site has been marked as broken, '
+ u'and will probably not work. If you want to go on, use the -i option.')
+
+ # Suitable InfoExtractor found
+ suitable_found = True
+
+ # Extract information from URL and process it
+ try:
+ ie_results = ie.extract(url)
+ if ie_results is None: # Finished already (backwards compatibility; listformats and friends should be moved here)
+ break
+ results = []
+ for ie_result in ie_results:
+ if not 'extractor' in ie_result:
+ #The extractor has already been set somewhere else
+ ie_result['extractor'] = ie.IE_NAME
+ results.append(self.process_ie_result(ie_result, download))
+ return results
+ except ExtractorError as de: # An error we somewhat expected
+ self.trouble(u'ERROR: ' + compat_str(de), de.format_traceback())
+ break
+ except Exception as e:
+ if self.params.get('ignoreerrors', False):
+ self.trouble(u'ERROR: ' + compat_str(e), tb=compat_str(traceback.format_exc()))
+ break
+ else:
+ raise
+ if not suitable_found:
+ self.trouble(u'ERROR: no suitable InfoExtractor: %s' % url)
+
+ def process_ie_result(self, ie_result, download = True):
+ """
+ Take the result of the ie and return a list of videos.
+ For url elements it will search the suitable ie and get the videos
+ For playlist elements it will process each of the elements of the 'entries' key
+
+ It will also download the videos if 'download'.
+ """
+ result_type = ie_result.get('_type', 'video') #If not given we suppose it's a video, to support the default old system
+ if result_type == 'video':
+ if 'playlist' not in ie_result:
+ #It isn't part of a playlist
+ ie_result['playlist'] = None
+ ie_result['playlist_index'] = None
+ if download:
+ #Do the download:
+ self.process_info(ie_result)
+ return ie_result
+ elif result_type == 'url':
+ #We get the video pointed by the url
+ result = self.extract_info(ie_result['url'], download, ie_name = ie_result['ie_key'])[0]
+ return result
+ elif result_type == 'playlist':
+ #We process each entry in the playlist
+ playlist = ie_result.get('title', None) or ie_result.get('id', None)
+ self.to_screen(u'[download] Downloading playlist: %s' % playlist)
+
+ playlist_results = []
+
+ n_all_entries = len(ie_result['entries'])
+ playliststart = self.params.get('playliststart', 1) - 1
+ playlistend = self.params.get('playlistend', -1)
+
+ if playlistend == -1:
+ entries = ie_result['entries'][playliststart:]
+ else:
+ entries = ie_result['entries'][playliststart:playlistend]
+
+ n_entries = len(entries)
+
+ self.to_screen(u"[%s] playlist '%s': Collected %d video ids (downloading %d of them)" %
+ (ie_result['extractor'], playlist, n_all_entries, n_entries))
+
+ for i,entry in enumerate(entries,1):
+ self.to_screen(u'[download] Downloading video #%s of %s' %(i, n_entries))
+ entry_result = self.process_ie_result(entry, False)
+ entry_result['playlist'] = playlist
+ entry_result['playlist_index'] = i + playliststart
+ #We must do the download here to correctly set the 'playlist' key
+ if download:
+ self.process_info(entry_result)
+ playlist_results.append(entry_result)
+ result = ie_result.copy()
+ result['entries'] = playlist_results
+ return result
def process_info(self, info_dict):
"""Process a single dictionary returned by an InfoExtractor."""
+ #We increment the download count here to match the previous behaviour.
+ self.increment_downloads()
+
+ info_dict['fulltitle'] = info_dict['title']
+ if len(info_dict['title']) > 200:
+ info_dict['title'] = info_dict['title'][:197] + u'...'
+
# Keep for backwards compatibility
info_dict['stitle'] = info_dict['title']
@@ -560,53 +679,14 @@ class FileDownloader(object):
raise SameFileError(self.params['outtmpl'])
for url in url_list:
- suitable_found = False
- for ie in self._ies:
- # Go to next InfoExtractor if not suitable
- if not ie.suitable(url):
- continue
-
- # Warn if the _WORKING attribute is False
- if not ie.working():
- self.report_warning(u'the program functionality for this site has been marked as broken, '
- u'and will probably not work. If you want to go on, use the -i option.')
-
- # Suitable InfoExtractor found
- suitable_found = True
-
- # Extract information from URL and process it
- try:
- videos = ie.extract(url)
- except ExtractorError as de: # An error we somewhat expected
- self.trouble(u'ERROR: ' + compat_str(de), de.format_traceback())
- break
- except MaxDownloadsReached:
- self.to_screen(u'[info] Maximum number of downloaded files reached.')
- raise
- except Exception as e:
- if self.params.get('ignoreerrors', False):
- self.report_error(u'' + compat_str(e), tb=compat_str(traceback.format_exc()))
- break
- else:
- raise
-
- if len(videos or []) > 1 and self.fixed_template():
- raise SameFileError(self.params['outtmpl'])
-
- for video in videos or []:
- video['extractor'] = ie.IE_NAME
- try:
- self.increment_downloads()
- self.process_info(video)
- except UnavailableVideoError:
- self.to_stderr(u"\n")
- self.report_error(u'unable to download video')
-
- # Suitable InfoExtractor had been found; go to next URL
- break
-
- if not suitable_found:
- self.report_error(u'no suitable InfoExtractor: %s' % url)
+ try:
+ #It also downloads the videos
+ videos = self.extract_info(url)
+ except UnavailableVideoError:
+ self.trouble(u'\nERROR: unable to download video')
+ except MaxDownloadsReached:
+ self.to_screen(u'[info] Maximum number of downloaded files reached.')
+ raise
return self._download_retcode
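
To make the new flow concrete, here is a small illustrative sketch (not part of the patch) of the three result shapes that process_ie_result() dispatches on via the optional '_type' key. Only the keys and the 'video' default come from the hunk above; the ids and URLs are made-up placeholders.

# Sketch only: the result dictionaries FileDownloader.process_ie_result() expects.

video_result = {                 # '_type' missing or 'video': handed to process_info()
    '_type': 'video',
    'id': 'abc123',
    'title': 'Some video',
    'url': 'http://example.com/video.mp4',
}

url_result = {                   # 'url': re-dispatched through extract_info(url, ie_name=ie_key)
    '_type': 'url',
    'url': 'http://www.youtube.com/watch?v=abc123',
    'ie_key': 'Youtube',
}

playlist_result = {              # 'playlist': every entry is processed recursively, and the
    '_type': 'playlist',         # playliststart/playlistend slicing now happens here
    'id': 'PL123',
    'title': 'Some playlist',
    'entries': [url_result],
}

def result_type(ie_result):
    # Same default as the patch: a dict without '_type' is treated as a plain video.
    return ie_result.get('_type', 'video')

for r in (video_result, url_result, playlist_result):
    print(result_type(r))        # prints: video, url, playlist
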
diff --git a/youtube_dl/InfoExtractors.py b/youtube_dl/InfoExtractors.py
index 7deb488a9..6a6545c9b 100755
--- a/youtube_dl/InfoExtractors.py
+++ b/youtube_dl/InfoExtractors.py
@@ -116,7 +116,7 @@ class InfoExtractor(object):
if note is None:
note = u'Downloading video webpage'
if note is not False:
- self._downloader.to_screen(u'[%s] %s: %s' % (self.IE_NAME, video_id, note))
+ self.to_screen(u'%s: %s' % (video_id, note))
try:
return compat_urllib_request.urlopen(url_or_request)
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
@@ -139,11 +139,46 @@ class InfoExtractor(object):
url = url_or_request.get_full_url()
except AttributeError:
url = url_or_request
- self._downloader.to_screen(u'Dumping request to ' + url)
+ self.to_screen(u'Dumping request to ' + url)
dump = base64.b64encode(webpage_bytes).decode('ascii')
self._downloader.to_screen(dump)
return webpage_bytes.decode(encoding, 'replace')
+ def to_screen(self, msg):
+ """Print msg to screen, prefixing it with '[ie_name]'"""
+ self._downloader.to_screen(u'[%s] %s' % (self.IE_NAME, msg))
+
+ def report_extraction(self, id_or_name):
+ """Report information extraction."""
+ self.to_screen(u'%s: Extracting information' % id_or_name)
+
+ def report_age_confirmation(self):
+ """Report attempt to confirm age."""
+ self.to_screen(u'Confirming age')
+
+ #Methods for following #608
+ #They set the correct value of the '_type' key
+ def video_result(self, video_info):
+ """Returns a video"""
+ video_info['_type'] = 'video'
+ return video_info
+ def url_result(self, url, ie=None):
+ """Returns a url that points to a page that should be processed"""
+ #TODO: ie should be the class used for getting the info
+ video_info = {'_type': 'url',
+ 'url': url,
+ 'ie_key': ie}
+ return video_info
+ def playlist_result(self, entries, playlist_id=None, playlist_title=None):
+ """Returns a playlist"""
+ video_info = {'_type': 'playlist',
+ 'entries': entries}
+ if playlist_id:
+ video_info['id'] = playlist_id
+ if playlist_title:
+ video_info['title'] = playlist_title
+ return video_info
+
class YoutubeIE(InfoExtractor):
"""Information extractor for youtube.com."""
@@ -213,48 +248,44 @@ class YoutubeIE(InfoExtractor):
def report_lang(self):
"""Report attempt to set language."""
- self._downloader.to_screen(u'[youtube] Setting language')
+ self.to_screen(u'Setting language')
def report_login(self):
"""Report attempt to log in."""
- self._downloader.to_screen(u'[youtube] Logging in')
-
- def report_age_confirmation(self):
- """Report attempt to confirm age."""
- self._downloader.to_screen(u'[youtube] Confirming age')
+ self.to_screen(u'Logging in')
def report_video_webpage_download(self, video_id):
"""Report attempt to download video webpage."""
- self._downloader.to_screen(u'[youtube] %s: Downloading video webpage' % video_id)
+ self.to_screen(u'%s: Downloading video webpage' % video_id)
def report_video_info_webpage_download(self, video_id):
"""Report attempt to download video info webpage."""
- self._downloader.to_screen(u'[youtube] %s: Downloading video info webpage' % video_id)
+ self.to_screen(u'%s: Downloading video info webpage' % video_id)
def report_video_subtitles_download(self, video_id):
"""Report attempt to download video info webpage."""
- self._downloader.to_screen(u'[youtube] %s: Checking available subtitles' % video_id)
+ self.to_screen(u'%s: Checking available subtitles' % video_id)
def report_video_subtitles_request(self, video_id, sub_lang, format):
"""Report attempt to download video info webpage."""
- self._downloader.to_screen(u'[youtube] %s: Downloading video subtitles for %s.%s' % (video_id, sub_lang, format))
+ self.to_screen(u'%s: Downloading video subtitles for %s.%s' % (video_id, sub_lang, format))
def report_video_subtitles_available(self, video_id, sub_lang_list):
"""Report available subtitles."""
sub_lang = ",".join(list(sub_lang_list.keys()))
- self._downloader.to_screen(u'[youtube] %s: Available subtitles for video: %s' % (video_id, sub_lang))
+ self.to_screen(u'%s: Available subtitles for video: %s' % (video_id, sub_lang))
def report_information_extraction(self, video_id):
"""Report attempt to extract video information."""
- self._downloader.to_screen(u'[youtube] %s: Extracting video information' % video_id)
+ self.to_screen(u'%s: Extracting video information' % video_id)
def report_unavailable_format(self, video_id, format):
"""Report extracted video URL."""
- self._downloader.to_screen(u'[youtube] %s: Format %s not available' % (video_id, format))
+ self.to_screen(u'%s: Format %s not available' % (video_id, format))
def report_rtmp_download(self):
"""Indicate the download will use the RTMP protocol."""
- self._downloader.to_screen(u'[youtube] RTMP download detected')
+ self.to_screen(u'RTMP download detected')
def _get_available_subtitles(self, video_id):
self.report_video_subtitles_download(video_id)
@@ -591,8 +622,7 @@ class YoutubeIE(InfoExtractor):
format_list = available_formats
existing_formats = [x for x in format_list if x in url_map]
if len(existing_formats) == 0:
- self._downloader.report_error(u'no known formats available for video')
- return
+ raise ExtractorError(u'no known formats available for video')
if self._downloader.params.get('listformats', None):
self._print_formats(existing_formats)
return
@@ -612,11 +642,9 @@ class YoutubeIE(InfoExtractor):
video_url_list = [(rf, url_map[rf])]
break
if video_url_list is None:
- self._downloader.report_error(u'requested format not available')
- return
+ raise ExtractorError(u'requested format not available')
else:
- self._downloader.report_error(u'no conn or url_encoded_fmt_stream_map information found in video info')
- return
+ raise ExtractorError(u'no conn or url_encoded_fmt_stream_map information found in video info')
results = []
for format_param, video_real_url in video_url_list:
@@ -657,19 +685,11 @@ class MetacafeIE(InfoExtractor):
def report_disclaimer(self):
"""Report disclaimer retrieval."""
- self._downloader.to_screen(u'[metacafe] Retrieving disclaimer')
-
- def report_age_confirmation(self):
- """Report attempt to confirm age."""
- self._downloader.to_screen(u'[metacafe] Confirming age')
+ self.to_screen(u'Retrieving disclaimer')
def report_download_webpage(self, video_id):
"""Report webpage download."""
- self._downloader.to_screen(u'[metacafe] %s: Downloading webpage' % video_id)
-
- def report_extraction(self, video_id):
- """Report information extraction."""
- self._downloader.to_screen(u'[metacafe] %s: Extracting information' % video_id)
+ self.to_screen(u'%s: Downloading webpage' % video_id)
def _real_initialize(self):
# Retrieve disclaimer
@@ -706,17 +726,10 @@ class MetacafeIE(InfoExtractor):
# Check if video comes from YouTube
mobj2 = re.match(r'^yt-(.*)$', video_id)
if mobj2 is not None:
- self._downloader.download(['http://www.youtube.com/watch?v=%s' % mobj2.group(1)])
- return
+ return [self.url_result('http://www.youtube.com/watch?v=%s' % mobj2.group(1), 'Youtube')]
# Retrieve video webpage to extract further information
- request = compat_urllib_request.Request('http://www.metacafe.com/watch/%s/' % video_id)
- try:
- self.report_download_webpage(video_id)
- webpage = compat_urllib_request.urlopen(request).read()
- except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.report_error(u'unable retrieve video webpage: %s' % compat_str(err))
- return
+ webpage = self._download_webpage('http://www.metacafe.com/watch/%s/' % video_id, video_id)
# Extract URL, uploader and title from webpage
self.report_extraction(video_id)
@@ -741,13 +754,13 @@ class MetacafeIE(InfoExtractor):
if 'mediaData' not in vardict:
self._downloader.report_error(u'unable to extract media URL')
return
- mobj = re.search(r'"mediaURL":"(http.*?)","key":"(.*?)"', vardict['mediaData'][0])
+ mobj = re.search(r'"mediaURL":"(?P<mediaURL>http.*?)",(.*?)"key":"(?P<key>.*?)"', vardict['mediaData'][0])
if mobj is None:
self._downloader.report_error(u'unable to extract media URL')
return
- mediaURL = mobj.group(1).replace('\\/', '/')
+ mediaURL = mobj.group('mediaURL').replace('\\/', '/')
video_extension = mediaURL[-3:]
- video_url = '%s?__gda__=%s' % (mediaURL, mobj.group(2))
+ video_url = '%s?__gda__=%s' % (mediaURL, mobj.group('key'))
mobj = re.search(r'(?im)<title>(.*) - Video</title>', webpage)
if mobj is None:
@@ -781,10 +794,6 @@ class DailymotionIE(InfoExtractor):
def __init__(self, downloader=None):
InfoExtractor.__init__(self, downloader)
- def report_extraction(self, video_id):
- """Report information extraction."""
- self._downloader.to_screen(u'[dailymotion] %s: Extracting information' % video_id)
-
def _real_extract(self, url):
# Extract id and simplified title from URL
mobj = re.match(self._VALID_URL, url)
@@ -812,7 +821,7 @@ class DailymotionIE(InfoExtractor):
for key in ['hd1080URL', 'hd720URL', 'hqURL', 'sdURL', 'ldURL', 'video_url']:
if key in flashvars:
max_quality = key
- self._downloader.to_screen(u'[dailymotion] Using %s' % key)
+ self.to_screen(u'Using %s' % key)
break
else:
self._downloader.report_error(u'unable to extract video URL')
@@ -871,11 +880,7 @@ class PhotobucketIE(InfoExtractor):
def report_download_webpage(self, video_id):
"""Report webpage download."""
- self._downloader.to_screen(u'[photobucket] %s: Downloading webpage' % video_id)
-
- def report_extraction(self, video_id):
- """Report information extraction."""
- self._downloader.to_screen(u'[photobucket] %s: Extracting information' % video_id)
+ self.to_screen(u'%s: Downloading webpage' % video_id)
def _real_extract(self, url):
# Extract id from URL
@@ -940,11 +945,7 @@ class YahooIE(InfoExtractor):
def report_download_webpage(self, video_id):
"""Report webpage download."""
- self._downloader.to_screen(u'[video.yahoo] %s: Downloading webpage' % video_id)
-
- def report_extraction(self, video_id):
- """Report information extraction."""
- self._downloader.to_screen(u'[video.yahoo] %s: Extracting information' % video_id)
+ self.to_screen(u'%s: Downloading webpage' % video_id)
def _real_extract(self, url, new_video=True):
# Extract ID from URL
@@ -1080,11 +1081,7 @@ class VimeoIE(InfoExtractor):
def report_download_webpage(self, video_id):
"""Report webpage download."""
- self._downloader.to_screen(u'[vimeo] %s: Downloading webpage' % video_id)
-
- def report_extraction(self, video_id):
- """Report information extraction."""
- self._downloader.to_screen(u'[vimeo] %s: Extracting information' % video_id)
+ self.to_screen(u'%s: Downloading webpage' % video_id)
def _real_extract(self, url, new_video=True):
# Extract ID from URL
@@ -1166,7 +1163,7 @@ class VimeoIE(InfoExtractor):
video_quality = files[quality][0][2]
video_codec = files[quality][0][0]
video_extension = files[quality][0][1]
- self._downloader.to_screen(u'[vimeo] %s: Downloading %s file at %s quality' % (video_id, video_codec.upper(), video_quality))
+ self.to_screen(u'%s: Downloading %s file at %s quality' % (video_id, video_codec.upper(), video_quality))
break
else:
self._downloader.report_error(u'no known codec found')
@@ -1201,11 +1198,7 @@ class ArteTvIE(InfoExtractor):
def report_download_webpage(self, video_id):
"""Report webpage download."""
- self._downloader.to_screen(u'[arte.tv] %s: Downloading webpage' % video_id)
-
- def report_extraction(self, video_id):
- """Report information extraction."""
- self._downloader.to_screen(u'[arte.tv] %s: Extracting information' % video_id)
+ self.to_screen(u'%s: Downloading webpage' % video_id)
def fetch_webpage(self, url):
request = compat_urllib_request.Request(url)
@@ -1336,19 +1329,15 @@ class GenericIE(InfoExtractor):
def report_download_webpage(self, video_id):
"""Report webpage download."""
if not self._downloader.params.get('test', False):
- self._downloader.to_screen(u'WARNING: Falling back on generic information extractor.')
- self._downloader.to_screen(u'[generic] %s: Downloading webpage' % video_id)
-
- def report_extraction(self, video_id):
- """Report information extraction."""
- self._downloader.to_screen(u'[generic] %s: Extracting information' % video_id)
+ self._downloader.report_warning(u'Falling back on generic information extractor.')
+ self.to_screen(u'%s: Downloading webpage' % video_id)
def report_following_redirect(self, new_url):
"""Report information extraction."""
self._downloader.to_screen(u'[redirect] Following redirect to %s' % new_url)
def _test_redirect(self, url):
- """Check if it is a redirect, like url shorteners, in case restart chain."""
+ """Check if it is a redirect, like url shorteners; if so, return the new url."""
class HeadRequest(compat_urllib_request.Request):
def get_method(self):
return "HEAD"
@@ -1399,11 +1388,11 @@ class GenericIE(InfoExtractor):
return False
self.report_following_redirect(new_url)
- self._downloader.download([new_url])
- return True
+ return new_url
def _real_extract(self, url):
- if self._test_redirect(url): return
+ new_url = self._test_redirect(url)
+ if new_url: return [self.url_result(new_url)]
video_id = url.split('/')[-1]
try:
@@ -1494,11 +1483,9 @@ class YoutubeSearchIE(InfoExtractor):
prefix = prefix[8:]
query = query.encode('utf-8')
if prefix == '':
- self._download_n_results(query, 1)
- return
+ return self._get_n_results(query, 1)
elif prefix == 'all':
- self._download_n_results(query, self._max_youtube_results)
- return
+ self._get_n_results(query, self._max_youtube_results)
else:
try:
n = int(prefix)
@@ -1508,14 +1495,12 @@ class YoutubeSearchIE(InfoExtractor):
elif n > self._max_youtube_results:
self._downloader.report_warning(u'ytsearch returns max %i results (you requested %i)' % (self._max_youtube_results, n))
n = self._max_youtube_results
- self._download_n_results(query, n)
- return
+ return self._get_n_results(query, n)
except ValueError: # parsing prefix as integer fails
- self._download_n_results(query, 1)
- return
+ return self._get_n_results(query, 1)
- def _download_n_results(self, query, n):
- """Downloads a specified number of results for a query"""
+ def _get_n_results(self, query, n):
+ """Get a specified number of results for a query"""
video_ids = []
pagenum = 0
@@ -1544,9 +1529,8 @@ class YoutubeSearchIE(InfoExtractor):
if len(video_ids) > n:
video_ids = video_ids[:n]
- for id in video_ids:
- self._downloader.download(['http://www.youtube.com/watch?v=%s' % id])
- return
+ videos = [self.url_result('http://www.youtube.com/watch?v=%s' % id, 'Youtube') for id in video_ids]
+ return videos
class GoogleSearchIE(InfoExtractor):
@@ -1564,7 +1548,7 @@ class GoogleSearchIE(InfoExtractor):
def report_download_page(self, query, pagenum):
"""Report attempt to download playlist page with given number."""
query = query.decode(preferredencoding())
- self._downloader.to_screen(u'[video.google] query "%s": Downloading page %s' % (query, pagenum))
+ self.to_screen(u'query "%s": Downloading page %s' % (query, pagenum))
def _real_extract(self, query):
mobj = re.match(self._VALID_URL, query)
@@ -1648,7 +1632,7 @@ class YahooSearchIE(InfoExtractor):
def report_download_page(self, query, pagenum):
"""Report attempt to download playlist page with given number."""
query = query.decode(preferredencoding())
- self._downloader.to_screen(u'[video.yahoo] query "%s": Downloading page %s' % (query, pagenum))
+ self.to_screen(u'query "%s": Downloading page %s' % (query, pagenum))
def _real_extract(self, query):
mobj = re.match(self._VALID_URL, query)
@@ -1778,9 +1762,15 @@ class YoutubePlaylistIE(InfoExtractor):
self._downloader.report_error(u'Invalid JSON in API response: ' + compat_str(err))
return
- if not 'feed' in response or not 'entry' in response['feed']:
+ if 'feed' not in response:
self._downloader.report_error(u'Got a malformed response from YouTube API')
return
+ if 'entry' not in response['feed']:
+ # Number of videos is a multiple of self._MAX_RESULTS
+ break
+
+ playlist_title = response['feed']['title']['$t']
+
videos += [ (entry['yt$position']['$t'], entry['content']['src'])
for entry in response['feed']['entry']
if 'content' in entry ]
@@ -1790,37 +1780,31 @@ class YoutubePlaylistIE(InfoExtractor):
page_num += 1
videos = [v[1] for v in sorted(videos)]
- total = len(videos)
-
- playliststart = self._downloader.params.get('playliststart', 1) - 1
- playlistend = self._downloader.params.get('playlistend', -1)
- if playlistend == -1:
- videos = videos[playliststart:]
- else:
- videos = videos[playliststart:playlistend]
-
- if len(videos) == total:
- self._downloader.to_screen(u'[youtube] PL %s: Found %i videos' % (playlist_id, total))
- else:
- self._downloader.to_screen(u'[youtube] PL %s: Found %i videos, downloading %i' % (playlist_id, total, len(videos)))
- for video in videos:
- self._downloader.download([video])
- return
+ url_results = [self.url_result(url, 'Youtube') for url in videos]
+ return [self.playlist_result(url_results, playlist_id, playlist_title)]
class YoutubeChannelIE(InfoExtractor):
"""Information Extractor for YouTube channels."""
- _VALID_URL = r"^(?:https?://)?(?:youtu\.be|(?:\w+\.)?youtube(?:-nocookie)?\.com)/channel/([0-9A-Za-z_-]+)(?:/.*)?$"
+ _VALID_URL = r"^(?:https?://)?(?:youtu\.be|(?:\w+\.)?youtube(?:-nocookie)?\.com)/channel/([0-9A-Za-z_-]+)"
_TEMPLATE_URL = 'http://www.youtube.com/channel/%s/videos?sort=da&flow=list&view=0&page=%s&gl=US&hl=en'
- _MORE_PAGES_INDICATOR = u"Next \N{RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK}"
+ _MORE_PAGES_INDICATOR = 'yt-uix-load-more'
+ _MORE_PAGES_URL = 'http://www.youtube.com/channel_ajax?action_load_more_videos=1&flow=list&paging=%s&view=0&sort=da&channel_id=%s'
IE_NAME = u'youtube:channel'
def report_download_page(self, channel_id, pagenum):
"""Report attempt to download channel page with given number."""
self._downloader.to_screen(u'[youtube] Channel %s: Downloading page #%s' % (channel_id, pagenum))
+ def extract_videos_from_page(self, page):
+ ids_in_page = []
+ for mobj in re.finditer(r'href="/watch\?v=([0-9A-Za-z_-]+)&?', page):
+ if mobj.group(1) not in ids_in_page:
+ ids_in_page.append(mobj.group(1))
+ return ids_in_page
+
def _real_extract(self, url):
# Extract channel id
mobj = re.match(self._VALID_URL, url)
@@ -1828,37 +1812,51 @@ class YoutubeChannelIE(InfoExtractor):
self._downloader.report_error(u'invalid url: %s' % url)
return
- # Download channel pages
+ # Download channel page
channel_id = mobj.group(1)
video_ids = []
pagenum = 1
- while True:
- self.report_download_page(channel_id, pagenum)
- url = self._TEMPLATE_URL % (channel_id, pagenum)
- request = compat_urllib_request.Request(url)
- try:
- page = compat_urllib_request.urlopen(request).read().decode('utf8')
- except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.report_error(u'unable to download webpage: %s' % compat_str(err))
- return
+ self.report_download_page(channel_id, pagenum)
+ url = self._TEMPLATE_URL % (channel_id, pagenum)
+ request = compat_urllib_request.Request(url)
+ try:
+ page = compat_urllib_request.urlopen(request).read().decode('utf8')
+ except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+ self._downloader.report_error(u'unable to download webpage: %s' % compat_str(err))
+ return
- # Extract video identifiers
- ids_in_page = []
- for mobj in re.finditer(r'href="/watch\?v=([0-9A-Za-z_-]+)&', page):
- if mobj.group(1) not in ids_in_page:
- ids_in_page.append(mobj.group(1))
- video_ids.extend(ids_in_page)
+ # Extract video identifiers
+ ids_in_page = self.extract_videos_from_page(page)
+ video_ids.extend(ids_in_page)
- if self._MORE_PAGES_INDICATOR not in page:
- break
- pagenum = pagenum + 1
+ # Download any subsequent channel pages using the json-based channel_ajax query
+ if self._MORE_PAGES_INDICATOR in page:
+ while True:
+ pagenum = pagenum + 1
+
+ self.report_download_page(channel_id, pagenum)
+ url = self._MORE_PAGES_URL % (pagenum, channel_id)
+ request = compat_urllib_request.Request(url)
+ try:
+ page = compat_urllib_request.urlopen(request).read().decode('utf8')
+ except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+ self._downloader.report_error(u'unable to download webpage: %s' % compat_str(err))
+ return
+
+ page = json.loads(page)
+
+ ids_in_page = self.extract_videos_from_page(page['content_html'])
+ video_ids.extend(ids_in_page)
+
+ if self._MORE_PAGES_INDICATOR not in page['load_more_widget_html']:
+ break
self._downloader.to_screen(u'[youtube] Channel %s: Found %i videos' % (channel_id, len(video_ids)))
- for id in video_ids:
- self._downloader.download(['http://www.youtube.com/watch?v=%s' % id])
- return
+ urls = ['http://www.youtube.com/watch?v=%s' % id for id in video_ids]
+ url_entries = [self.url_result(url, 'Youtube') for url in urls]
+ return [self.playlist_result(url_entries, channel_id)]
class YoutubeUserIE(InfoExtractor):
@@ -1928,20 +1926,9 @@ class YoutubeUserIE(InfoExtractor):
pagenum += 1
- all_ids_count = len(video_ids)
- playliststart = self._downloader.params.get('playliststart', 1) - 1
- playlistend = self._downloader.params.get('playlistend', -1)
-
- if playlistend == -1:
- video_ids = video_ids[playliststart:]
- else:
- video_ids = video_ids[playliststart:playlistend]
-
- self._downloader.to_screen(u"[youtube] user %s: Collected %d video ids (downloading %d of them)" %
- (username, all_ids_count, len(video_ids)))
-
- for video_id in video_ids:
- self._downloader.download(['http://www.youtube.com/watch?v=%s' % video_id])
+ urls = ['http://www.youtube.com/watch?v=%s' % video_id for video_id in video_ids]
+ url_results = [self.url_result(url, 'Youtube') for url in urls]
+ return [self.playlist_result(url_results, playlist_title = username)]
class BlipTVUserIE(InfoExtractor):
@@ -1956,8 +1943,8 @@ class BlipTVUserIE(InfoExtractor):
def report_download_page(self, username, pagenum):
"""Report attempt to download user page."""
- self._downloader.to_screen(u'[%s] user %s: Downloading video ids from page %d' %
- (self.IE_NAME, username, pagenum))
+ self.to_screen(u'user %s: Downloading video ids from page %d' %
+ (username, pagenum))
def _real_extract(self, url):
# Extract username
@@ -2019,20 +2006,9 @@ class BlipTVUserIE(InfoExtractor):
pagenum += 1
- all_ids_count = len(video_ids)
- playliststart = self._downloader.params.get('playliststart', 1) - 1
- playlistend = self._downloader.params.get('playlistend', -1)
-
- if playlistend == -1:
- video_ids = video_ids[playliststart:]
- else:
- video_ids = video_ids[playliststart:playlistend]
-
- self._downloader.to_screen(u"[%s] user %s: Collected %d video ids (downloading %d of them)" %
- (self.IE_NAME, username, all_ids_count, len(video_ids)))
-
- for video_id in video_ids:
- self._downloader.download([u'http://blip.tv/'+video_id])
+ urls = [u'http://blip.tv/%s' % video_id for video_id in video_ids]
+ url_entries = [self.url_result(url, 'BlipTV') for url in urls]
+ return [self.playlist_result(url_entries, playlist_title = username)]
class DepositFilesIE(InfoExtractor):
@@ -2042,11 +2018,7 @@ class DepositFilesIE(InfoExtractor):
def report_download_webpage(self, file_id):
"""Report webpage download."""
- self._downloader.to_screen(u'[DepositFiles] %s: Downloading webpage' % file_id)
-
- def report_extraction(self, file_id):
- """Report information extraction."""
- self._downloader.to_screen(u'[DepositFiles] %s: Extracting information' % file_id)
+ self.to_screen(u'%s: Downloading webpage' % file_id)
def _real_extract(self, url):
file_id = url.split('/')[-1]
@@ -2105,7 +2077,7 @@ class FacebookIE(InfoExtractor):
def report_login(self):
"""Report attempt to log in."""
- self._downloader.to_screen(u'[%s] Logging in' % self.IE_NAME)
+ self.to_screen(u'Logging in')
def _real_initialize(self):
if self._downloader is None:
@@ -2161,7 +2133,7 @@ class FacebookIE(InfoExtractor):
url = 'https://www.facebook.com/video/video.php?v=%s' % video_id
webpage = self._download_webpage(url, video_id)
- BEFORE = '[["allowFullScreen","true"],["allowScriptAccess","always"],["salign","tl"],["scale","noscale"],["wmode","opaque"]].forEach(function(param) {swf.addParam(param[0], param[1]);});\n'
+ BEFORE = '{swf.addParam(param[0], param[1]);});\n'
AFTER = '.forEach(function(variable) {swf.addVariable(variable[0], variable[1]);});'
m = re.search(re.escape(BEFORE) + '(.*?)' + re.escape(AFTER), webpage)
if not m:
@@ -2169,12 +2141,14 @@ class FacebookIE(InfoExtractor):
data = dict(json.loads(m.group(1)))
params_raw = compat_urllib_parse.unquote(data['params'])
params = json.loads(params_raw)
- video_url = params['hd_src']
+ video_data = params['video_data'][0]
+ video_url = video_data.get('hd_src')
if not video_url:
- video_url = params['sd_src']
+ video_url = video_data['sd_src']
if not video_url:
raise ExtractorError(u'Cannot find video URL')
- video_duration = int(params['video_duration'])
+ video_duration = int(video_data['video_duration'])
+ thumbnail = video_data['thumbnail_src']
m = re.search('<h2 class="uiHeaderTitle">([^<]+)</h2>', webpage)
if not m:
@@ -2187,7 +2161,7 @@ class FacebookIE(InfoExtractor):
'url': video_url,
'ext': 'mp4',
'duration': video_duration,
- 'thumbnail': params['thumbnail_src'],
+ 'thumbnail': thumbnail,
}
return [info]
@@ -2199,13 +2173,9 @@ class BlipTVIE(InfoExtractor):
_URL_EXT = r'^.*\.([a-z0-9]+)$'
IE_NAME = u'blip.tv'
- def report_extraction(self, file_id):
- """Report information extraction."""
- self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, file_id))
-
def report_direct_download(self, title):
"""Report information extraction."""
- self._downloader.to_screen(u'[%s] %s: Direct download detected' % (self.IE_NAME, title))
+ self.to_screen(u'%s: Direct download detected' % title)
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
@@ -2303,10 +2273,6 @@ class MyVideoIE(InfoExtractor):
def __init__(self, downloader=None):
InfoExtractor.__init__(self, downloader)
- def report_extraction(self, video_id):
- """Report information extraction."""
- self._downloader.to_screen(u'[myvideo] %s: Extracting information' % video_id)
-
def _real_extract(self,url):
mobj = re.match(self._VALID_URL, url)
if mobj is None:
@@ -2384,14 +2350,11 @@ class ComedyCentralIE(InfoExtractor):
"""Receives a URL and returns True if suitable for this IE."""
return re.match(cls._VALID_URL, url, re.VERBOSE) is not None
- def report_extraction(self, episode_id):
- self._downloader.to_screen(u'[comedycentral] %s: Extracting information' % episode_id)
-
def report_config_download(self, episode_id, media_id):
- self._downloader.to_screen(u'[comedycentral] %s: Downloading configuration for %s' % (episode_id, media_id))
+ self.to_screen(u'%s: Downloading configuration for %s' % (episode_id, media_id))
def report_index_download(self, episode_id):
- self._downloader.to_screen(u'[comedycentral] %s: Downloading show index' % episode_id)
+ self.to_screen(u'%s: Downloading show index' % episode_id)
def _print_formats(self, formats):
print('Available formats:')
@@ -2545,11 +2508,8 @@ class EscapistIE(InfoExtractor):
_VALID_URL = r'^(https?://)?(www\.)?escapistmagazine\.com/videos/view/(?P<showname>[^/]+)/(?P<episode>[^/?]+)[/?]?.*$'
IE_NAME = u'escapist'
- def report_extraction(self, showName):
- self._downloader.to_screen(u'[escapist] %s: Extracting information' % showName)
-
def report_config_download(self, showName):
- self._downloader.to_screen(u'[escapist] %s: Downloading configuration' % showName)
+ self.to_screen(u'%s: Downloading configuration' % showName)
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
@@ -2622,11 +2582,7 @@ class CollegeHumorIE(InfoExtractor):
def report_manifest(self, video_id):
"""Report information extraction."""
- self._downloader.to_screen(u'[%s] %s: Downloading XML manifest' % (self.IE_NAME, video_id))
-
- def report_extraction(self, video_id):
- """Report information extraction."""
- self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id))
+ self.to_screen(u'%s: Downloading XML manifest' % video_id)
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
@@ -2691,10 +2647,6 @@ class XVideosIE(InfoExtractor):
_VALID_URL = r'^(?:https?://)?(?:www\.)?xvideos\.com/video([0-9]+)(?:.*)'
IE_NAME = u'xvideos'
- def report_extraction(self, video_id):
- """Report information extraction."""
- self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id))
-
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
if mobj is None:
@@ -2761,11 +2713,7 @@ class SoundcloudIE(InfoExtractor):
def report_resolve(self, video_id):
"""Report information extraction."""
- self._downloader.to_screen(u'[%s] %s: Resolving id' % (self.IE_NAME, video_id))
-
- def report_extraction(self, video_id):
- """Report information extraction."""
- self._downloader.to_screen(u'[%s] %s: Retrieving stream' % (self.IE_NAME, video_id))
+ self.to_screen(u'%s: Resolving id' % video_id)
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
@@ -2834,11 +2782,7 @@ class SoundcloudSetIE(InfoExtractor):
def report_resolve(self, video_id):
"""Report information extraction."""
- self._downloader.to_screen(u'[%s] %s: Resolving id' % (self.IE_NAME, video_id))
-
- def report_extraction(self, video_id):
- """Report information extraction."""
- self._downloader.to_screen(u'[%s] %s: Retrieving stream' % (self.IE_NAME, video_id))
+ self.to_screen(u'%s: Resolving id' % video_id)
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
@@ -2903,10 +2847,6 @@ class InfoQIE(InfoExtractor):
"""Information extractor for infoq.com"""
_VALID_URL = r'^(?:https?://)?(?:www\.)?infoq\.com/[^/]+/[^/]+$'
- def report_extraction(self, video_id):
- """Report information extraction."""
- self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id))
-
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
if mobj is None:
@@ -2965,11 +2905,7 @@ class MixcloudIE(InfoExtractor):
def report_download_json(self, file_id):
"""Report JSON download."""
- self._downloader.to_screen(u'[%s] Downloading json' % self.IE_NAME)
-
- def report_extraction(self, file_id):
- """Report information extraction."""
- self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, file_id))
+ self.to_screen(u'Downloading json')
def get_urls(self, jsonData, fmt, bitrate='best'):
"""Get urls from 'audio_formats' section in json"""
@@ -3076,11 +3012,7 @@ class StanfordOpenClassroomIE(InfoExtractor):
def report_download_webpage(self, objid):
"""Report information extraction."""
- self._downloader.to_screen(u'[%s] %s: Downloading webpage' % (self.IE_NAME, objid))
-
- def report_extraction(self, video_id):
- """Report information extraction."""
- self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id))
+ self.to_screen(u'%s: Downloading webpage' % objid)
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
@@ -3186,10 +3118,6 @@ class MTVIE(InfoExtractor):
_VALID_URL = r'^(?P<proto>https?://)?(?:www\.)?mtv\.com/videos/[^/]+/(?P<videoid>[0-9]+)/[^/]+$'
IE_NAME = u'mtv'
- def report_extraction(self, video_id):
- """Report information extraction."""
- self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id))
-
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
if mobj is None:
@@ -3266,11 +3194,7 @@ class YoukuIE(InfoExtractor):
def report_download_webpage(self, file_id):
"""Report webpage download."""
- self._downloader.to_screen(u'[%s] %s: Downloading webpage' % (self.IE_NAME, file_id))
-
- def report_extraction(self, file_id):
- """Report information extraction."""
- self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, file_id))
+ self.to_screen(u'%s: Downloading webpage' % file_id)
def _gen_sid(self):
nowTime = int(time.time() * 1000)
@@ -3383,11 +3307,7 @@ class XNXXIE(InfoExtractor):
def report_webpage(self, video_id):
"""Report information extraction"""
- self._downloader.to_screen(u'[%s] %s: Downloading webpage' % (self.IE_NAME, video_id))
-
- def report_extraction(self, video_id):
- """Report information extraction"""
- self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id))
+ self.to_screen(u'%s: Downloading webpage' % video_id)
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
@@ -3447,23 +3367,23 @@ class GooglePlusIE(InfoExtractor):
def report_extract_entry(self, url):
"""Report downloading extry"""
- self._downloader.to_screen(u'[plus.google] Downloading entry: %s' % url)
+ self.to_screen(u'Downloading entry: %s' % url)
def report_date(self, upload_date):
"""Report downloading extry"""
- self._downloader.to_screen(u'[plus.google] Entry date: %s' % upload_date)
+ self.to_screen(u'Entry date: %s' % upload_date)
def report_uploader(self, uploader):
"""Report downloading extry"""
- self._downloader.to_screen(u'[plus.google] Uploader: %s' % uploader)
+ self.to_screen(u'Uploader: %s' % uploader)
def report_title(self, video_title):
"""Report downloading extry"""
- self._downloader.to_screen(u'[plus.google] Title: %s' % video_title)
+ self.to_screen(u'Title: %s' % video_title)
def report_extract_vid_page(self, video_page):
"""Report information extraction."""
- self._downloader.to_screen(u'[plus.google] Extracting video page: %s' % video_page)
+ self.to_screen(u'Extracting video page: %s' % video_page)
def _real_extract(self, url):
# Extract id from URL
@@ -3607,14 +3527,10 @@ class JustinTVIE(InfoExtractor):
_JUSTIN_PAGE_LIMIT = 100
IE_NAME = u'justin.tv'
- def report_extraction(self, file_id):
- """Report information extraction."""
- self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, file_id))
-
def report_download_page(self, channel, offset):
"""Report attempt to download a single page of videos."""
- self._downloader.to_screen(u'[%s] %s: Downloading video information from %d to %d' %
- (self.IE_NAME, channel, offset, offset + self._JUSTIN_PAGE_LIMIT))
+ self.to_screen(u'%s: Downloading video information from %d to %d' %
+ (channel, offset, offset + self._JUSTIN_PAGE_LIMIT))
# Return count of items, list of *valid* items
def _parse_page(self, url):
@@ -3702,7 +3618,9 @@ class FunnyOrDieIE(InfoExtractor):
m = re.search(r"<h1 class='player_page_h1'.*?>(?P<title>.*?)</h1>", webpage, flags=re.DOTALL)
if not m:
- self._downloader.trouble(u'Cannot find video title')
+ m = re.search(r'<title>(?P<title>[^<]+?)</title>', webpage)
+ if not m:
+ self._downloader.trouble(u'Cannot find video title')
title = clean_html(m.group('title'))
m = re.search(r'<meta property="og:description" content="(?P<desc>.*?)"', webpage)
@@ -3734,10 +3652,13 @@ class SteamIE(InfoExtractor):
def _real_extract(self, url):
m = re.match(self._VALID_URL, url, re.VERBOSE)
- urlRE = r"'movie_(?P<videoID>\d+)': \{\s*FILENAME: \"(?P<videoURL>[\w:/\.\?=]+)\"(,\s*MOVIE_NAME: \"(?P<videoName>[\w:/\.\?=\+-]+)\")?\s*\},"
gameID = m.group('gameID')
- videourl = 'http://store.steampowered.com/video/%s/' % gameID
+ videourl = 'http://store.steampowered.com/agecheck/video/%s/?snr=1_agecheck_agecheck__age-gate&ageDay=1&ageMonth=January&ageYear=1970' % gameID
+ self.report_age_confirmation()
webpage = self._download_webpage(videourl, gameID)
+ game_title = re.search(r'<h2 class="pageheader">(?P<game_title>.*?)</h2>', webpage).group('game_title')
+
+ urlRE = r"'movie_(?P<videoID>\d+)': \{\s*FILENAME: \"(?P<videoURL>[\w:/\.\?=]+)\"(,\s*MOVIE_NAME: \"(?P<videoName>[\w:/\.\?=\+-]+)\")?\s*\},"
mweb = re.finditer(urlRE, webpage)
namesRE = r'<span class="title">(?P<videoName>.+?)</span>'
titles = re.finditer(namesRE, webpage)
@@ -3759,7 +3680,7 @@ class SteamIE(InfoExtractor):
'thumbnail': video_thumb
}
videos.append(info)
- return videos
+ return [self.playlist_result(videos, gameID, game_title)]
class UstreamIE(InfoExtractor):
_VALID_URL = r'https?://www\.ustream\.tv/recorded/(?P<videoID>\d+)'
@@ -3941,7 +3862,7 @@ class YouPornIE(InfoExtractor):
if(len(links) == 0):
raise ExtractorError(u'ERROR: no known formats available for video')
- self._downloader.to_screen(u'[youporn] Links found: %d' % len(links))
+ self.to_screen(u'Links found: %d' % len(links))
formats = []
for link in links:
@@ -3977,7 +3898,7 @@ class YouPornIE(InfoExtractor):
return
req_format = self._downloader.params.get('format', None)
- self._downloader.to_screen(u'[youporn] Format: %s' % req_format)
+ self.to_screen(u'Format: %s' % req_format)
if req_format is None or req_format == 'best':
return [formats[0]]
@@ -4172,8 +4093,8 @@ class TEDIE(InfoExtractor):
else :
playlist_id=m.group('playlist_id')
name=m.group('name')
- self._downloader.to_screen(u'[%s] Getting info of playlist %s: "%s"' % (self.IE_NAME,playlist_id,name))
- return self._playlist_videos_info(url,name,playlist_id)
+ self.to_screen(u'Getting info of playlist %s: "%s"' % (playlist_id,name))
+ return [self._playlist_videos_info(url,name,playlist_id)]
def _talk_video_link(self,mediaSlug):
'''Returns the video link for that mediaSlug'''
@@ -4190,12 +4111,17 @@ class TEDIE(InfoExtractor):
webpage=self._download_webpage(url, playlist_id, 'Downloading playlist webpage')
m_videos=re.finditer(video_RE,webpage,re.VERBOSE)
m_names=re.finditer(video_name_RE,webpage)
- info=[]
+
+ playlist_RE = r'div class="headline">(\s*?)<h1>(\s*?)<span>(?P<playlist_title>.*?)</span>'
+ m_playlist = re.search(playlist_RE, webpage)
+ playlist_title = m_playlist.group('playlist_title')
+
+ playlist_entries = []
for m_video, m_name in zip(m_videos,m_names):
video_id=m_video.group('video_id')
talk_url='http://www.ted.com%s' % m_name.group('talk_url')
- info.append(self._talk_info(talk_url,video_id))
- return info
+ playlist_entries.append(self.url_result(talk_url, 'TED'))
+ return self.playlist_result(playlist_entries, playlist_id = playlist_id, playlist_title = playlist_title)
def _talk_info(self, url, video_id=0):
"""Return the video for the talk in the url"""
@@ -4392,7 +4318,7 @@ class ARDIE(InfoExtractor):
# there's two possibilities: RTMP stream or HTTP download
info = {'id': video_id, 'title': title, 'ext': 'mp4'}
if stream['rtmp_url']:
- self._downloader.to_screen(u'[%s] RTMP download detected' % self.IE_NAME)
+ self.to_screen(u'RTMP download detected')
assert stream['video_url'].startswith('mp4:')
info["url"] = stream["rtmp_url"]
info["play_path"] = stream['video_url']
@@ -4401,6 +4327,40 @@ class ARDIE(InfoExtractor):
info["url"] = stream["video_url"]
return [info]
+class TumblrIE(InfoExtractor):
+ _VALID_URL = r'http://(?P<blog_name>.*?).tumblr.com/((post)|(video))/(?P<id>\d*)/(.*?)'
+
+ def _real_extract(self, url):
+ m_url = re.match(self._VALID_URL, url)
+ video_id = m_url.group('id')
+ blog = m_url.group('blog_name')
+
+ url = 'http://%s.tumblr.com/post/%s/' % (blog, video_id)
+ webpage = self._download_webpage(url, video_id)
+
+ re_video = r'src=\\x22(?P<video_url>http://%s.tumblr.com/video_file/%s/(.*?))\\x22 type=\\x22video/(?P<ext>.*?)\\x22' % (blog, video_id)
+ video = re.search(re_video, webpage)
+ if video is None:
+ self.to_screen("No video found")
+ return []
+ video_url = video.group('video_url')
+ ext = video.group('ext')
+
+ re_thumb = r'posters(.*?)\[\\x22(?P<thumb>.*?)\\x22' # We pick the first poster
+ thumb = re.search(re_thumb, webpage).group('thumb').replace('\\', '')
+
+ # The only place where you can get a title, it's not complete,
+ # but searching in other places doesn't work for all videos
+ re_title = r'<title>(.*?) - (?P<title>.*?)</title>'
+ title = unescapeHTML(re.search(re_title, webpage).group('title'))
+
+ return [{'id': video_id,
+ 'url': video_url,
+ 'title': title,
+ 'thumbnail': thumb,
+ 'ext': ext
+ }]
+
def gen_extractors():
""" Return a list of an instance of every supported extractor.
@@ -4455,5 +4415,10 @@ def gen_extractors():
SpiegelIE(),
LiveLeakIE(),
ARDIE(),
+ TumblrIE(),
GenericIE()
]
+
+def get_info_extractor(ie_name):
+ """Returns the info extractor class with the given ie_name"""
+ return globals()[ie_name+'IE']
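
As a rough illustration of how extractors are expected to use the new helpers, here is a minimal, hypothetical playlist extractor. The url_result/playlist_result bodies are copied from the InfoExtractor methods added above so the snippet runs standalone; the ExamplePlaylistIE class, its ids and URLs are invented for the example.

# Hypothetical example, not part of the patch.

def url_result(url, ie=None):
    # Same shape as InfoExtractor.url_result() above.
    return {'_type': 'url', 'url': url, 'ie_key': ie}

def playlist_result(entries, playlist_id=None, playlist_title=None):
    # Same shape as InfoExtractor.playlist_result() above.
    video_info = {'_type': 'playlist', 'entries': entries}
    if playlist_id:
        video_info['id'] = playlist_id
    if playlist_title:
        video_info['title'] = playlist_title
    return video_info

class ExamplePlaylistIE(object):
    """Invented extractor following the pattern the YoutubePlaylistIE,
    YoutubeChannelIE and YoutubeUserIE hunks switch to: return one playlist
    result instead of calling self._downloader.download() per video."""
    IE_NAME = u'example:playlist'

    def _real_extract(self, url):
        video_ids = [u'abc123', u'def456']   # placeholder ids
        entries = [url_result(u'http://www.youtube.com/watch?v=%s' % vid, u'Youtube')
                   for vid in video_ids]
        return [playlist_result(entries, playlist_id=u'PL_example',
                                playlist_title=u'Example playlist')]

result = ExamplePlaylistIE()._real_extract(u'http://example.com/playlist')[0]
print(len(result['entries']))   # prints: 2
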
diff --git a/youtube_dl/__init__.py b/youtube_dl/__init__.py
index 87d3f222a..8ec7435ca 100644
--- a/youtube_dl/__init__.py
+++ b/youtube_dl/__init__.py
@@ -238,7 +238,16 @@ def parseOpts(arguments):
action='store_true', dest='autonumber',
help='number downloaded files starting from 00000', default=False)
filesystem.add_option('-o', '--output',
- dest='outtmpl', metavar='TEMPLATE', help='output filename template. Use %(title)s to get the title, %(uploader)s for the uploader name, %(uploader_id)s for the uploader nickname if different, %(autonumber)s to get an automatically incremented number, %(ext)s for the filename extension, %(upload_date)s for the upload date (YYYYMMDD), %(extractor)s for the provider (youtube, metacafe, etc), %(id)s for the video id and %% for a literal percent. Use - to output to stdout. Can also be used to download to a different directory, for example with -o \'/my/downloads/%(uploader)s/%(title)s-%(id)s.%(ext)s\' .')
+ dest='outtmpl', metavar='TEMPLATE',
+ help=('output filename template. Use %(title)s to get the title, '
+ '%(uploader)s for the uploader name, %(uploader_id)s for the uploader nickname if different, '
+ '%(autonumber)s to get an automatically incremented number, '
+ '%(ext)s for the filename extension, %(upload_date)s for the upload date (YYYYMMDD), '
+ '%(extractor)s for the provider (youtube, metacafe, etc), '
+ '%(id)s for the video id , %(playlist)s for the playlist the video is in, '
+ '%(playlist_index)s for the position in the playlist and %% for a literal percent. '
+ 'Use - to output to stdout. Can also be used to download to a different directory, '
+ 'for example with -o \'/my/downloads/%(uploader)s/%(title)s-%(id)s.%(ext)s\' .'))
filesystem.add_option('--autonumber-size',
dest='autonumber_size', metavar='NUMBER',
help='Specifies the number of digits in %(autonumber)s when it is present in output filename template or --autonumber option is given')
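
For reference, an output template using the two new fields might be invoked like this (hypothetical command; %(playlist_index)s is zero-padded to five digits by the FileDownloader change above):

    youtube-dl -o '%(playlist)s/%(playlist_index)s-%(title)s.%(ext)s' <playlist-or-channel-url>
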
diff --git a/youtube_dl/version.py b/youtube_dl/version.py
index 3535e61d4..ac8a05ab5 100644
--- a/youtube_dl/version.py
+++ b/youtube_dl/version.py
@@ -1,2 +1,2 @@
-__version__ = '2013.04.11'
+__version__ = '2013.04.22'