aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--README.md2
-rw-r--r--test/tests.json10
-rw-r--r--youtube_dl/FileDownloader.py8
-rwxr-xr-xyoutube_dl/InfoExtractors.py261
-rw-r--r--youtube_dl/PostProcessor.py3
-rw-r--r--youtube_dl/__init__.py5
-rw-r--r--youtube_dl/utils.py19
-rw-r--r--youtube_dl/version.py2
8 files changed, 127 insertions, 183 deletions
diff --git a/README.md b/README.md
index a9eaed192..2f3c81a7c 100644
--- a/README.md
+++ b/README.md
@@ -116,7 +116,7 @@ which means you can modify it, redistribute it or use it however you like.
-F, --list-formats list all available formats (currently youtube
only)
--write-sub write subtitle file (currently youtube only)
- --only-sub downloads only the subtitles (no video)
+ --only-sub [deprecated] alias of --skip-download
--all-subs downloads all the available subtitles of the
video (currently youtube only)
--list-subs lists all available subtitles for the video
diff --git a/test/tests.json b/test/tests.json
index 5a14b0523..f0e5cd8bf 100644
--- a/test/tests.json
+++ b/test/tests.json
@@ -430,5 +430,15 @@
"info_dict":{
"title":"François Hollande \"Je crois que c'est clair\""
}
+ },
+ {
+ "name": "Yahoo",
+ "url": "http://screen.yahoo.com/obama-celebrates-iraq-victory-27592561.html",
+ "file": "27592561.flv",
+ "md5": "c6179bed843512823fd284fa2e7f012d",
+ "info_dict": {
+ "title": "Obama Celebrates Iraq Victory"
+ },
+ "skip": "Requires rtmpdump"
}
]
diff --git a/youtube_dl/FileDownloader.py b/youtube_dl/FileDownloader.py
index eb68d9478..8a3bdf21b 100644
--- a/youtube_dl/FileDownloader.py
+++ b/youtube_dl/FileDownloader.py
@@ -83,7 +83,6 @@ class FileDownloader(object):
writeinfojson: Write the video description to a .info.json file
writethumbnail: Write the thumbnail image to a file
writesubtitles: Write the video subtitles to a file
- onlysubtitles: Downloads only the subtitles of the video
allsubtitles: Downloads all the subtitles of the video
listsubtitles: Lists all available subtitles for the video
subtitlesformat: Subtitle format [sbv/srt] (default=srt)
@@ -93,6 +92,7 @@ class FileDownloader(object):
min_filesize: Skip files smaller than this size
max_filesize: Skip files larger than this size
daterange: A DateRange object, download only if the upload_date is in the range.
+ skip_download: Skip the actual download of the video file
"""
params = None
@@ -597,7 +597,7 @@ class FileDownloader(object):
try:
dn = os.path.dirname(encodeFilename(filename))
- if dn != '' and not os.path.exists(dn): # dn is already encoded
+ if dn != '' and not os.path.exists(dn):
os.makedirs(dn)
except (OSError, IOError) as err:
self.report_error(u'unable to create directory ' + compat_str(err))
@@ -630,8 +630,6 @@ class FileDownloader(object):
except (OSError, IOError):
self.report_error(u'Cannot write subtitles file ' + descfn)
return
- if self.params.get('onlysubtitles', False):
- return
if self.params.get('allsubtitles', False) and 'subtitles' in info_dict and info_dict['subtitles']:
subtitles = info_dict['subtitles']
@@ -649,8 +647,6 @@ class FileDownloader(object):
except (OSError, IOError):
self.report_error(u'Cannot write subtitles file ' + descfn)
return
- if self.params.get('onlysubtitles', False):
- return
if self.params.get('writeinfojson', False):
infofn = filename + u'.info.json'
diff --git a/youtube_dl/InfoExtractors.py b/youtube_dl/InfoExtractors.py
index ec52cbcff..4bcd8b9cd 100755
--- a/youtube_dl/InfoExtractors.py
+++ b/youtube_dl/InfoExtractors.py
@@ -912,123 +912,72 @@ class PhotobucketIE(InfoExtractor):
class YahooIE(InfoExtractor):
- """Information extractor for video.yahoo.com."""
+ """Information extractor for screen.yahoo.com."""
+ _VALID_URL = r'http://screen\.yahoo\.com/.*?-(?P<id>\d*?)\.html'
- _WORKING = False
- # _VALID_URL matches all Yahoo! Video URLs
- # _VPAGE_URL matches only the extractable '/watch/' URLs
- _VALID_URL = r'(?:http://)?(?:[a-z]+\.)?video\.yahoo\.com/(?:watch|network)/([0-9]+)(?:/|\?v=)([0-9]+)(?:[#\?].*)?'
- _VPAGE_URL = r'(?:http://)?video\.yahoo\.com/watch/([0-9]+)/([0-9]+)(?:[#\?].*)?'
- IE_NAME = u'video.yahoo'
-
- def _real_extract(self, url, new_video=True):
- # Extract ID from URL
+ def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
if mobj is None:
raise ExtractorError(u'Invalid URL: %s' % url)
-
- video_id = mobj.group(2)
- video_extension = 'flv'
-
- # Rewrite valid but non-extractable URLs as
- # extractable English language /watch/ URLs
- if re.match(self._VPAGE_URL, url) is None:
- request = compat_urllib_request.Request(url)
- try:
- webpage = compat_urllib_request.urlopen(request).read()
- except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- raise ExtractorError(u'Unable to retrieve video webpage: %s' % compat_str(err))
-
- mobj = re.search(r'\("id", "([0-9]+)"\);', webpage)
- if mobj is None:
- raise ExtractorError(u'Unable to extract id field')
- yahoo_id = mobj.group(1)
-
- mobj = re.search(r'\("vid", "([0-9]+)"\);', webpage)
- if mobj is None:
- raise ExtractorError(u'Unable to extract vid field')
- yahoo_vid = mobj.group(1)
-
- url = 'http://video.yahoo.com/watch/%s/%s' % (yahoo_vid, yahoo_id)
- return self._real_extract(url, new_video=False)
-
- # Retrieve video webpage to extract further information
- request = compat_urllib_request.Request(url)
- try:
- self.report_download_webpage(video_id)
- webpage = compat_urllib_request.urlopen(request).read()
- except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- raise ExtractorError(u'Unable to retrieve video webpage: %s' % compat_str(err))
-
- # Extract uploader and title from webpage
- self.report_extraction(video_id)
- mobj = re.search(r'<meta name="title" content="(.*)" />', webpage)
- if mobj is None:
- raise ExtractorError(u'Unable to extract video title')
- video_title = mobj.group(1).decode('utf-8')
-
- mobj = re.search(r'<h2 class="ti-5"><a href="http://video\.yahoo\.com/(people|profile)/[0-9]+" beacon=".*">(.*)</a></h2>', webpage)
- if mobj is None:
- raise ExtractorError(u'Unable to extract video uploader')
- video_uploader = mobj.group(1).decode('utf-8')
-
- # Extract video thumbnail
- mobj = re.search(r'<link rel="image_src" href="(.*)" />', webpage)
- if mobj is None:
- raise ExtractorError(u'Unable to extract video thumbnail')
- video_thumbnail = mobj.group(1).decode('utf-8')
-
- # Extract video description
- mobj = re.search(r'<meta name="description" content="(.*)" />', webpage)
- if mobj is None:
- raise ExtractorError(u'Unable to extract video description')
- video_description = mobj.group(1).decode('utf-8')
- if not video_description:
- video_description = 'No description available.'
-
- # Extract video height and width
- mobj = re.search(r'<meta name="video_height" content="([0-9]+)" />', webpage)
- if mobj is None:
- raise ExtractorError(u'Unable to extract video height')
- yv_video_height = mobj.group(1)
-
- mobj = re.search(r'<meta name="video_width" content="([0-9]+)" />', webpage)
- if mobj is None:
- raise ExtractorError(u'Unable to extract video width')
- yv_video_width = mobj.group(1)
-
- # Retrieve video playlist to extract media URL
- # I'm not completely sure what all these options are, but we
- # seem to need most of them, otherwise the server sends a 401.
- yv_lg = 'R0xx6idZnW2zlrKP8xxAIR' # not sure what this represents
- yv_bitrate = '700' # according to Wikipedia this is hard-coded
- request = compat_urllib_request.Request('http://cosmos.bcst.yahoo.com/up/yep/process/getPlaylistFOP.php?node_id=' + video_id +
- '&tech=flash&mode=playlist&lg=' + yv_lg + '&bitrate=' + yv_bitrate + '&vidH=' + yv_video_height +
- '&vidW=' + yv_video_width + '&swf=as3&rd=video.yahoo.com&tk=null&adsupported=v1,v2,&eventid=1301797')
- try:
- self.report_download_webpage(video_id)
- webpage = compat_urllib_request.urlopen(request).read()
- except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- raise ExtractorError(u'Unable to retrieve video webpage: %s' % compat_str(err))
-
- # Extract media URL from playlist XML
- mobj = re.search(r'<STREAM APP="(http://.*)" FULLPATH="/?(/.*\.flv\?[^"]*)"', webpage)
- if mobj is None:
- raise ExtractorError(u'Unable to extract media URL')
- video_url = compat_urllib_parse.unquote(mobj.group(1) + mobj.group(2)).decode('utf-8')
- video_url = unescapeHTML(video_url)
-
- return [{
- 'id': video_id.decode('utf-8'),
- 'url': video_url,
- 'uploader': video_uploader,
- 'upload_date': None,
- 'title': video_title,
- 'ext': video_extension.decode('utf-8'),
- 'thumbnail': video_thumbnail.decode('utf-8'),
- 'description': video_description,
- }]
-
+ video_id = mobj.group('id')
+ webpage = self._download_webpage(url, video_id)
+ m_id = re.search(r'YUI\.namespace\("Media"\)\.CONTENT_ID = "(?P<new_id>.+?)";', webpage)
+
+ if m_id is None:
+ # TODO: Check which url parameters are required
+ info_url = 'http://cosmos.bcst.yahoo.com/rest/v2/pops;lmsoverride=1;outputformat=mrss;cb=974419660;id=%s;rd=news.yahoo.com;datacontext=mdb;lg=KCa2IihxG3qE60vQ7HtyUy' % video_id
+ webpage = self._download_webpage(info_url, video_id, u'Downloading info webpage')
+ info_re = r'''<title><!\[CDATA\[(?P<title>.*?)\]\]></title>.*
+ <description><!\[CDATA\[(?P<description>.*?)\]\]></description>.*
+ <media:pubStart><!\[CDATA\[(?P<date>.*?)\ .*\]\]></media:pubStart>.*
+ <media:content\ medium="image"\ url="(?P<thumb>.*?)"\ name="LARGETHUMB"
+ '''
+ self.report_extraction(video_id)
+ m_info = re.search(info_re, webpage, re.VERBOSE|re.DOTALL)
+ if m_info is None:
+ raise ExtractorError(u'Unable to extract video info')
+ video_title = m_info.group('title')
+ video_description = m_info.group('description')
+ video_thumb = m_info.group('thumb')
+ video_date = m_info.group('date')
+ video_date = datetime.datetime.strptime(video_date, '%m/%d/%Y').strftime('%Y%m%d')
+
+ # TODO: Find a way to get mp4 videos
+ rest_url = 'http://cosmos.bcst.yahoo.com/rest/v2/pops;element=stream;outputformat=mrss;id=%s;lmsoverride=1;bw=375;dynamicstream=1;cb=83521105;tech=flv,mp4;rd=news.yahoo.com;datacontext=mdb;lg=KCa2IihxG3qE60vQ7HtyUy' % video_id
+ webpage = self._download_webpage(rest_url, video_id, u'Downloading video url webpage')
+            m_rest = re.search(r'<media:content url="(?P<url>.*?)" path="(?P<path>.*?)"', webpage)
+            if m_rest is None:
+                raise ExtractorError(u'Unable to extract video url')
+            video_url = m_rest.group('url')
+            video_path = m_rest.group('path')
+
+ else: # We have to use a different method if another id is defined
+ long_id = m_id.group('new_id')
+ info_url = 'http://video.query.yahoo.com/v1/public/yql?q=SELECT%20*%20FROM%20yahoo.media.video.streams%20WHERE%20id%3D%22' + long_id + '%22%20AND%20format%3D%22mp4%2Cflv%22%20AND%20protocol%3D%22rtmp%2Chttp%22%20AND%20plrs%3D%2286Gj0vCaSzV_Iuf6hNylf2%22%20AND%20acctid%3D%22389%22%20AND%20plidl%3D%22%22%20AND%20pspid%3D%22792700001%22%20AND%20offnetwork%3D%22false%22%20AND%20site%3D%22ivy%22%20AND%20lang%3D%22en-US%22%20AND%20region%3D%22US%22%20AND%20override%3D%22none%22%3B&env=prod&format=json&callback=YUI.Env.JSONP.yui_3_8_1_1_1368368376830_335'
+ webpage = self._download_webpage(info_url, video_id, u'Downloading info json')
+ json_str = re.search(r'YUI.Env.JSONP.yui.*?\((.*?)\);', webpage).group(1)
+ info = json.loads(json_str)
+ res = info[u'query'][u'results'][u'mediaObj'][0]
+ stream = res[u'streams'][0]
+ video_path = stream[u'path']
+ video_url = stream[u'host']
+ meta = res[u'meta']
+ video_title = meta[u'title']
+ video_description = meta[u'description']
+ video_thumb = meta[u'thumbnail']
+ video_date = None # I can't find it
+
+ info_dict = {
+ 'id': video_id,
+ 'url': video_url,
+ 'play_path': video_path,
+        'title': video_title,
+ 'description': video_description,
+ 'thumbnail': video_thumb,
+ 'upload_date': video_date,
+ 'ext': 'flv',
+ }
+ return info_dict
class VimeoIE(InfoExtractor):
"""Information extractor for vimeo.com."""
@@ -1452,13 +1401,13 @@ class YoutubeSearchIE(InfoExtractor):
if len(video_ids) > n:
video_ids = video_ids[:n]
videos = [self.url_result('http://www.youtube.com/watch?v=%s' % id, 'Youtube') for id in video_ids]
- return videos
+ return self.playlist_result(videos, query)
class GoogleSearchIE(InfoExtractor):
"""Information Extractor for Google Video search queries."""
_VALID_URL = r'gvsearch(?P<prefix>|\d+|all):(?P<query>[\s\S]+)'
- _MORE_PAGES_INDICATOR = r'class="pn" id="pnnext"'
+ _MORE_PAGES_INDICATOR = r'id="pnnext" class="pn"'
_max_google_results = 1000
IE_NAME = u'video.google:search'
@@ -1490,7 +1439,7 @@ class GoogleSearchIE(InfoExtractor):
}
for pagenum in itertools.count(1):
- result_url = u'http://video.google.com/videosearch?q=%s&start=%s&hl=en' % (compat_urllib_parse.quote_plus(query), pagenum*10)
+ result_url = u'http://www.google.com/search?tbm=vid&q=%s&start=%s&hl=en' % (compat_urllib_parse.quote_plus(query), pagenum*10)
webpage = self._download_webpage(result_url, u'gvsearch:' + query,
note='Downloading result page ' + str(pagenum))
@@ -1507,18 +1456,10 @@ class GoogleSearchIE(InfoExtractor):
class YahooSearchIE(InfoExtractor):
"""Information Extractor for Yahoo! Video search queries."""
- _WORKING = False
_VALID_URL = r'yvsearch(\d+|all)?:[\s\S]+'
- _TEMPLATE_URL = 'http://video.yahoo.com/search/?p=%s&o=%s'
- _VIDEO_INDICATOR = r'href="http://video\.yahoo\.com/watch/([0-9]+/[0-9]+)"'
- _MORE_PAGES_INDICATOR = r'\s*Next'
- _max_yahoo_results = 1000
- IE_NAME = u'video.yahoo:search'
- def report_download_page(self, query, pagenum):
- """Report attempt to download playlist page with given number."""
- query = query.decode(preferredencoding())
- self.to_screen(u'query "%s": Downloading page %s' % (query, pagenum))
+ _max_yahoo_results = 1000
+ IE_NAME = u'screen.yahoo:search'
def _real_extract(self, query):
mobj = re.match(self._VALID_URL, query)
@@ -1529,11 +1470,9 @@ class YahooSearchIE(InfoExtractor):
prefix = prefix[8:]
query = query.encode('utf-8')
if prefix == '':
- self._download_n_results(query, 1)
- return
+ return self._get_n_results(query, 1)
elif prefix == 'all':
- self._download_n_results(query, self._max_yahoo_results)
- return
+ return self._get_n_results(query, self._max_yahoo_results)
else:
try:
n = int(prefix)
@@ -1542,46 +1481,36 @@ class YahooSearchIE(InfoExtractor):
elif n > self._max_yahoo_results:
self._downloader.report_warning(u'yvsearch returns max %i results (you requested %i)' % (self._max_yahoo_results, n))
n = self._max_yahoo_results
- self._download_n_results(query, n)
- return
+ return self._get_n_results(query, n)
except ValueError: # parsing prefix as integer fails
- self._download_n_results(query, 1)
- return
-
- def _download_n_results(self, query, n):
- """Downloads a specified number of results for a query"""
-
- video_ids = []
- already_seen = set()
- pagenum = 1
+ return self._get_n_results(query, 1)
- while True:
- self.report_download_page(query, pagenum)
- result_url = self._TEMPLATE_URL % (compat_urllib_parse.quote_plus(query), pagenum)
- request = compat_urllib_request.Request(result_url)
- try:
- page = compat_urllib_request.urlopen(request).read()
- except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- raise ExtractorError(u'Unable to download webpage: %s' % compat_str(err))
+ def _get_n_results(self, query, n):
+ """Get a specified number of results for a query"""
- # Extract video identifiers
- for mobj in re.finditer(self._VIDEO_INDICATOR, page):
- video_id = mobj.group(1)
- if video_id not in already_seen:
- video_ids.append(video_id)
- already_seen.add(video_id)
- if len(video_ids) == n:
- # Specified n videos reached
- for id in video_ids:
- self._downloader.download(['http://video.yahoo.com/watch/%s' % id])
- return
-
- if re.search(self._MORE_PAGES_INDICATOR, page) is None:
- for id in video_ids:
- self._downloader.download(['http://video.yahoo.com/watch/%s' % id])
- return
+ res = {
+ '_type': 'playlist',
+ 'id': query,
+ 'entries': []
+ }
+ for pagenum in itertools.count(0):
+ result_url = u'http://video.search.yahoo.com/search/?p=%s&fr=screen&o=js&gs=0&b=%d' % (compat_urllib_parse.quote_plus(query), pagenum * 30)
+ webpage = self._download_webpage(result_url, query,
+ note='Downloading results page '+str(pagenum+1))
+ info = json.loads(webpage)
+ m = info[u'm']
+ results = info[u'results']
+
+ for (i, r) in enumerate(results):
+ if (pagenum * 30) +i >= n:
+ break
+ mobj = re.search(r'(?P<url>screen\.yahoo\.com/.*?-\d*?\.html)"', r)
+ e = self.url_result('http://' + mobj.group('url'), 'Yahoo')
+ res['entries'].append(e)
+ if (pagenum * 30 +i >= n) or (m[u'last'] >= (m[u'total'] -1 )):
+ break
- pagenum = pagenum + 1
+ return res
class YoutubePlaylistIE(InfoExtractor):
diff --git a/youtube_dl/PostProcessor.py b/youtube_dl/PostProcessor.py
index 0c6885dda..8868b37af 100644
--- a/youtube_dl/PostProcessor.py
+++ b/youtube_dl/PostProcessor.py
@@ -85,8 +85,9 @@ class FFmpegPostProcessor(PostProcessor):
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout,stderr = p.communicate()
if p.returncode != 0:
+ stderr = stderr.decode('utf-8', 'replace')
msg = stderr.strip().split('\n')[-1]
- raise FFmpegPostProcessorError(msg.decode('utf-8', 'replace'))
+ raise FFmpegPostProcessorError(msg)
def _ffmpeg_filename_argument(self, fn):
# ffmpeg broke --, see https://ffmpeg.org/trac/ffmpeg/ticket/2127 for details
diff --git a/youtube_dl/__init__.py b/youtube_dl/__init__.py
index 80f3b9f33..308c48fe6 100644
--- a/youtube_dl/__init__.py
+++ b/youtube_dl/__init__.py
@@ -191,8 +191,8 @@ def parseOpts(overrideArguments=None):
action='store_true', dest='writesubtitles',
help='write subtitle file (currently youtube only)', default=False)
video_format.add_option('--only-sub',
- action='store_true', dest='onlysubtitles',
- help='downloads only the subtitles (no video)', default=False)
+ action='store_true', dest='skip_download',
+ help='[deprecated] alias of --skip-download', default=False)
video_format.add_option('--all-subs',
action='store_true', dest='allsubtitles',
help='downloads all the available subtitles of the video (currently youtube only)', default=False)
@@ -532,7 +532,6 @@ def _real_main(argv=None):
'writeinfojson': opts.writeinfojson,
'writethumbnail': opts.writethumbnail,
'writesubtitles': opts.writesubtitles,
- 'onlysubtitles': opts.onlysubtitles,
'allsubtitles': opts.allsubtitles,
'listsubtitles': opts.listsubtitles,
'subtitlesformat': opts.subtitlesformat,
diff --git a/youtube_dl/utils.py b/youtube_dl/utils.py
index f2342b10a..616948e10 100644
--- a/youtube_dl/utils.py
+++ b/youtube_dl/utils.py
@@ -1,6 +1,7 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
+import errno
import gzip
import io
import json
@@ -334,12 +335,20 @@ def sanitize_open(filename, open_mode):
stream = open(encodeFilename(filename), open_mode)
return (stream, filename)
except (IOError, OSError) as err:
- # In case of error, try to remove win32 forbidden chars
- filename = re.sub(u'[/<>:"\\|\\\\?\\*]', u'#', filename)
+ if err.errno in (errno.EACCES,):
+ raise
- # An exception here should be caught in the caller
- stream = open(encodeFilename(filename), open_mode)
- return (stream, filename)
+            # In case of error, try to remove win32 forbidden chars
+            alt_filename = os.path.join(*(
+                re.sub(u'[/<>:"\\|\\\\?\\*]', u'#', path_part)
+                for path_part in os.path.split(filename)
+            ))
+            if alt_filename == filename:
+                raise
+            else:
+                # An exception here should be caught in the caller
+                stream = open(encodeFilename(alt_filename), open_mode)
+                return (stream, alt_filename)
def timeconvert(timestr):
diff --git a/youtube_dl/version.py b/youtube_dl/version.py
index ae9688e17..6619cfecc 100644
--- a/youtube_dl/version.py
+++ b/youtube_dl/version.py
@@ -1,2 +1,2 @@
-__version__ = '2013.05.10'
+__version__ = '2013.05.13'