-rw-r--r--  test/test_youtube_lists.py   |   3
-rw-r--r--  test/tests.json              |  29
-rw-r--r--  youtube_dl/FileDownloader.py |  50
-rwxr-xr-x  youtube_dl/InfoExtractors.py | 153
-rw-r--r--  youtube_dl/__init__.py       |   7
5 files changed, 227 insertions, 15 deletions
diff --git a/test/test_youtube_lists.py b/test/test_youtube_lists.py
index 78657b51c..b842e6cc1 100644
--- a/test/test_youtube_lists.py
+++ b/test/test_youtube_lists.py
@@ -53,8 +53,7 @@ class TestYoutubeLists(unittest.TestCase):
dl = FakeDownloader()
ie = YoutubePlaylistIE(dl)
result = ie.extract('PLBB231211A4F62143')[0]
- self.assertEqual(result['title'], 'Team Fortress 2')
- self.assertTrue(len(result['entries']) > 40)
+ self.assertTrue(len(result['entries']) > 25)
def test_youtube_playlist_long(self):
dl = FakeDownloader()
diff --git a/test/tests.json b/test/tests.json
index 8a3e8e8e1..30ebd964b 100644
--- a/test/tests.json
+++ b/test/tests.json
@@ -543,11 +543,11 @@
},
{
"name": "Yahoo",
- "url": "http://screen.yahoo.com/obama-celebrates-iraq-victory-27592561.html",
- "file": "27592561.flv",
- "md5": "c6179bed843512823fd284fa2e7f012d",
+ "url": "http://screen.yahoo.com/julian-smith-travis-legg-watch-214727115.html",
+ "file": "214727115.flv",
+ "md5": "2e717f169c1be93d84d3794a00d4a325",
"info_dict": {
- "title": "Obama Celebrates Iraq Victory"
+ "title": "Julian Smith & Travis Legg Watch Julian Smith"
},
"skip": "Requires rtmpdump"
},
@@ -609,7 +609,26 @@
"file": "1v6ga.mp3",
"md5": "b9cc91b5af8995e9f0c1cee04c575828",
"info_dict":{
- "title":"TAME"
+ "title":"Tame"
}
+ },
+ {
+ "name": "Vbox7",
+ "url": "http://vbox7.com/play:249bb972c2",
+ "file": "249bb972c2.flv",
+ "md5": "9c70d6d956f888bdc08c124acc120cfe",
+ "info_dict":{
+ "title":"Смях! Чудо - чист за секунди - Скрита камера"
+ }
+ },
+ {
+ "name": "Gametrailers",
+ "url": "http://www.gametrailers.com/videos/zbvr8i/mirror-s-edge-2-e3-2013--debut-trailer",
+ "file": "zbvr8i.flv",
+ "md5": "c3edbc995ab4081976e16779bd96a878",
+ "info_dict": {
+ "title": "E3 2013: Debut Trailer"
+ },
+ "skip": "Requires rtmpdump"
}
]
diff --git a/youtube_dl/FileDownloader.py b/youtube_dl/FileDownloader.py
index 49f3a8712..72f03c217 100644
--- a/youtube_dl/FileDownloader.py
+++ b/youtube_dl/FileDownloader.py
@@ -539,6 +539,11 @@ class FileDownloader(object):
'playlist': playlist,
'playlist_index': i + playliststart,
}
+ if 'extractor' not in entry:
+ # Set the extractor here: if the entry is a URL, the new extractor
+ # will overwrite it later, but if it is already a full video result
+ # we must make sure the key is present (see issue #877).
+ entry['extractor'] = ie_result['extractor']
entry_result = self.process_ie_result(entry,
download=download,
extra_info=extra)
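
To illustrate the two entry shapes this guards against, a playlist entry can be a bare URL stub that a later extractor fills in, or an already-complete video dict that would otherwise lack the key. A minimal sketch with made-up field values (the '_type' and extractor names here are illustrative):

    # Hypothetical entry shapes; only the presence of 'extractor' matters here.
    url_entry = {'_type': 'url', 'url': 'http://example.com/watch?v=abc'}
    video_entry = {'id': 'abc', 'title': 'Some video', 'url': 'http://example.com/abc.mp4'}

    for entry in (url_entry, video_entry):
        if 'extractor' not in entry:
            entry['extractor'] = 'youtube:playlist'  # the parent ie_result's extractor
        print(entry['extractor'])
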
@@ -758,21 +763,21 @@ class FileDownloader(object):
except (OSError, IOError):
self.report_error(u'RTMP download detected but "rtmpdump" could not be run')
return False
+ verbosity_option = '--verbose' if self.params.get('verbose', False) else '--quiet'
# Download using rtmpdump. rtmpdump returns exit code 2 when
# the connection was interrupted and resuming appears to be
# possible. This is part of rtmpdump's normal usage, AFAIK.
- basic_args = ['rtmpdump', '-q', '-r', url, '-o', tmpfilename]
- if self.params.get('verbose', False): basic_args[1] = '-v'
+ basic_args = ['rtmpdump', verbosity_option, '-r', url, '-o', tmpfilename]
if player_url is not None:
- basic_args += ['-W', player_url]
+ basic_args += ['--swfVfy', player_url]
if page_url is not None:
basic_args += ['--pageUrl', page_url]
if play_path is not None:
- basic_args += ['-y', play_path]
+ basic_args += ['--playpath', play_path]
if tc_url is not None:
basic_args += ['--tcUrl', url]
- args = basic_args + [[], ['-e', '-k', '1']][self.params.get('continuedl', False)]
+ args = basic_args + [[], ['--resume', '--skip', '1']][self.params.get('continuedl', False)]
if self.params.get('verbose', False):
try:
import pipes
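
For reference, a rough sketch of the rtmpdump command line these long options now produce, using placeholder URLs and filenames rather than values from the code above:

    # Sketch only: placeholder values, assembled the same way as basic_args above.
    url = 'rtmp://example.com/app/stream'        # hypothetical stream URL
    tmpfilename = 'video.flv.part'               # hypothetical temp file
    page_url = 'http://example.com/watch'        # hypothetical page URL
    play_path = 'mp4:stream'                     # hypothetical play path

    args = ['rtmpdump', '--quiet', '-r', url, '-o', tmpfilename,
            '--swfVfy', 'http://example.com/player.swf',
            '--pageUrl', page_url, '--playpath', play_path]
    continuedl = True                            # pretend --continue was passed
    if continuedl:
        args += ['--resume', '--skip', '1']
    print(' '.join(args))
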
@@ -810,6 +815,37 @@ class FileDownloader(object):
self.report_error(u'rtmpdump exited with code %d' % retval)
return False
+ def _download_with_mplayer(self, filename, url):
+ self.report_destination(filename)
+ tmpfilename = self.temp_name(filename)
+
+ args = ['mplayer', '-really-quiet', '-vo', 'null', '-vc', 'dummy', '-dumpstream', '-dumpfile', tmpfilename, url]
+ # Check for mplayer first
+ try:
+ subprocess.call(['mplayer', '-h'], stdout=(open(os.path.devnull, 'w')), stderr=subprocess.STDOUT)
+ except (OSError, IOError):
+ self.report_error(u'MMS or RTSP download detected but "%s" could not be run' % args[0] )
+ return False
+
+ # Download using mplayer.
+ retval = subprocess.call(args)
+ if retval == 0:
+ fsize = os.path.getsize(encodeFilename(tmpfilename))
+ self.to_screen(u'\r[%s] %s bytes' % (args[0], fsize))
+ self.try_rename(tmpfilename, filename)
+ self._hook_progress({
+ 'downloaded_bytes': fsize,
+ 'total_bytes': fsize,
+ 'filename': filename,
+ 'status': 'finished',
+ })
+ return True
+ else:
+ self.to_stderr(u"\n")
+ self.report_error(u'mplayer exited with code %d' % retval)
+ return False
+
+
def _do_download(self, filename, info_dict):
url = info_dict['url']
@@ -830,6 +866,10 @@ class FileDownloader(object):
info_dict.get('play_path', None),
info_dict.get('tc_url', None))
+ # Attempt to download using mplayer
+ if url.startswith('mms') or url.startswith('rtsp'):
+ return self._download_with_mplayer(filename, url)
+
tmpfilename = self.temp_name(filename)
stream = None
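
Taken together, _do_download now dispatches on the URL scheme before falling back to the plain HTTP path. A minimal standalone sketch of that dispatch (pick_backend is illustrative, not part of FileDownloader):

    def pick_backend(url):
        # Mirrors the order of the checks above: rtmp first, then mms/rtsp.
        if url.startswith('rtmp'):
            return 'rtmpdump'
        if url.startswith('mms') or url.startswith('rtsp'):
            return 'mplayer -dumpstream'
        return 'built-in HTTP downloader'

    for u in ('rtmp://a/b', 'mms://a/b', 'rtsp://a/b.mp4', 'http://a/b.mp4'):
        print(u, '->', pick_backend(u))
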
diff --git a/youtube_dl/InfoExtractors.py b/youtube_dl/InfoExtractors.py
index 3c95012b1..a25ccc173 100755
--- a/youtube_dl/InfoExtractors.py
+++ b/youtube_dl/InfoExtractors.py
@@ -1410,6 +1410,9 @@ class GenericIE(InfoExtractor):
# Broaden the search a little bit: JWPlayer JS loader
mobj = re.search(r'[^A-Za-z0-9]?file:\s*["\'](http[^\'"&]*)', webpage)
if mobj is None:
+ # Try to find twitter cards info
+ mobj = re.search(r'<meta (?:property|name)="twitter:player:stream" (?:content|value)="(.+?)"', webpage)
+ if mobj is None:
raise ExtractorError(u'Invalid URL: %s' % url)
# It's possible that one of the regexes
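
For illustration, the kind of markup the new fallback is meant to catch, run against the regex added above (the URL is made up; real pages may swap property/name and content/value):

    import re

    webpage = '<meta property="twitter:player:stream" content="http://cdn.example.com/video.mp4">'
    mobj = re.search(r'<meta (?:property|name)="twitter:player:stream" (?:content|value)="(.+?)"',
                     webpage)
    print(mobj.group(1))  # http://cdn.example.com/video.mp4
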
@@ -1456,7 +1459,6 @@ class YoutubeSearchIE(SearchInfoExtractor):
def report_download_page(self, query, pagenum):
"""Report attempt to download search page with given number."""
- query = query.decode(preferredencoding())
self._downloader.to_screen(u'[youtube] query "%s": Downloading page %s' % (query, pagenum))
def _get_n_results(self, query, n):
@@ -4001,6 +4003,64 @@ class ARDIE(InfoExtractor):
info["url"] = stream["video_url"]
return [info]
+class ZDFIE(InfoExtractor):
+ _VALID_URL = r'^http://www\.zdf\.de\/ZDFmediathek\/(.*beitrag\/video\/)(?P<video_id>[^/\?]+)(?:\?.*)?'
+ _TITLE = r'<h1(?: class="beitragHeadline")?>(?P<title>.*)</h1>'
+ _MEDIA_STREAM = r'<a href="(?P<video_url>.+(?P<media_type>.streaming).+/zdf/(?P<quality>[^\/]+)/[^"]*)".+class="play".+>'
+ _MMS_STREAM = r'href="(?P<video_url>mms://[^"]*)"'
+ _RTSP_STREAM = r'(?P<video_url>rtsp://[^"]*.mp4)'
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ if mobj is None:
+ raise ExtractorError(u'Invalid URL: %s' % url)
+ video_id = mobj.group('video_id')
+
+ html = self._download_webpage(url, video_id)
+ streams = [m.groupdict() for m in re.finditer(self._MEDIA_STREAM, html)]
+ if not streams:
+ raise ExtractorError(u'No media url found.')
+
+ # s['media_type'] == 'wstreaming' -> use 'Windows Media Player' and mms url
+ # s['media_type'] == 'hstreaming' -> use 'Quicktime' and rtsp url
+ # choose first/default media type and highest quality for now
+ stream_ = None
+ for s in streams: # find 300 - dsl1000mbit
+ if s['quality'] == '300' and s['media_type'] == 'wstreaming':
+ stream_ = s
+ break
+ for s in streams: # find veryhigh - dsl2000mbit
+ if s['quality'] == 'veryhigh' and s['media_type'] == 'wstreaming': # 'hstreaming' - rtsp is not working
+ stream_ = s
+ break
+ if stream_ is None:
+ raise ExtractorError(u'No stream found.')
+
+ media_link = self._download_webpage(stream_['video_url'], video_id, u'Get stream URL')
+
+ self.report_extraction(video_id)
+ mobj = re.search(self._TITLE, html)
+ if mobj is None:
+ raise ExtractorError(u'Cannot extract title')
+ title = unescapeHTML(mobj.group('title'))
+
+ mobj = re.search(self._MMS_STREAM, media_link)
+ if mobj is None:
+ mobj = re.search(self._RTSP_STREAM, media_link)
+ if mobj is None:
+ raise ExtractorError(u'Cannot extract mms:// or rtsp:// URL')
+ mms_url = mobj.group('video_url')
+
+ mobj = re.search('(.*)[.](?P<ext>[^.]+)', mms_url)
+ if mobj is None:
+ raise ExtractorError(u'Cannot extract extension')
+ ext = mobj.group('ext')
+
+ return [{'id': video_id,
+ 'url': mms_url,
+ 'title': title,
+ 'ext': ext
+ }]
+
class TumblrIE(InfoExtractor):
_VALID_URL = r'http://(?P<blog_name>.*?)\.tumblr\.com/((post)|(video))/(?P<id>\d*)/(.*?)'
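
A standalone sketch of ZDFIE's stream selection with the candidates reduced to plain dicts; the quality and media_type values mirror the regex groups above, while the URLs are made up:

    def pick_stream(streams):
        # Prefer wstreaming (mms); 'hstreaming' (rtsp) is skipped because it does not work.
        chosen = None
        for s in streams:  # 300 - dsl1000mbit
            if s['quality'] == '300' and s['media_type'] == 'wstreaming':
                chosen = s
                break
        for s in streams:  # veryhigh - dsl2000mbit, overrides the 300 pick if present
            if s['quality'] == 'veryhigh' and s['media_type'] == 'wstreaming':
                chosen = s
                break
        return chosen

    sample = [
        {'quality': '300', 'media_type': 'wstreaming', 'video_url': 'http://example.de/300'},
        {'quality': 'veryhigh', 'media_type': 'wstreaming', 'video_url': 'http://example.de/veryhigh'},
    ]
    print(pick_stream(sample)['video_url'])  # the veryhigh variant wins
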
@@ -4394,6 +4454,94 @@ class HypemIE(InfoExtractor):
'artist': artist,
}]
+class Vbox7IE(InfoExtractor):
+ """Information Extractor for Vbox7"""
+ _VALID_URL = r'(?:http://)?(?:www\.)?vbox7\.com/play:([^/]+)'
+
+ def _real_extract(self,url):
+ mobj = re.match(self._VALID_URL, url)
+ if mobj is None:
+ raise ExtractorError(u'Invalid URL: %s' % url)
+ video_id = mobj.group(1)
+
+ redirect_page, urlh = self._download_webpage_handle(url, video_id)
+ redirect_url = urlh.geturl() + re.search(r'window\.location = \'(.*)\';', redirect_page).group(1)
+ webpage = self._download_webpage(redirect_url, video_id, u'Downloading redirect page')
+
+ title = re.search(r'<title>(.*)</title>', webpage)
+ title = (title.group(1)).split('/')[0].strip()
+
+ ext = "flv"
+ info_url = "http://vbox7.com/play/magare.do"
+ data = compat_urllib_parse.urlencode({'as3':'1','vid':video_id})
+ info_request = compat_urllib_request.Request(info_url, data)
+ info_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
+ info_response = self._download_webpage(info_request, video_id, u'Downloading info webpage')
+ if info_response is None:
+ raise ExtractorError(u'Unable to extract the media url')
+ (final_url, thumbnail_url) = map(lambda x: x.split('=')[1], info_response.split('&'))
+
+ return [{
+ 'id': video_id,
+ 'url': final_url,
+ 'ext': ext,
+ 'title': title,
+ 'thumbnail': thumbnail_url,
+ }]
+
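
The magare.do response Vbox7IE parses is a bare key=value&key=value string. A sketch of that parsing step with a made-up payload; split('=', 1) is used here so an '=' inside a URL cannot break the split:

    # Hypothetical response in the key=value&key=value shape the extractor expects.
    info_response = ('url=http://media.example.com/249bb972c2.flv'
                     '&thumb=http://media.example.com/249bb972c2.jpg')
    final_url, thumbnail_url = [part.split('=', 1)[1] for part in info_response.split('&')]
    print(final_url)
    print(thumbnail_url)
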
+class GametrailersIE(InfoExtractor):
+ _VALID_URL = r'http://www.gametrailers.com/(?P<type>videos|reviews|full-episodes)/(?P<id>.*?)/(?P<title>.*)'
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ if mobj is None:
+ raise ExtractorError(u'Invalid URL: %s' % url)
+ video_id = mobj.group('id')
+ video_type = mobj.group('type')
+ webpage = self._download_webpage(url, video_id)
+ if video_type == 'full-episodes':
+ mgid_re = r'data-video="(?P<mgid>mgid:.*?)"'
+ else:
+ mgid_re = r'data-contentId=\'(?P<mgid>mgid:.*?)\''
+ m_mgid = re.search(mgid_re, webpage)
+ if m_mgid is None:
+ raise ExtractorError(u'Unable to extract mgid')
+ mgid = m_mgid.group(1)
+ data = compat_urllib_parse.urlencode({'uri': mgid, 'acceptMethods': 'fms'})
+
+ info_page = self._download_webpage('http://www.gametrailers.com/feeds/mrss?' + data,
+ video_id, u'Downloading video info')
+ links_webpage = self._download_webpage('http://www.gametrailers.com/feeds/mediagen/?' + data,
+ video_id, u'Downloading video urls info')
+
+ self.report_extraction(video_id)
+ info_re = r'''<title><!\[CDATA\[(?P<title>.*?)\]\]></title>.*
+ <description><!\[CDATA\[(?P<description>.*?)\]\]></description>.*
+ <image>.*
+ <url>(?P<thumb>.*?)</url>.*
+ </image>'''
+
+ m_info = re.search(info_re, info_page, re.VERBOSE|re.DOTALL)
+ if m_info is None:
+ raise ExtractorError(u'Unable to extract video info')
+ video_title = m_info.group('title')
+ video_description = m_info.group('description')
+ video_thumb = m_info.group('thumb')
+
+ m_urls = list(re.finditer(r'<src>(?P<url>.*)</src>', links_webpage))
+ if not m_urls:
+ raise ExtractorError(u'Unable to extract video url')
+ # The sources are sorted from worst to best quality
+ video_url = m_urls[-1].group('url')
+
+ return {'url': video_url,
+ 'id': video_id,
+ 'title': video_title,
+ # Videos are actually flv not mp4
+ 'ext': 'flv',
+ 'thumbnail': video_thumb,
+ 'description': video_description,
+ }
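
For context, a sketch of the two Gametrailers feed URLs built from an mgid; the mgid value below is hypothetical, the endpoints are the ones used above:

    try:
        from urllib.parse import urlencode  # Python 3
    except ImportError:
        from urllib import urlencode        # Python 2
    mgid = 'mgid:moses:video:gametrailers.com:12345'  # hypothetical
    data = urlencode({'uri': mgid, 'acceptMethods': 'fms'})
    print('http://www.gametrailers.com/feeds/mrss?' + data)       # title, description, thumbnail
    print('http://www.gametrailers.com/feeds/mediagen/?' + data)  # <src> stream URLs
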
def gen_extractors():
""" Return a list of an instance of every supported extractor.
@@ -4448,6 +4596,7 @@ def gen_extractors():
SpiegelIE(),
LiveLeakIE(),
ARDIE(),
+ ZDFIE(),
TumblrIE(),
BandcampIE(),
RedTubeIE(),
@@ -4458,6 +4607,8 @@ def gen_extractors():
TeamcocoIE(),
XHamsterIE(),
HypemIE(),
+ Vbox7IE(),
+ GametrailersIE(),
GenericIE()
]
diff --git a/youtube_dl/__init__.py b/youtube_dl/__init__.py
index 308c48fe6..9279ce776 100644
--- a/youtube_dl/__init__.py
+++ b/youtube_dl/__init__.py
@@ -392,8 +392,11 @@ def _real_main(argv=None):
# General configuration
cookie_processor = compat_urllib_request.HTTPCookieProcessor(jar)
- if opts.proxy:
- proxies = {'http': opts.proxy, 'https': opts.proxy}
+ if opts.proxy is not None:
+ if opts.proxy == '':
+ proxies = {}
+ else:
+ proxies = {'http': opts.proxy, 'https': opts.proxy}
else:
proxies = compat_urllib_request.getproxies()
# Set HTTPS proxy to HTTP one if given (https://github.com/rg3/youtube-dl/issues/805)
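
A quick sketch of the new proxy semantics: passing --proxy "" now disables proxies entirely instead of falling back to the environment (build_proxies is illustrative, not the actual code path):

    def build_proxies(opt_proxy, env_proxies):
        # Mirrors the branch above: explicit empty string means no proxies at all.
        if opt_proxy is not None:
            return {} if opt_proxy == '' else {'http': opt_proxy, 'https': opt_proxy}
        return env_proxies

    print(build_proxies('', {'http': 'http://envproxy:3128'}))      # {}
    print(build_proxies('http://proxy:8080', {}))                   # http and https both set
    print(build_proxies(None, {'http': 'http://envproxy:3128'}))    # environment proxies win
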