-rw-r--r--  README.md                      | 19
-rw-r--r--  test/test_download.py          | 19
-rw-r--r--  test/test_write_info_json.py   |  2
-rw-r--r--  test/test_youtube_lists.py     |  2
-rw-r--r--  test/test_youtube_subtitles.py |  2
-rw-r--r--  test/tests.json                | 25
-rw-r--r--  youtube_dl/FileDownloader.py   |  7
-rwxr-xr-x  youtube_dl/InfoExtractors.py   | 90
-rw-r--r--  youtube_dl/__init__.py         |  6
-rw-r--r--  youtube_dl/utils.py            | 11
10 files changed, 106 insertions, 77 deletions
diff --git a/README.md b/README.md
--- a/README.md
+++ b/README.md
@@ -46,15 +46,16 @@ which means you can modify it, redistribute it or use it however you like.
 -A, --auto-number          number downloaded files starting from 00000
 -o, --output TEMPLATE      output filename template. Use %(title)s to get the
                            title, %(uploader)s for the uploader name,
-                           %(autonumber)s to get an automatically incremented
-                           number, %(ext)s for the filename extension,
-                           %(upload_date)s for the upload date (YYYYMMDD),
-                           %(extractor)s for the provider (youtube, metacafe,
-                           etc), %(id)s for the video id and %% for a literal
-                           percent. Use - to output to stdout. Can also be
-                           used to download to a different directory, for
-                           example with -o '/my/downloads/%(uploader)s/%(title
-                           )s-%(id)s.%(ext)s' .
+                           %(uploader_id)s for the uploader nickname if
+                           different, %(autonumber)s to get an automatically
+                           incremented number, %(ext)s for the filename
+                           extension, %(upload_date)s for the upload date
+                           (YYYYMMDD), %(extractor)s for the provider
+                           (youtube, metacafe, etc), %(id)s for the video id
+                           and %% for a literal percent. Use - to output to
+                           stdout. Can also be used to download to a different
+                           directory, for example with -o '/my/downloads/%(upl
+                           oader)s/%(title)s-%(id)s.%(ext)s' .
     --restrict-filenames   Restrict filenames to only ASCII characters, and
                            avoid "&" and spaces in filenames
 -a, --batch-file FILE      file containing URLs to download ('-' for stdin)
diff --git a/test/test_download.py b/test/test_download.py
index bce0e4fcd..1ee1b334d 100644
--- a/test/test_download.py
+++ b/test/test_download.py
@@ -6,6 +6,7 @@ import os
 import json
 import unittest
 import sys
+import hashlib
 import socket
 
 # Allow direct execution
@@ -24,12 +25,15 @@ cookie_processor = compat_urllib_request.HTTPCookieProcessor(jar)
 proxy_handler = compat_urllib_request.ProxyHandler()
 opener = compat_urllib_request.build_opener(proxy_handler, cookie_processor, YoutubeDLHandler())
 compat_urllib_request.install_opener(opener)
-socket.setdefaulttimeout(300) # 5 minutes should be enough (famous last words)
 
 class FileDownloader(youtube_dl.FileDownloader):
     def __init__(self, *args, **kwargs):
-        youtube_dl.FileDownloader.__init__(self, *args, **kwargs)
         self.to_stderr = self.to_screen
+        self.processed_info_dicts = []
+        return youtube_dl.FileDownloader.__init__(self, *args, **kwargs)
+    def process_info(self, info_dict):
+        self.processed_info_dicts.append(info_dict)
+        return youtube_dl.FileDownloader.process_info(self, info_dict)
 
 def _file_md5(fn):
     with open(fn, 'rb') as f:
@@ -40,6 +44,7 @@ with io.open(DEF_FILE, encoding='utf-8') as deff:
 with io.open(PARAMETERS_FILE, encoding='utf-8') as pf:
     parameters = json.load(pf)
 
+
 class TestDownload(unittest.TestCase):
     def setUp(self):
         self.parameters = parameters
@@ -68,18 +73,28 @@ def generator(test_case):
         if 'skip' in test_case:
             print('Skipping: {0}'.format(test_case['skip']))
             return
+
         params = dict(self.parameters) # Duplicate it locally
         for p in test_case.get('params', {}):
             params[p] = test_case['params'][p]
+
         fd = FileDownloader(params)
         fd.add_info_extractor(ie())
         for ien in test_case.get('add_ie', []):
             fd.add_info_extractor(getattr(youtube_dl.InfoExtractors, ien + 'IE')())
         fd.download([test_case['url']])
+
         self.assertTrue(os.path.exists(test_case['file']))
         if 'md5' in test_case:
             md5_for_file = _file_md5(test_case['file'])
             self.assertEqual(md5_for_file, test_case['md5'])
+        info_dict = fd.processed_info_dicts[0]
+        for (info_field, value) in test_case.get('info_dict', {}).items():
+            if value.startswith('md5:'):
+                md5_info_value = hashlib.md5(info_dict.get(info_field, '')).hexdigest()
+                self.assertEqual(value[3:], md5_info_value)
+            else:
+                self.assertEqual(value, info_dict.get(info_field))
 
     return test_template
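The info_dict check added above also understands an "md5:" prefix: instead of storing a long expected string verbatim in tests.json, a test case can store the MD5 of that string, and the harness compares value[3:] against the hash of the extracted field. A minimal sketch of how such a value could be produced (the expected_md5_value helper is illustrative only and assumes the field text is handled as UTF-8):

    import hashlib

    def expected_md5_value(field_text):
        # Hash the expected text; the 'md5:' prefix tells the test harness
        # above to compare hashes instead of literal strings.
        return 'md5:' + hashlib.md5(field_text.encode('utf-8')).hexdigest()

    # The printed value can then be used as an info_dict entry in tests.json.
    print(expected_md5_value(u'This is a test video for youtube-dl.'))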
diff --git a/test/test_write_info_json.py b/test/test_write_info_json.py
index ebf543980..8134dda37 100644
--- a/test/test_write_info_json.py
+++ b/test/test_write_info_json.py
@@ -3,7 +3,6 @@
 
 import json
 import os
-import socket
 import sys
 import unittest
 
@@ -22,7 +21,6 @@ cookie_processor = compat_urllib_request.HTTPCookieProcessor(jar)
 proxy_handler = compat_urllib_request.ProxyHandler()
 opener = compat_urllib_request.build_opener(proxy_handler, cookie_processor, YoutubeDLHandler())
 compat_urllib_request.install_opener(opener)
-socket.setdefaulttimeout(300) # 5 minutes should be enough (famous last words)
 
 class FileDownloader(youtube_dl.FileDownloader):
     def __init__(self, *args, **kwargs):
diff --git a/test/test_youtube_lists.py b/test/test_youtube_lists.py
index e352e5ab9..3044e0852 100644
--- a/test/test_youtube_lists.py
+++ b/test/test_youtube_lists.py
@@ -2,7 +2,6 @@
 
 import sys
 import unittest
-import socket
 import json
 
 # Allow direct execution
@@ -22,7 +21,6 @@ cookie_processor = compat_urllib_request.HTTPCookieProcessor(jar)
 proxy_handler = compat_urllib_request.ProxyHandler()
 opener = compat_urllib_request.build_opener(proxy_handler, cookie_processor, YoutubeDLHandler())
 compat_urllib_request.install_opener(opener)
-socket.setdefaulttimeout(300) # 5 minutes should be enough (famous last words)
 
 class FakeDownloader(object):
     def __init__(self):
diff --git a/test/test_youtube_subtitles.py b/test/test_youtube_subtitles.py
index 64a391d14..5d3566a35 100644
--- a/test/test_youtube_subtitles.py
+++ b/test/test_youtube_subtitles.py
@@ -2,7 +2,6 @@
 
 import sys
 import unittest
-import socket
 import json
 import io
 import hashlib
@@ -24,7 +23,6 @@ cookie_processor = compat_urllib_request.HTTPCookieProcessor(jar)
 proxy_handler = compat_urllib_request.ProxyHandler()
 opener = compat_urllib_request.build_opener(proxy_handler, cookie_processor, YoutubeDLHandler())
 compat_urllib_request.install_opener(opener)
-socket.setdefaulttimeout(300) # 5 minutes should be enough (famous last words)
 
 class FakeDownloader(object):
     def __init__(self):
diff --git a/test/tests.json b/test/tests.json
index 9bf56082e..dbff62676 100644
--- a/test/tests.json
+++ b/test/tests.json
@@ -2,7 +2,14 @@
   {
     "name": "Youtube",
     "url": "http://www.youtube.com/watch?v=BaW_jenozKc",
-    "file": "BaW_jenozKc.mp4"
+    "file": "BaW_jenozKc.mp4",
+    "info_dict": {
+      "title": "youtube-dl test video \"'/\\ä↭𝕐",
+      "uploader": "Philipp Hagemeister",
+      "uploader_id": "phihag",
+      "upload_date": "20121002",
+      "description": "test chars: \"'/\\ä↭𝕐\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de ."
+    }
   },
   {
     "name": "Dailymotion",
@@ -30,9 +37,16 @@
   },
   {
     "name": "Vimeo",
-    "md5": "60540a4ec7cc378ec84b919c0aed5023",
-    "url": "http://vimeo.com/14160053",
-    "file": "14160053.mp4"
+    "md5": "8879b6cc097e987f02484baf890129e5",
+    "url": "http://vimeo.com/56015672",
+    "file": "56015672.mp4",
+    "info_dict": {
+      "title": "youtube-dl test video - ★ \" ' 幸 / \\ ä ↭ 𝕐",
+      "uploader": "Filippo Valsorda",
+      "uploader_id": "user7108434",
+      "upload_date": "20121220",
+      "description": "This is a test case for youtube-dl.\nFor more information, see github.com/rg3/youtube-dl\nTest chars: ★ \" ' 幸 / \\ ä ↭ 𝕐"
+    }
   },
   {
     "name": "Soundcloud",
@@ -81,7 +95,8 @@
     "name": "Escapist",
     "url": "http://www.escapistmagazine.com/videos/view/the-escapist-presents/6618-Breaking-Down-Baldurs-Gate",
     "file": "6618-Breaking-Down-Baldurs-Gate.flv",
-    "md5": "c6793dbda81388f4264c1ba18684a74d"
+    "md5": "c6793dbda81388f4264c1ba18684a74d",
+    "skip": "Fails with timeout on Travis"
   },
   {
     "name": "GooglePlus",
diff --git a/youtube_dl/FileDownloader.py b/youtube_dl/FileDownloader.py
index a861086c3..d9a4ecd3a 100644
--- a/youtube_dl/FileDownloader.py
+++ b/youtube_dl/FileDownloader.py
@@ -433,11 +433,8 @@ class FileDownloader(object):
             try:
                 srtfn = filename.rsplit('.', 1)[0] + u'.srt'
                 self.report_writesubtitles(srtfn)
-                srtfile = open(encodeFilename(srtfn), 'wb')
-                try:
-                    srtfile.write(info_dict['subtitles'].encode('utf-8'))
-                finally:
-                    srtfile.close()
+                with io.open(encodeFilename(srtfn), 'w', encoding='utf-8') as srtfile:
+                    srtfile.write(info_dict['subtitles'])
             except (OSError, IOError):
                 self.trouble(u'ERROR: Cannot write subtitles file ' + descfn)
                 return
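The subtitle file is now written with io.open and an explicit encoding, so the unicode subtitle text no longer has to be encoded by hand before writing. A standalone illustration of the difference (the file name and contents are made up, not taken from the code above):

    import io

    subtitles = u'1\n00:00:00,000 --> 00:00:01,000\nHello subtitles\n'

    # Old pattern: binary mode plus a manual encode on every write.
    with open('example.srt', 'wb') as f:
        f.write(subtitles.encode('utf-8'))

    # New pattern: io.open encodes transparently and behaves the same
    # way on Python 2 and Python 3.
    with io.open('example.srt', 'w', encoding='utf-8') as f:
        f.write(subtitles)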
diff --git a/youtube_dl/InfoExtractors.py b/youtube_dl/InfoExtractors.py
index d94ebde34..697c031c5 100755
--- a/youtube_dl/InfoExtractors.py
+++ b/youtube_dl/InfoExtractors.py
@@ -32,7 +32,7 @@ class InfoExtractor(object):
 
     id:             Video identifier.
     url:            Final video URL.
-    uploader:       Nickname of the video uploader, unescaped.
+    uploader:       Full name of the video uploader, unescaped.
     upload_date:    Video upload date (YYYYMMDD).
     title:          Video title, unescaped.
     ext:            Video filename extension.
@@ -42,6 +42,7 @@ class InfoExtractor(object):
     format:         The video format, defaults to ext (used for --get-format)
     thumbnail:      Full URL to a video thumbnail image.
     description:    One-line video description.
+    uploader_id:    Nickname or id of the video uploader.
     player_url:     SWF Player URL (used for rtmpdump).
     subtitles:      The .srt file contents.
     urlhandle:      [internal] The urlHandle to be used to download the file,
@@ -219,6 +220,34 @@ class YoutubeIE(InfoExtractor):
             srt += caption + '\n\n'
         return srt
 
+    def _extract_subtitles(self, video_id):
+        self.report_video_subtitles_download(video_id)
+        request = compat_urllib_request.Request('http://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id)
+        try:
+            srt_list = compat_urllib_request.urlopen(request).read().decode('utf-8')
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            return (u'WARNING: unable to download video subtitles: %s' % compat_str(err), None)
+        srt_lang_list = re.findall(r'name="([^"]*)"[^>]+lang_code="([\w\-]+)"', srt_list)
+        srt_lang_list = dict((l[1], l[0]) for l in srt_lang_list)
+        if not srt_lang_list:
+            return (u'WARNING: video has no closed captions', None)
+        if self._downloader.params.get('subtitleslang', False):
+            srt_lang = self._downloader.params.get('subtitleslang')
+        elif 'en' in srt_lang_list:
+            srt_lang = 'en'
+        else:
+            srt_lang = list(srt_lang_list.keys())[0]
+        if not srt_lang in srt_lang_list:
+            return (u'WARNING: no closed captions found in the specified language', None)
+        request = compat_urllib_request.Request('http://www.youtube.com/api/timedtext?lang=%s&name=%s&v=%s' % (srt_lang, srt_lang_list[srt_lang], video_id))
+        try:
+            srt_xml = compat_urllib_request.urlopen(request).read().decode('utf-8')
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            return (u'WARNING: unable to download video subtitles: %s' % compat_str(err), None)
+        if not srt_xml:
+            return (u'WARNING: unable to download video subtitles', None)
+        return (None, self._closed_captions_xml_to_srt(srt_xml))
+
     def _print_formats(self, formats):
         print('Available formats:')
         for x in formats:
@@ -356,10 +385,18 @@ class YoutubeIE(InfoExtractor):
 
         # uploader
         if 'author' not in video_info:
-            self._downloader.trouble(u'ERROR: unable to extract uploader nickname')
+            self._downloader.trouble(u'ERROR: unable to extract uploader name')
             return
         video_uploader = compat_urllib_parse.unquote_plus(video_info['author'][0])
 
+        # uploader_id
+        video_uploader_id = None
+        mobj = re.search(r'<link itemprop="url" href="http://www.youtube.com/user/([^"]+)">', video_webpage)
+        if mobj is not None:
+            video_uploader_id = mobj.group(1)
+        else:
+            self._downloader.trouble(u'WARNING: unable to extract uploader nickname')
+
         # title
         if 'title' not in video_info:
             self._downloader.trouble(u'ERROR: unable to extract video title')
@@ -395,35 +432,9 @@ class YoutubeIE(InfoExtractor):
         # closed captions
         video_subtitles = None
         if self._downloader.params.get('writesubtitles', False):
-            try:
-                self.report_video_subtitles_download(video_id)
-                request = compat_urllib_request.Request('http://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id)
-                try:
-                    srt_list = compat_urllib_request.urlopen(request).read().decode('utf-8')
-                except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-                    raise Trouble(u'WARNING: unable to download video subtitles: %s' % compat_str(err))
-                srt_lang_list = re.findall(r'name="([^"]*)"[^>]+lang_code="([\w\-]+)"', srt_list)
-                srt_lang_list = dict((l[1], l[0]) for l in srt_lang_list)
-                if not srt_lang_list:
-                    raise Trouble(u'WARNING: video has no closed captions')
-                if self._downloader.params.get('subtitleslang', False):
-                    srt_lang = self._downloader.params.get('subtitleslang')
-                elif 'en' in srt_lang_list:
-                    srt_lang = 'en'
-                else:
-                    srt_lang = srt_lang_list.keys()[0]
-                if not srt_lang in srt_lang_list:
-                    raise Trouble(u'WARNING: no closed captions found in the specified language')
-                request = compat_urllib_request.Request('http://www.youtube.com/api/timedtext?lang=%s&name=%s&v=%s' % (srt_lang, srt_lang_list[srt_lang], video_id))
-                try:
-                    srt_xml = compat_urllib_request.urlopen(request).read().decode('utf-8')
-                except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-                    raise Trouble(u'WARNING: unable to download video subtitles: %s' % compat_str(err))
-                if not srt_xml:
-                    raise Trouble(u'WARNING: unable to download video subtitles')
-                video_subtitles = self._closed_captions_xml_to_srt(srt_xml)
-            except Trouble as trouble:
-                self._downloader.trouble(str(trouble))
+            (srt_error, video_subtitles) = self._extract_subtitles(video_id)
+            if srt_error:
+                self._downloader.trouble(srt_error)
 
         if 'length_seconds' not in video_info:
             self._downloader.trouble(u'WARNING: unable to extract video duration')
@@ -443,7 +454,7 @@ class YoutubeIE(InfoExtractor):
         elif 'url_encoded_fmt_stream_map' in video_info and len(video_info['url_encoded_fmt_stream_map']) >= 1:
             url_data_strs = video_info['url_encoded_fmt_stream_map'][0].split(',')
             url_data = [compat_parse_qs(uds) for uds in url_data_strs]
-            url_data = filter(lambda ud: 'itag' in ud and 'url' in ud, url_data)
+            url_data = [ud for ud in url_data if 'itag' in ud and 'url' in ud]
             url_map = dict((ud['itag'][0], ud['url'][0] + '&signature=' + ud['sig'][0]) for ud in url_data)
 
             format_limit = self._downloader.params.get('format_limit', None)
@@ -493,6 +504,7 @@ class YoutubeIE(InfoExtractor):
                 'id':       video_id,
                 'url':      video_real_url,
                 'uploader': video_uploader,
+                'uploader_id': video_uploader_id,
                 'upload_date':  upload_date,
                 'title':    video_title,
                 'ext':      video_extension,
@@ -992,8 +1004,9 @@ class VimeoIE(InfoExtractor):
         # Extract title
         video_title = config["video"]["title"]
 
-        # Extract uploader
+        # Extract uploader and uploader_id
         video_uploader = config["video"]["owner"]["name"]
+        video_uploader_id = config["video"]["owner"]["url"].split('/')[-1]
 
         # Extract video thumbnail
         video_thumbnail = config["video"]["thumbnail"]
@@ -1005,9 +1018,9 @@ class VimeoIE(InfoExtractor):
 
         # Extract upload date
        video_upload_date = None
-        mobj = re.search(r'<span id="clip-date" style="display:none">[^:]*: (.*?)( \([^\(]*\))?</span>', webpage)
+        mobj = re.search(r'<meta itemprop="dateCreated" content="(\d{4})-(\d{2})-(\d{2})T', webpage)
         if mobj is not None:
-            video_upload_date = mobj.group(1)
+            video_upload_date = mobj.group(1) + mobj.group(2) + mobj.group(3)
 
         # Vimeo specific: extract request signature and timestamp
         sig = config['request']['signature']
@@ -1045,6 +1058,7 @@ class VimeoIE(InfoExtractor):
             'id':       video_id,
             'url':      video_url,
             'uploader': video_uploader,
+            'uploader_id': video_uploader_id,
             'upload_date':  video_upload_date,
             'title':    video_title,
             'ext':      video_extension,
@@ -2113,7 +2127,7 @@ class FacebookIE(InfoExtractor):
         video_description = video_info.get('description', 'No description available.')
 
         url_map = video_info['video_urls']
-        if len(url_map.keys()) > 0:
+        if len(list(url_map.keys())) > 0:
             # Decide which formats to download
             req_format = self._downloader.params.get('format', None)
             format_limit = self._downloader.params.get('format_limit', None)
@@ -2973,7 +2987,7 @@ class MixcloudIE(InfoExtractor):
                     if file_url is not None:
                         break # got it!
         else:
-            if req_format not in formats.keys():
+            if req_format not in list(formats.keys()):
                 self._downloader.trouble(u'ERROR: format is not available')
                 return
 
@@ -3272,7 +3286,7 @@ class YoukuIE(InfoExtractor):
         seed = config['data'][0]['seed']
 
         format = self._downloader.params.get('format', None)
-        supported_format = config['data'][0]['streamfileids'].keys()
+        supported_format = list(config['data'][0]['streamfileids'].keys())
 
         if format is None or format == 'best':
             if 'hd2' in supported_format:
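Several hunks above swap filter(...) and bare dict.keys() for list comprehensions or explicit list(...) calls. On Python 3, filter() returns a one-shot iterator and dict.keys() returns a view, so indexing them or iterating over them twice the way the old code did no longer works. A standalone sketch of the difference (sample data only, not values from the extractors):

    formats = {'34': 'low quality', '22': 'hd'}

    # Python 2: dict.keys() is a plain list, so formats.keys()[0] works.
    # Python 3: it is a view, and indexing raises TypeError, hence list(...).
    first_itag = list(formats.keys())[0]

    # filter() on Python 3 is lazy and can only be consumed once, so a later
    # membership test or a second loop over the result would see nothing;
    # the list comprehensions used in the diff avoid that.
    urls = ['a', 'b', 'c']
    matched = [u for u in urls if u != 'b']
    remaining = [u for u in urls if u not in matched]
    print(first_itag, matched, remaining)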
diff --git a/youtube_dl/__init__.py b/youtube_dl/__init__.py
index f94e0dcdb..c7a0bb959 100644
--- a/youtube_dl/__init__.py
+++ b/youtube_dl/__init__.py
@@ -307,7 +307,7 @@ def parseOpts():
             action='store_true', dest='autonumber',
             help='number downloaded files starting from 00000', default=False)
     filesystem.add_option('-o', '--output',
-            dest='outtmpl', metavar='TEMPLATE', help='output filename template. Use %(title)s to get the title, %(uploader)s for the uploader name, %(autonumber)s to get an automatically incremented number, %(ext)s for the filename extension, %(upload_date)s for the upload date (YYYYMMDD), %(extractor)s for the provider (youtube, metacafe, etc), %(id)s for the video id and %% for a literal percent. Use - to output to stdout. Can also be used to download to a different directory, for example with -o \'/my/downloads/%(uploader)s/%(title)s-%(id)s.%(ext)s\' .')
+            dest='outtmpl', metavar='TEMPLATE', help='output filename template. Use %(title)s to get the title, %(uploader)s for the uploader name, %(uploader_id)s for the uploader nickname if different, %(autonumber)s to get an automatically incremented number, %(ext)s for the filename extension, %(upload_date)s for the upload date (YYYYMMDD), %(extractor)s for the provider (youtube, metacafe, etc), %(id)s for the video id and %% for a literal percent. Use - to output to stdout. Can also be used to download to a different directory, for example with -o \'/my/downloads/%(uploader)s/%(title)s-%(id)s.%(ext)s\' .')
     filesystem.add_option('--restrict-filenames',
             action='store_true', dest='restrictfilenames',
             help='Restrict filenames to only ASCII characters, and avoid "&" and spaces in filenames', default=False)
@@ -454,8 +454,8 @@ def _real_main():
     if opts.list_extractors:
         for ie in extractors:
             print(ie.IE_NAME + (' (CURRENTLY BROKEN)' if not ie._WORKING else ''))
-            matchedUrls = filter(lambda url: ie.suitable(url), all_urls)
-            all_urls = filter(lambda url: url not in matchedUrls, all_urls)
+            matchedUrls = [url for url in all_urls if ie.suitable(url)]
+            all_urls = [url for url in all_urls if url not in matchedUrls]
             for mu in matchedUrls:
                 print(u'  ' + mu)
         sys.exit(0)
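The -o/--output help string now documents %(uploader_id)s. The template itself is ordinary %-style string formatting over fields of the extracted info dictionary, so its effect can be previewed like this (a sketch with sample values modelled on the YouTube test case; it is not the downloader's actual filename-preparation code, which also sanitizes the fields):

    info_dict = {
        'id': 'BaW_jenozKc',
        'title': 'youtube-dl test video',
        'ext': 'mp4',
        'uploader': 'Philipp Hagemeister',
        'uploader_id': 'phihag',
        'upload_date': '20121002',
        'extractor': 'youtube',
    }

    outtmpl = '/my/downloads/%(uploader_id)s/%(title)s-%(id)s.%(ext)s'
    print(outtmpl % info_dict)
    # /my/downloads/phihag/youtube-dl test video-BaW_jenozKc.mp4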
diff --git a/youtube_dl/utils.py b/youtube_dl/utils.py
index a5196b0ae..463804e18 100644
--- a/youtube_dl/utils.py
+++ b/youtube_dl/utils.py
@@ -298,7 +298,8 @@ def clean_html(html):
     """Clean an HTML snippet into a readable string"""
     # Newline vs <br />
     html = html.replace('\n', ' ')
-    html = re.sub('\s*<\s*br\s*/?\s*>\s*', '\n', html)
+    html = re.sub(r'\s*<\s*br\s*/?\s*>\s*', '\n', html)
+    html = re.sub(r'<\s*/\s*p\s*>\s*<\s*p[^>]*>', '\n', html)
     # Strip html tags
     html = re.sub('<.*?>', '', html)
     # Replace html entities
@@ -465,14 +466,6 @@ class ContentTooShortError(Exception):
         self.downloaded = downloaded
         self.expected = expected
 
-
-class Trouble(Exception):
-    """Trouble helper exception
-
-    This is an exception to be handled with
-    FileDownloader.trouble
-    """
-
 class YoutubeDLHandler(compat_urllib_request.HTTPHandler):
     """Handler for HTTP requests and responses.
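The extra substitution in clean_html turns </p><p> boundaries into newlines before the remaining tags are stripped, so multi-paragraph descriptions keep their paragraph breaks. A standalone illustration of the two regular expressions involved (it omits the entity unescaping that the real function also performs):

    import re

    html = '<p>First paragraph.</p><p>Second paragraph.<br/>With a manual break.</p>'

    text = html.replace('\n', ' ')
    text = re.sub(r'\s*<\s*br\s*/?\s*>\s*', '\n', text)        # <br> / <br/> -> newline
    text = re.sub(r'<\s*/\s*p\s*>\s*<\s*p[^>]*>', '\n', text)  # </p><p> -> newline
    text = re.sub('<.*?>', '', text)                           # strip the remaining tags
    print(text)
    # First paragraph.
    # Second paragraph.
    # With a manual break.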