about summary refs log tree commitdiff
diff options
context:
space:
mode:
authorFilippo Valsorda <filippo.valsorda@gmail.com>2012-12-20 16:30:55 +0100
committerFilippo Valsorda <filippo.valsorda@gmail.com>2012-12-20 16:30:55 +0100
commit6b3aef80ceba9b4715065be924dcb1f83ec36655 (patch)
tree37f54709feb3ee463384643a8f6fba6996300ab5
parent77c4beab8a0c13e158bad8af4e014d57766f1940 (diff)
better Vimeo tests; fixed a couple of VimeoIE fields
-rw-r--r--test/test_download.py9
-rw-r--r--test/test_write_info_json.py2
-rw-r--r--test/test_youtube_lists.py2
-rw-r--r--test/test_youtube_subtitles.py2
-rw-r--r--test/tests.json13
-rwxr-xr-xyoutube_dl/InfoExtractors.py4
-rw-r--r--youtube_dl/utils.py3
7 files changed, 18 insertions, 17 deletions
diff --git a/test/test_download.py b/test/test_download.py
index 9a6d4d604..1ee1b334d 100644
--- a/test/test_download.py
+++ b/test/test_download.py
@@ -6,8 +6,8 @@ import os
import json
import unittest
import sys
-import socket
import hashlib
+import socket
# Allow direct execution
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
@@ -25,7 +25,6 @@ cookie_processor = compat_urllib_request.HTTPCookieProcessor(jar)
proxy_handler = compat_urllib_request.ProxyHandler()
opener = compat_urllib_request.build_opener(proxy_handler, cookie_processor, YoutubeDLHandler())
compat_urllib_request.install_opener(opener)
-socket.setdefaulttimeout(300) # 5 minutes should be enough (famous last words)
class FileDownloader(youtube_dl.FileDownloader):
def __init__(self, *args, **kwargs):
@@ -90,12 +89,12 @@ def generator(test_case):
md5_for_file = _file_md5(test_case['file'])
self.assertEqual(md5_for_file, test_case['md5'])
info_dict = fd.processed_info_dicts[0]
- for (info_element, value) in test_case.get('info_dict', {}).items():
+ for (info_field, value) in test_case.get('info_dict', {}).items():
if value.startswith('md5:'):
- md5_info_value = hashlib.md5(info_dict[info_element]).hexdigest()
+ md5_info_value = hashlib.md5(info_dict.get(info_field, '')).hexdigest()
self.assertEqual(value[3:], md5_info_value)
else:
- self.assertEqual(value, info_dict[info_element])
+ self.assertEqual(value, info_dict.get(info_field))
return test_template
diff --git a/test/test_write_info_json.py b/test/test_write_info_json.py
index ebf543980..8134dda37 100644
--- a/test/test_write_info_json.py
+++ b/test/test_write_info_json.py
@@ -3,7 +3,6 @@
import json
import os
-import socket
import sys
import unittest
@@ -22,7 +21,6 @@ cookie_processor = compat_urllib_request.HTTPCookieProcessor(jar)
proxy_handler = compat_urllib_request.ProxyHandler()
opener = compat_urllib_request.build_opener(proxy_handler, cookie_processor, YoutubeDLHandler())
compat_urllib_request.install_opener(opener)
-socket.setdefaulttimeout(300) # 5 minutes should be enough (famous last words)
class FileDownloader(youtube_dl.FileDownloader):
def __init__(self, *args, **kwargs):
diff --git a/test/test_youtube_lists.py b/test/test_youtube_lists.py
index e352e5ab9..3044e0852 100644
--- a/test/test_youtube_lists.py
+++ b/test/test_youtube_lists.py
@@ -2,7 +2,6 @@
import sys
import unittest
-import socket
import json
# Allow direct execution
@@ -22,7 +21,6 @@ cookie_processor = compat_urllib_request.HTTPCookieProcessor(jar)
proxy_handler = compat_urllib_request.ProxyHandler()
opener = compat_urllib_request.build_opener(proxy_handler, cookie_processor, YoutubeDLHandler())
compat_urllib_request.install_opener(opener)
-socket.setdefaulttimeout(300) # 5 minutes should be enough (famous last words)
class FakeDownloader(object):
def __init__(self):
diff --git a/test/test_youtube_subtitles.py b/test/test_youtube_subtitles.py
index 64a391d14..5d3566a35 100644
--- a/test/test_youtube_subtitles.py
+++ b/test/test_youtube_subtitles.py
@@ -2,7 +2,6 @@
import sys
import unittest
-import socket
import json
import io
import hashlib
@@ -24,7 +23,6 @@ cookie_processor = compat_urllib_request.HTTPCookieProcessor(jar)
proxy_handler = compat_urllib_request.ProxyHandler()
opener = compat_urllib_request.build_opener(proxy_handler, cookie_processor, YoutubeDLHandler())
compat_urllib_request.install_opener(opener)
-socket.setdefaulttimeout(300) # 5 minutes should be enough (famous last words)
class FakeDownloader(object):
def __init__(self):
diff --git a/test/tests.json b/test/tests.json
index 83afda985..d24bdf6fc 100644
--- a/test/tests.json
+++ b/test/tests.json
@@ -37,9 +37,16 @@
},
{
"name": "Vimeo",
- "md5": "60540a4ec7cc378ec84b919c0aed5023",
- "url": "http://vimeo.com/14160053",
- "file": "14160053.mp4"
+ "md5": "8879b6cc097e987f02484baf890129e5",
+ "url": "http://vimeo.com/56015672",
+ "file": "56015672.mp4",
+ "info_dict": {
+ "title": "youtube-dl test video - ★ \" ' 幸 / \\ ä ↭ 𝕐",
+ "uploader": "Filippo Valsorda",
+ "uploader_id": "user7108434",
+ "upload_date": "20121220",
+ "description": "This is a test case for youtube-dl.\nFor more information, see github.com/rg3/youtube-dl\nTest chars: ★ \" ' 幸 / \\ ä ↭ 𝕐"
+ }
},
{
"name": "Soundcloud",
diff --git a/youtube_dl/InfoExtractors.py b/youtube_dl/InfoExtractors.py
index 3eb070d4a..5a9032331 100755
--- a/youtube_dl/InfoExtractors.py
+++ b/youtube_dl/InfoExtractors.py
@@ -1018,9 +1018,9 @@ class VimeoIE(InfoExtractor):
# Extract upload date
video_upload_date = None
- mobj = re.search(r'<span id="clip-date" style="display:none">[^:]*: (.*?)( \([^\(]*\))?</span>', webpage)
+ mobj = re.search(r'<meta itemprop="dateCreated" content="(\d{4})-(\d{2})-(\d{2})T', webpage)
if mobj is not None:
- video_upload_date = mobj.group(1)
+ video_upload_date = mobj.group(1) + mobj.group(2) + mobj.group(3)
# Vimeo specific: extract request signature and timestamp
sig = config['request']['signature']
diff --git a/youtube_dl/utils.py b/youtube_dl/utils.py
index c18c9beed..463804e18 100644
--- a/youtube_dl/utils.py
+++ b/youtube_dl/utils.py
@@ -298,7 +298,8 @@ def clean_html(html):
"""Clean an HTML snippet into a readable string"""
# Newline vs <br />
html = html.replace('\n', ' ')
- html = re.sub('\s*<\s*br\s*/?\s*>\s*', '\n', html)
+ html = re.sub(r'\s*<\s*br\s*/?\s*>\s*', '\n', html)
+ html = re.sub(r'<\s*/\s*p\s*>\s*<\s*p[^>]*>', '\n', html)
# Strip html tags
html = re.sub('<.*?>', '', html)
# Replace html entities