-rw-r--r--  test/test_utils.py            16
-rw-r--r--  youtube_dl/extractor/sohu.py  96
-rw-r--r--  youtube_dl/utils.py           15
3 files changed, 114 insertions, 13 deletions
diff --git a/test/test_utils.py b/test/test_utils.py
index 8f790bf0a..4f0ffd482 100644
--- a/test/test_utils.py
+++ b/test/test_utils.py
@@ -55,6 +55,7 @@ from youtube_dl.utils import (
xpath_with_ns,
render_table,
match_str,
+ url_sanitize_consecutive_slashes,
)
@@ -538,6 +539,21 @@ ffmpeg version 2.4.4 Copyright (c) 2000-2014 the FFmpeg ...'''), '2.4.4')
'like_count > 100 & dislike_count <? 50 & description',
{'like_count': 190, 'dislike_count': 10}))
+ def test_url_sanitize_consecutive_slashes(self):
+ self.assertEqual(url_sanitize_consecutive_slashes(
+ 'http://hostname/foo//bar/filename.html'),
+ 'http://hostname/foo/bar/filename.html')
+ self.assertEqual(url_sanitize_consecutive_slashes(
+ 'http://hostname//foo/bar/filename.html'),
+ 'http://hostname/foo/bar/filename.html')
+ self.assertEqual(url_sanitize_consecutive_slashes(
+ 'http://hostname//'), 'http://hostname/')
+ self.assertEqual(url_sanitize_consecutive_slashes(
+ 'http://hostname/foo/bar/filename.html'),
+ 'http://hostname/foo/bar/filename.html')
+ self.assertEqual(url_sanitize_consecutive_slashes(
+ 'http://hostname/'), 'http://hostname/')
+
if __name__ == '__main__':
unittest.main()
diff --git a/youtube_dl/extractor/sohu.py b/youtube_dl/extractor/sohu.py
index c04791997..ea5cc06b9 100644
--- a/youtube_dl/extractor/sohu.py
+++ b/youtube_dl/extractor/sohu.py
@@ -4,22 +4,87 @@ from __future__ import unicode_literals
import re
from .common import InfoExtractor
-from .common import compat_str
+from ..compat import (
+ compat_str,
+ compat_urllib_request
+)
+from ..utils import url_sanitize_consecutive_slashes
class SohuIE(InfoExtractor):
_VALID_URL = r'https?://(?P<mytv>my\.)?tv\.sohu\.com/.+?/(?(mytv)|n)(?P<id>\d+)\.shtml.*?'
- _TEST = {
+ _TESTS = [{
+ 'note': 'This video is available only in Mainland China',
'url': 'http://tv.sohu.com/20130724/n382479172.shtml#super',
- 'md5': 'bde8d9a6ffd82c63a1eefaef4eeefec7',
+ 'md5': '29175c8cadd8b5cc4055001e85d6b372',
'info_dict': {
'id': '382479172',
'ext': 'mp4',
'title': 'MV:Far East Movement《The Illest》',
},
- 'skip': 'Only available from China',
- }
+ 'params': {
+ 'cn_verification_proxy': 'proxy.uku.im:8888'
+ }
+ }, {
+ 'url': 'http://tv.sohu.com/20150305/n409385080.shtml',
+ 'md5': '699060e75cf58858dd47fb9c03c42cfb',
+ 'info_dict': {
+ 'id': '409385080',
+ 'ext': 'mp4',
+ 'title': '《2015湖南卫视羊年元宵晚会》唐嫣《花好月圆》',
+ }
+ }, {
+ 'url': 'http://my.tv.sohu.com/us/232799889/78693464.shtml',
+ 'md5': '9bf34be48f2f4dadcb226c74127e203c',
+ 'info_dict': {
+ 'id': '78693464',
+ 'ext': 'mp4',
+ 'title': '【爱范品】第31期:MWC见不到的奇葩手机',
+ }
+ }, {
+ 'note': 'Multipart video',
+ 'url': 'http://my.tv.sohu.com/pl/8384802/78910339.shtml',
+ 'info_dict': {
+ 'id': '78910339',
+ },
+ 'playlist': [{
+ 'md5': 'bdbfb8f39924725e6589c146bc1883ad',
+ 'info_dict': {
+ 'id': '78910339_part1',
+ 'ext': 'mp4',
+ 'duration': 294,
+ 'title': '【神探苍实战秘籍】第13期 战争之影 赫卡里姆',
+ }
+ }, {
+ 'md5': '3e1f46aaeb95354fd10e7fca9fc1804e',
+ 'info_dict': {
+ 'id': '78910339_part2',
+ 'ext': 'mp4',
+ 'duration': 300,
+ 'title': '【神探苍实战秘籍】第13期 战争之影 赫卡里姆',
+ }
+ }, {
+ 'md5': '8407e634175fdac706766481b9443450',
+ 'info_dict': {
+ 'id': '78910339_part3',
+ 'ext': 'mp4',
+ 'duration': 150,
+ 'title': '【神探苍实战秘籍】第13期 战争之影 赫卡里姆',
+ }
+ }]
+ }, {
+ 'note': 'Video with title containing dash',
+ 'url': 'http://my.tv.sohu.com/us/249884221/78932792.shtml',
+ 'info_dict': {
+ 'id': '78932792',
+ 'ext': 'mp4',
+ 'title': 'youtube-dl testing video',
+ },
+ 'params': {
+ 'skip_download': True
+ }
+ }]
def _real_extract(self, url):
@@ -29,19 +94,22 @@ class SohuIE(InfoExtractor):
else:
base_data_url = 'http://hot.vrs.sohu.com/vrs_flash.action?vid='
- return self._download_json(
- base_data_url + vid_id, video_id,
- 'Downloading JSON data for %s' % vid_id)
+ req = compat_urllib_request.Request(base_data_url + vid_id)
+
+ cn_verification_proxy = self._downloader.params.get('cn_verification_proxy')
+ if cn_verification_proxy:
+ req.add_header('Ytdl-request-proxy', cn_verification_proxy)
+
+ return self._download_json(req, video_id,
+ 'Downloading JSON data for %s' % vid_id)
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
mytv = mobj.group('mytv') is not None
webpage = self._download_webpage(url, video_id)
- raw_title = self._html_search_regex(
- r'(?s)<title>(.+?)</title>',
- webpage, 'video title')
- title = raw_title.partition('-')[0].strip()
+
+ title = self._og_search_title(webpage)
vid = self._html_search_regex(
r'var vid ?= ?["\'](\d+)["\']',
@@ -77,7 +145,9 @@ class SohuIE(InfoExtractor):
% (format_id, i + 1, part_count))
part_info = part_str.split('|')
- video_url = '%s%s?key=%s' % (part_info[0], su[i], part_info[3])
+
+ video_url = url_sanitize_consecutive_slashes(
+ '%s%s?key=%s' % (part_info[0], su[i], part_info[3]))
formats.append({
'url': video_url,
diff --git a/youtube_dl/utils.py b/youtube_dl/utils.py
index c3135effc..e82e3998a 100644
--- a/youtube_dl/utils.py
+++ b/youtube_dl/utils.py
@@ -1804,3 +1804,18 @@ class PerRequestProxyHandler(compat_urllib_request.ProxyHandler):
return None # No Proxy
return compat_urllib_request.ProxyHandler.proxy_open(
self, req, proxy, type)
+
+
+def url_sanitize_consecutive_slashes(url):
+ """Sanitize URLs with consecutive slashes
+
+ For example, transform both
+ http://hostname/foo//bar/filename.html
+ and
+ http://hostname//foo/bar/filename.html
+ into
+ http://hostname/foo/bar/filename.html
+ """
+ parsed_url = list(compat_urlparse.urlparse(url))
+ parsed_url[2] = re.sub(r'/{2,}', '/', parsed_url[2])
+ return compat_urlparse.urlunparse(parsed_url)
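
The new helper only rewrites the path component returned by urlparse, so the "//" after the scheme and the query string are left untouched. A minimal sketch of how it behaves once this patch is applied (the CDN hostname and key below are made up for illustration):

    from youtube_dl.utils import url_sanitize_consecutive_slashes

    # Hypothetical URL of the kind the Sohu extractor assembles from
    # part_info[0] + su[i]; doubled slashes in the path are collapsed,
    # while the scheme separator and the query survive unchanged.
    print(url_sanitize_consecutive_slashes(
        'http://data.vod.itc.cn//data//video.mp4?key=abc'))
    # -> http://data.vod.itc.cn/data/video.mp4?key=abc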