| author | Sergey M․ <dstftw@gmail.com> | 2015-03-17 21:39:45 +0600 | 
|---|---|---|
| committer | Sergey M․ <dstftw@gmail.com> | 2015-03-17 21:39:45 +0600 | 
| commit | 00bfe40e4d2280d4988e1ce7402f65a0f1708cb8 (patch) | |
| tree | e42919244305450549ee6bc1797949ff041da99c | |
| parent | 219da6bb685765186b7ffb878399c32f44351802 (diff) | |
| parent | cd459b1d490ca8c0639220a835f5e6bee3e9a80d (diff) | |
Merge branch 'yan12125-sohu_fix'
| -rw-r--r-- | test/test_utils.py | 21 |
| -rw-r--r-- | youtube_dl/extractor/sohu.py | 93 |
| -rw-r--r-- | youtube_dl/utils.py | 7 |
3 files changed, 110 insertions, 11 deletions
```diff
diff --git a/test/test_utils.py b/test/test_utils.py
index 8f790bf0a..3431ad24e 100644
--- a/test/test_utils.py
+++ b/test/test_utils.py
@@ -39,6 +39,7 @@ from youtube_dl.utils import (
     read_batch_urls,
     sanitize_filename,
     sanitize_path,
+    sanitize_url_path_consecutive_slashes,
     shell_quote,
     smuggle_url,
     str_to_int,
@@ -168,6 +169,26 @@ class TestUtil(unittest.TestCase):
         self.assertEqual(sanitize_path('./abc'), 'abc')
         self.assertEqual(sanitize_path('./../abc'), '..\\abc')
 
+    def test_sanitize_url_path_consecutive_slashes(self):
+        self.assertEqual(
+            sanitize_url_path_consecutive_slashes('http://hostname/foo//bar/filename.html'),
+            'http://hostname/foo/bar/filename.html')
+        self.assertEqual(
+            sanitize_url_path_consecutive_slashes('http://hostname//foo/bar/filename.html'),
+            'http://hostname/foo/bar/filename.html')
+        self.assertEqual(
+            sanitize_url_path_consecutive_slashes('http://hostname//'),
+            'http://hostname/')
+        self.assertEqual(
+            sanitize_url_path_consecutive_slashes('http://hostname/foo/bar/filename.html'),
+            'http://hostname/foo/bar/filename.html')
+        self.assertEqual(
+            sanitize_url_path_consecutive_slashes('http://hostname/'),
+            'http://hostname/')
+        self.assertEqual(
+            sanitize_url_path_consecutive_slashes('http://hostname/abc//'),
+            'http://hostname/abc/')
+
     def test_ordered_set(self):
         self.assertEqual(orderedSet([1, 1, 2, 3, 4, 4, 5, 6, 7, 3, 5]), [1, 2, 3, 4, 5, 6, 7])
         self.assertEqual(orderedSet([]), [])
diff --git a/youtube_dl/extractor/sohu.py b/youtube_dl/extractor/sohu.py
index c04791997..11edf616a 100644
--- a/youtube_dl/extractor/sohu.py
+++ b/youtube_dl/extractor/sohu.py
@@ -4,22 +4,87 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
-from .common import compat_str
+from ..compat import (
+    compat_str,
+    compat_urllib_request
+)
+from ..utils import sanitize_url_path_consecutive_slashes
 
 
 class SohuIE(InfoExtractor):
     _VALID_URL = r'https?://(?P<mytv>my\.)?tv\.sohu\.com/.+?/(?(mytv)|n)(?P<id>\d+)\.shtml.*?'
 
-    _TEST = {
+    _TESTS = [{
+        'note': 'This video is available only in Mainland China',
         'url': 'http://tv.sohu.com/20130724/n382479172.shtml#super',
-        'md5': 'bde8d9a6ffd82c63a1eefaef4eeefec7',
+        'md5': '29175c8cadd8b5cc4055001e85d6b372',
         'info_dict': {
             'id': '382479172',
             'ext': 'mp4',
             'title': 'MV:Far East Movement《The Illest》',
         },
-        'skip': 'Only available from China',
-    }
+        'params': {
+            'cn_verification_proxy': 'proxy.uku.im:8888'
+        }
+    }, {
+        'url': 'http://tv.sohu.com/20150305/n409385080.shtml',
+        'md5': '699060e75cf58858dd47fb9c03c42cfb',
+        'info_dict': {
+            'id': '409385080',
+            'ext': 'mp4',
+            'title': '《2015湖南卫视羊年元宵晚会》唐嫣《花好月圆》',
+        }
+    }, {
+        'url': 'http://my.tv.sohu.com/us/232799889/78693464.shtml',
+        'md5': '9bf34be48f2f4dadcb226c74127e203c',
+        'info_dict': {
+            'id': '78693464',
+            'ext': 'mp4',
+            'title': '【爱范品】第31期:MWC见不到的奇葩手机',
+        }
+    }, {
+        'note': 'Multipart video',
+        'url': 'http://my.tv.sohu.com/pl/8384802/78910339.shtml',
+        'info_dict': {
+            'id': '78910339',
+        },
+        'playlist': [{
+            'md5': 'bdbfb8f39924725e6589c146bc1883ad',
+            'info_dict': {
+                'id': '78910339_part1',
+                'ext': 'mp4',
+                'duration': 294,
+                'title': '【神探苍实战秘籍】第13期 战争之影 赫卡里姆',
+            }
+        }, {
+            'md5': '3e1f46aaeb95354fd10e7fca9fc1804e',
+            'info_dict': {
+                'id': '78910339_part2',
+                'ext': 'mp4',
+                'duration': 300,
+                'title': '【神探苍实战秘籍】第13期 战争之影 赫卡里姆',
+            }
+        }, {
+            'md5': '8407e634175fdac706766481b9443450',
+            'info_dict': {
+                'id': '78910339_part3',
+                'ext': 'mp4',
+                'duration': 150,
+                'title': '【神探苍实战秘籍】第13期 战争之影 赫卡里姆',
+            }
+        }]
+    }, {
+        'note': 'Video with title containing dash',
+        'url': 'http://my.tv.sohu.com/us/249884221/78932792.shtml',
+        'info_dict': {
+            'id': '78932792',
+            'ext': 'mp4',
+            'title': 'youtube-dl testing video',
+        },
+        'params': {
+            'skip_download': True
+        }
+    }]
 
     def _real_extract(self, url):
 
@@ -29,8 +94,14 @@ class SohuIE(InfoExtractor):
             else:
                 base_data_url = 'http://hot.vrs.sohu.com/vrs_flash.action?vid='
 
+            req = compat_urllib_request.Request(base_data_url + vid_id)
+
+            cn_verification_proxy = self._downloader.params.get('cn_verification_proxy')
+            if cn_verification_proxy:
+                req.add_header('Ytdl-request-proxy', cn_verification_proxy)
+
             return self._download_json(
-                base_data_url + vid_id, video_id,
+                req, video_id,
                 'Downloading JSON data for %s' % vid_id)
 
         mobj = re.match(self._VALID_URL, url)
@@ -38,10 +109,8 @@
         mytv = mobj.group('mytv') is not None
 
         webpage = self._download_webpage(url, video_id)
-        raw_title = self._html_search_regex(
-            r'(?s)<title>(.+?)</title>',
-            webpage, 'video title')
-        title = raw_title.partition('-')[0].strip()
+
+        title = self._og_search_title(webpage)
 
         vid = self._html_search_regex(
             r'var vid ?= ?["\'](\d+)["\']',
@@ -77,7 +146,9 @@
                     % (format_id, i + 1, part_count))
 
                 part_info = part_str.split('|')
-                video_url = '%s%s?key=%s' % (part_info[0], su[i], part_info[3])
+
+                video_url = sanitize_url_path_consecutive_slashes(
+                    '%s%s?key=%s' % (part_info[0], su[i], part_info[3]))
 
                 formats.append({
                     'url': video_url,
diff --git a/youtube_dl/utils.py b/youtube_dl/utils.py
index c3135effc..472d4df41 100644
--- a/youtube_dl/utils.py
+++ b/youtube_dl/utils.py
@@ -326,6 +326,13 @@ def sanitize_path(s):
     return os.path.join(*sanitized_path)
 
 
+def sanitize_url_path_consecutive_slashes(url):
+    """Collapses consecutive slashes in URLs' path"""
+    parsed_url = list(compat_urlparse.urlparse(url))
+    parsed_url[2] = re.sub(r'/{2,}', '/', parsed_url[2])
+    return compat_urlparse.urlunparse(parsed_url)
+
+
 def orderedSet(iterable):
     """ Remove all duplicates from the input iterable """
     res = []
```
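For readers skimming the patch, here is a minimal standalone sketch of what the new `sanitize_url_path_consecutive_slashes()` helper does, written against the Python 3 stdlib `urllib.parse` rather than youtube-dl's `compat_urlparse` shim (an assumption made only so the snippet runs on its own). The example URLs mirror the new test cases; the motivating case is the Sohu extractor's `'%s%s?key=%s'` concatenation, which can leave a double slash in the path.

```python
# Standalone sketch, assuming stdlib urllib.parse in place of youtube-dl's
# compat_urlparse shim (the shim exists so the same code runs on Python 2
# and 3); the logic mirrors the five-line function added to utils.py above.
import re
from urllib.parse import urlparse, urlunparse


def sanitize_url_path_consecutive_slashes(url):
    """Collapse runs of '/' in the path component; scheme, host and query stay untouched."""
    parsed = list(urlparse(url))
    parsed[2] = re.sub(r'/{2,}', '/', parsed[2])  # index 2 of the 6-tuple is the path
    return urlunparse(parsed)


# Behaviour matching the new test cases in test_utils.py:
assert sanitize_url_path_consecutive_slashes(
    'http://hostname/foo//bar/filename.html') == 'http://hostname/foo/bar/filename.html'
assert sanitize_url_path_consecutive_slashes('http://hostname//') == 'http://hostname/'
assert sanitize_url_path_consecutive_slashes('http://hostname/abc//') == 'http://hostname/abc/'
# Already-clean URLs pass through unchanged:
assert sanitize_url_path_consecutive_slashes(
    'http://hostname/foo/bar/filename.html') == 'http://hostname/foo/bar/filename.html'
```

Only the path (index 2 of the parsed 6-tuple) is rewritten, so a `//` inside a query string, or the `//host` part of the URL itself, is left alone.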
