From 6624a2b07dafad4de895b4e84f4595214817518d Mon Sep 17 00:00:00 2001
From: huohuarong
Date: Fri, 2 Aug 2013 17:58:46 +0800
Subject: add an extractor for tv.sohu.com

---
 youtube_dl/extractor/sohu.py | 97 ++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 97 insertions(+)
 create mode 100644 youtube_dl/extractor/sohu.py
(limited to 'youtube_dl/extractor/sohu.py')

diff --git a/youtube_dl/extractor/sohu.py b/youtube_dl/extractor/sohu.py
new file mode 100644
index 000000000..830814221
--- /dev/null
+++ b/youtube_dl/extractor/sohu.py
@@ -0,0 +1,97 @@
+# encoding: utf-8
+
+import re
+import json
+import time
+import logging
+import urllib2
+
+from .common import InfoExtractor
+from ..utils import compat_urllib_request
+
+
+class SohuIE(InfoExtractor):
+    _VALID_URL = r'https?://tv\.sohu\.com/\d+?/n(?P<id>\d+)\.shtml.*?'
+
+    _TEST = {
+        u'url': u'http://tv.sohu.com/20130724/n382479172.shtml#super',
+        u'file': u'382479172.flv',
+        u'md5': u'cc84eed6b6fbf0f2f9a8d3cb9da1939b',
+        u'info_dict': {
+            u'title': u'The Illest - Far East Movement Riff Raff',
+        },
+    }
+
+    def _clearn_html(self, string):
+        tags = re.findall(r'<.+?>', string)
+        for t in tags:
+            string = string.replace(t, ' ')
+        for i in range(2):
+            spaces = re.findall(r'\s+', string)
+            for s in spaces:
+                string = string.replace(s, ' ')
+            string = string.strip()
+        return string
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+        webpage = self._download_webpage(url, video_id)
+        pattern = r'<h1 id="video-title">\n*?(.+?)\n*?</h1>'
+        compiled = re.compile(pattern, re.DOTALL)
+        title = self._search_regex(compiled, webpage, u'video title').strip('\t\n')
+        title = self._clearn_html(title)
+        pattern = re.compile(r'var vid="(\d+)"')
+        result = re.search(pattern, webpage)
+        if not result:
+            logging.info('[Sohu] could not get vid')
+            return None
+        vid = result.group(1)
+        logging.info('vid: %s' % vid)
+        base_url_1 = 'http://hot.vrs.sohu.com/vrs_flash.action?vid='
+        url_1 = base_url_1 + vid
+        logging.info('json url: %s' % url_1)
+        json_1 = json.loads(urllib2.urlopen(url_1).read())
+        # get the highest definition video vid and json infomation.
+        vids = []
+        qualities = ('oriVid', 'superVid', 'highVid', 'norVid')
+        for vid_name in qualities:
+            vids.append(json_1['data'][vid_name])
+        clearest_vid = 0
+        for i, v in enumerate(vids):
+            if v != 0:
+                clearest_vid = v
+                logging.info('quality definition: %s' % qualities[i][:-3])
+                break
+        if not clearest_vid:
+            logging.warning('could not find valid clearest_vid')
+            return None
+        if vid != clearest_vid:
+            url_1 = '%s%d' % (base_url_1, clearest_vid)
+            logging.info('highest definition json url: %s' % url_1)
+            json_1 = json.loads(urllib2.urlopen(url_1).read())
+        allot = json_1['allot']
+        prot = json_1['prot']
+        clipsURL = json_1['data']['clipsURL']
+        su = json_1['data']['su']
+        num_of_parts = json_1['data']['totalBlocks']
+        logging.info('Total parts: %d' % num_of_parts)
+        base_url_3 = 'http://allot/?prot=prot&file=clipsURL[i]&new=su[i]'
+        files_info = []
+        for i in range(num_of_parts):
+            middle_url = 'http://%s/?prot=%s&file=%s&new=%s' % (allot, prot, clipsURL[i], su[i])
+            logging.info('middle url part %d: %s' % (i, middle_url))
+            middle_info = urllib2.urlopen(middle_url).read().split('|')
+            middle_part_1 = middle_info[0]
+            download_url = '%s%s?key=%s' % (middle_info[0], su[i], middle_info[3])
+
+            info = {
+                'id': '%s_part%02d' % (video_id, i + 1),
+                'title': title,
+                'url': download_url,
+                'ext': 'mp4',
+            }
+            files_info.append(info)
+            time.sleep(1)
+
+        return files_info
--
cgit v1.2.3
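
The patch above encodes Sohu's two-step URL scheme: vrs_flash.action returns
per-part clip paths (clipsURL), path suffixes (su), a relay host (allot) and a
protocol token (prot); a request to the relay host then returns a '|'-separated
record whose first field is the file-server prefix and whose fourth field is an
access key. The sketch below replays that assembly offline; the field names
come from the patch, while the host, path and key values are invented
placeholders standing in for real server responses:

    # Offline sketch of the URL assembly performed in the patch above.
    # Field names follow vrs_flash.action; all values here are made up.
    sample = {
        'allot': 'data.vod.example.com',  # hypothetical relay host
        'prot': '9',                      # hypothetical protocol token
        'data': {
            'totalBlocks': 2,
            'clipsURL': ['http://h.example/clip0.mp4', 'http://h.example/clip1.mp4'],
            'su': ['/su/part0.mp4', '/su/part1.mp4'],
        },
    }
    # Stands in for urllib2.urlopen(middle_url).read() in the patch.
    canned_reply = 'http://fs.example|0|0|SOMEKEY|0'

    for i in range(sample['data']['totalBlocks']):
        # Ask the relay host where the clip really lives.
        middle_url = 'http://%s/?prot=%s&file=%s&new=%s' % (
            sample['allot'], sample['prot'],
            sample['data']['clipsURL'][i], sample['data']['su'][i])
        middle_info = canned_reply.split('|')
        # Final URL = file-server prefix + su path + access key.
        download_url = '%s%s?key=%s' % (
            middle_info[0], sample['data']['su'][i], middle_info[3])
        print(download_url)  # http://fs.example/su/part0.mp4?key=SOMEKEY, ...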

From 4ec929dc9b55a2588b4a27e64871c5bfa900bf37 Mon Sep 17 00:00:00 2001
From: huohuarong
Date: Sat, 3 Aug 2013 10:29:58 +0800
Subject: use ..utils/clean_html()

---
 youtube_dl/extractor/sohu.py | 19 ++++++-------------
 1 file changed, 6 insertions(+), 13 deletions(-)
(limited to 'youtube_dl/extractor/sohu.py')

diff --git a/youtube_dl/extractor/sohu.py b/youtube_dl/extractor/sohu.py
index 830814221..cf0ab5478 100644
--- a/youtube_dl/extractor/sohu.py
+++ b/youtube_dl/extractor/sohu.py
@@ -7,7 +7,7 @@ import logging
 import urllib2
 
 from .common import InfoExtractor
-from ..utils import compat_urllib_request
+from ..utils import compat_urllib_request, clean_html
 
 
 class SohuIE(InfoExtractor):
@@ -22,16 +22,6 @@ class SohuIE(InfoExtractor):
         },
     }
 
-    def _clearn_html(self, string):
-        tags = re.findall(r'<.+?>', string)
-        for t in tags:
-            string = string.replace(t, ' ')
-        for i in range(2):
-            spaces = re.findall(r'\s+', string)
-            for s in spaces:
-                string = string.replace(s, ' ')
-            string = string.strip()
-        return string
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
@@ -40,7 +30,7 @@ class SohuIE(InfoExtractor):
         pattern = r'<h1 id="video-title">\n*?(.+?)\n*?</h1>'
         compiled = re.compile(pattern, re.DOTALL)
         title = self._search_regex(compiled, webpage, u'video title').strip('\t\n')
-        title = self._clearn_html(title)
+        title = clean_html(title)
         pattern = re.compile(r'var vid="(\d+)"')
         result = re.search(pattern, webpage)
         if not result:
@@ -93,5 +83,8 @@ class SohuIE(InfoExtractor):
             }
             files_info.append(info)
             time.sleep(1)
-
+        if num_of_parts == 1:
+            info = files_info[0]
+            info['id'] = video_id
+            return info
         return files_info
--
cgit v1.2.3

From b5a6d408181c118bf51382f486a2492643ed74ec Mon Sep 17 00:00:00 2001
From: huohuarong
Date: Mon, 5 Aug 2013 22:51:54 +0800
Subject: fix parse title bug

---
 youtube_dl/extractor/sohu.py | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)
(limited to 'youtube_dl/extractor/sohu.py')

diff --git a/youtube_dl/extractor/sohu.py b/youtube_dl/extractor/sohu.py
index cf0ab5478..cd049b6f0 100644
--- a/youtube_dl/extractor/sohu.py
+++ b/youtube_dl/extractor/sohu.py
@@ -27,10 +27,10 @@ class SohuIE(InfoExtractor):
         mobj = re.match(self._VALID_URL, url)
         video_id = mobj.group('id')
         webpage = self._download_webpage(url, video_id)
-        pattern = r'<h1 id="video-title">\n*?(.+?)\n*?</h1>'
+        pattern = r'<title>(.+?)</title>'
         compiled = re.compile(pattern, re.DOTALL)
-        title = self._search_regex(compiled, webpage, u'video title').strip('\t\n')
-        title = clean_html(title)
+        title = self._search_regex(compiled, webpage, u'video title')
+        title = clean_html(title).split('-')[0].strip()
         pattern = re.compile(r'var vid="(\d+)"')
         result = re.search(pattern, webpage)
         if not result:
@@ -41,7 +41,8 @@ class SohuIE(InfoExtractor):
         base_url_1 = 'http://hot.vrs.sohu.com/vrs_flash.action?vid='
         url_1 = base_url_1 + vid
         logging.info('json url: %s' % url_1)
-        json_1 = json.loads(urllib2.urlopen(url_1).read())
+        webpage = self._download_webpage(url_1, vid)
+        json_1 = json.loads(webpage)
         # get the highest definition video vid and json infomation.
         vids = []
         qualities = ('oriVid', 'superVid', 'highVid', 'norVid')
--
cgit v1.2.3

From d5b00ee6e0ba70fd5d87752e8772fc1c39e4bd59 Mon Sep 17 00:00:00 2001
From: huohuarong
Date: Tue, 6 Aug 2013 10:26:57 +0800
Subject: improve sohu extractor

---
 youtube_dl/extractor/sohu.py | 2 ++
 1 file changed, 2 insertions(+)
(limited to 'youtube_dl/extractor/sohu.py')

diff --git a/youtube_dl/extractor/sohu.py b/youtube_dl/extractor/sohu.py
index cd049b6f0..24fc3a5d7 100644
--- a/youtube_dl/extractor/sohu.py
+++ b/youtube_dl/extractor/sohu.py
@@ -31,6 +31,7 @@ class SohuIE(InfoExtractor):
         compiled = re.compile(pattern, re.DOTALL)
         title = self._search_regex(compiled, webpage, u'video title')
         title = clean_html(title).split('-')[0].strip()
+        self.to_screen('Title: %s' % title)
         pattern = re.compile(r'var vid="(\d+)"')
         result = re.search(pattern, webpage)
         if not result:
@@ -70,6 +71,7 @@ class SohuIE(InfoExtractor):
         base_url_3 = 'http://allot/?prot=prot&file=clipsURL[i]&new=su[i]'
         files_info = []
         for i in range(num_of_parts):
+            self.to_screen('Geting json infomation of part %s/%s' % (i + 1, num_of_parts))
             middle_url = 'http://%s/?prot=%s&file=%s&new=%s' % (allot, prot, clipsURL[i], su[i])
             logging.info('middle url part %d: %s' % (i, middle_url))
             middle_info = urllib2.urlopen(middle_url).read().split('|')
--
cgit v1.2.3

From f143d86ad2fc0633d8e2da598cf21e73ff0f2872 Mon Sep 17 00:00:00 2001
From: Philipp Hagemeister
Date: Wed, 28 Aug 2013 13:59:08 +0200
Subject: [sohu] Handle encoding, and fix tests

---
 youtube_dl/extractor/sohu.py | 131 +++++++++++++++++++++----------------------
 1 file changed, 64 insertions(+), 67 deletions(-)
(limited to 'youtube_dl/extractor/sohu.py')

diff --git a/youtube_dl/extractor/sohu.py b/youtube_dl/extractor/sohu.py
index 24fc3a5d7..77bb0a8dc 100644
--- a/youtube_dl/extractor/sohu.py
+++ b/youtube_dl/extractor/sohu.py
@@ -1,13 +1,10 @@
 # encoding: utf-8
 
-import re
 import json
-import time
-import logging
-import urllib2
+import re
 
 from .common import InfoExtractor
-from ..utils import compat_urllib_request, clean_html
+from ..utils import ExtractorError
 
 
 class SohuIE(InfoExtractor):
@@ -15,79 +12,79 @@ class SohuIE(InfoExtractor):
 
     _TEST = {
         u'url': u'http://tv.sohu.com/20130724/n382479172.shtml#super',
-        u'file': u'382479172.flv',
-        u'md5': u'cc84eed6b6fbf0f2f9a8d3cb9da1939b',
+        u'file': u'382479172.mp4',
+        u'md5': u'bde8d9a6ffd82c63a1eefaef4eeefec7',
         u'info_dict': {
-            u'title': u'The Illest - Far East Movement Riff Raff',
+            u'title': u'MV:Far East Movement《The Illest》',
         },
     }
 
-
     def _real_extract(self, url):
+
+        def _fetch_data(vid_id):
+            base_data_url = u'http://hot.vrs.sohu.com/vrs_flash.action?vid='
+            data_url = base_data_url + str(vid_id)
+            data_json = self._download_webpage(
+                data_url, video_id,
+                note=u'Downloading JSON data for ' + str(vid_id))
+            return json.loads(data_json)
+
         mobj = re.match(self._VALID_URL, url)
         video_id = mobj.group('id')
+
         webpage = self._download_webpage(url, video_id)
-        pattern = r'<title>(.+?)</title>'
-        compiled = re.compile(pattern, re.DOTALL)
-        title = self._search_regex(compiled, webpage, u'video title')
-        title = clean_html(title).split('-')[0].strip()
-        self.to_screen('Title: %s' % title)
-        pattern = re.compile(r'var vid="(\d+)"')
-        result = re.search(pattern, webpage)
-        if not result:
-            logging.info('[Sohu] could not get vid')
-            return None
-        vid = result.group(1)
-        logging.info('vid: %s' % vid)
-        base_url_1 = 'http://hot.vrs.sohu.com/vrs_flash.action?vid='
-        url_1 = base_url_1 + vid
-        logging.info('json url: %s' % url_1)
-        webpage = self._download_webpage(url_1, vid)
-        json_1 = json.loads(webpage)
-        # get the highest definition video vid and json infomation.
-        vids = []
-        qualities = ('oriVid', 'superVid', 'highVid', 'norVid')
-        for vid_name in qualities:
-            vids.append(json_1['data'][vid_name])
-        clearest_vid = 0
-        for i, v in enumerate(vids):
-            if v != 0:
-                clearest_vid = v
-                logging.info('quality definition: %s' % qualities[i][:-3])
-                break
-        if not clearest_vid:
-            logging.warning('could not find valid clearest_vid')
-            return None
-        if vid != clearest_vid:
-            url_1 = '%s%d' % (base_url_1, clearest_vid)
-            logging.info('highest definition json url: %s' % url_1)
-            json_1 = json.loads(urllib2.urlopen(url_1).read())
-        allot = json_1['allot']
-        prot = json_1['prot']
-        clipsURL = json_1['data']['clipsURL']
-        su = json_1['data']['su']
-        num_of_parts = json_1['data']['totalBlocks']
-        logging.info('Total parts: %d' % num_of_parts)
-        base_url_3 = 'http://allot/?prot=prot&file=clipsURL[i]&new=su[i]'
-        files_info = []
-        for i in range(num_of_parts):
-            self.to_screen('Geting json infomation of part %s/%s' % (i + 1, num_of_parts))
-            middle_url = 'http://%s/?prot=%s&file=%s&new=%s' % (allot, prot, clipsURL[i], su[i])
-            logging.info('middle url part %d: %s' % (i, middle_url))
-            middle_info = urllib2.urlopen(middle_url).read().split('|')
-            middle_part_1 = middle_info[0]
-            download_url = '%s%s?key=%s' % (middle_info[0], su[i], middle_info[3])
+        raw_title = self._html_search_regex(r'(?s)<title>(.+?)</title>',
+                                            webpage, u'video title')
+        title = raw_title.partition('-')[0].strip()
+
+        vid = self._html_search_regex(r'var vid="(\d+)"', webpage,
+                                      u'video path')
+        data = _fetch_data(vid)
+
+        QUALITIES = ('ori', 'super', 'high', 'nor')
+        vid_ids = [data['data'][q + 'Vid']
+                   for q in QUALITIES
+                   if data['data'][q + 'Vid'] != 0]
+        if not vid_ids:
+            raise ExtractorError(u'No formats available for this video')
+
+        # For now, we just pick the highest available quality
+        vid_id = vid_ids[-1]
+
+        format_data = data if vid == vid_id else _fetch_data(vid_id)
+        part_count = format_data['data']['totalBlocks']
+        allot = format_data['allot']
+        prot = format_data['prot']
+        clipsURL = format_data['data']['clipsURL']
+        su = format_data['data']['su']
+
+        playlist = []
+        for i in range(part_count):
+            part_url = ('http://%s/?prot=%s&file=%s&new=%s' %
+                        (allot, prot, clipsURL[i], su[i]))
+            part_str = self._download_webpage(
+                part_url, video_id,
+                note=u'Downloading part %d of %d' % (i+1, part_count))
+
+            part_info = part_str.split('|')
+            video_url = '%s%s?key=%s' % (part_info[0], su[i], part_info[3])
 
-            info = {
+            video_info = {
                 'id': '%s_part%02d' % (video_id, i + 1),
                 'title': title,
-                'url': download_url,
+                'url': video_url,
                 'ext': 'mp4',
             }
-            files_info.append(info)
-            time.sleep(1)
-        if num_of_parts == 1:
-            info = files_info[0]
+            playlist.append(video_info)
+
+        if len(playlist) == 1:
+            info = playlist[0]
             info['id'] = video_id
-            return info
-        return files_info
+        else:
+            info = {
+                '_type': 'playlist',
+                'entries': playlist,
+                'id': video_id,
+            }
+
+        return info
--
cgit v1.2.3
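
Two behaviours of this final revision are easy to check in isolation: the
quality-selection comprehension over the vrs_flash.action payload, and the
single-video-versus-playlist return convention. The fragment below replays
both with invented ids; only the key names and the logic come from the patch.
Note that with QUALITIES ordered best to worst, vid_ids[-1] is the last
non-zero candidate, so despite the in-code comment this appears to keep the
lowest of the available qualities:

    # Invented vrs_flash.action-style payload; only the key names are real.
    data = {'data': {'oriVid': 0, 'superVid': 7654321,
                     'highVid': 1234567, 'norVid': 0}}

    QUALITIES = ('ori', 'super', 'high', 'nor')
    vid_ids = [data['data'][q + 'Vid']
               for q in QUALITIES
               if data['data'][q + 'Vid'] != 0]
    assert vid_ids == [7654321, 1234567]
    vid_id = vid_ids[-1]  # keeps the last non-zero entry: 1234567 (highVid)

    # Return-shape convention: one part collapses to a plain video dict,
    # several parts become a playlist wrapper.
    playlist = [{'id': '382479172_part01', 'title': 't', 'url': 'u', 'ext': 'mp4'}]
    if len(playlist) == 1:
        info = playlist[0]
        info['id'] = '382479172'
    else:
        info = {'_type': 'playlist', 'entries': playlist, 'id': '382479172'}
    assert info['id'] == '382479172'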