Diffstat (limited to 'youtube_dl')
-rw-r--r--  youtube_dl/extractor/googlesearch.py  38
1 file changed, 25 insertions(+), 13 deletions(-)
diff --git a/youtube_dl/extractor/googlesearch.py b/youtube_dl/extractor/googlesearch.py
index f9c88e9b5..5c2564270 100644
--- a/youtube_dl/extractor/googlesearch.py
+++ b/youtube_dl/extractor/googlesearch.py
@@ -1,3 +1,5 @@
+from __future__ import unicode_literals
+
 import itertools
 import re
@@ -8,32 +10,42 @@ from ..utils import (
 
 
 class GoogleSearchIE(SearchInfoExtractor):
-    IE_DESC = u'Google Video search'
-    _MORE_PAGES_INDICATOR = r'id="pnnext" class="pn"'
+    IE_DESC = 'Google Video search'
     _MAX_RESULTS = 1000
-    IE_NAME = u'video.google:search'
+    IE_NAME = 'video.google:search'
     _SEARCH_KEY = 'gvsearch'
 
     def _get_n_results(self, query, n):
         """Get a specified number of results for a query"""
 
+        entries = []
         res = {
             '_type': 'playlist',
             'id': query,
-            'entries': []
+            'title': query,
         }
 
-        for pagenum in itertools.count(1):
-            result_url = u'http://www.google.com/search?tbm=vid&q=%s&start=%s&hl=en' % (compat_urllib_parse.quote_plus(query), pagenum*10)
-            webpage = self._download_webpage(result_url, u'gvsearch:' + query,
-                                             note='Downloading result page ' + str(pagenum))
+        for pagenum in itertools.count():
+            result_url = (
+                'http://www.google.com/search?tbm=vid&q=%s&start=%s&hl=en'
+                % (compat_urllib_parse.quote_plus(query), pagenum * 10))
+
+            webpage = self._download_webpage(
+                result_url, 'gvsearch:' + query,
+                note='Downloading result page ' + str(pagenum + 1))
+
+            for hit_idx, mobj in enumerate(re.finditer(
+                    r'<h3 class="r"><a href="([^"]+)"', webpage)):
+
+                # Skip playlists
+                if not re.search(r'id="vidthumb%d"' % (hit_idx + 1), webpage):
+                    continue
 
-            for mobj in re.finditer(r'<h3 class="r"><a href="([^"]+)"', webpage):
-                e = {
+                entries.append({
                     '_type': 'url',
                     'url': mobj.group(1)
-                }
-                res['entries'].append(e)
+                })
 
-            if (pagenum * 10 > n) or not re.search(self._MORE_PAGES_INDICATOR, webpage):
+            if (len(entries) >= n) or not re.search(r'class="pn" id="pnnext"', webpage):
+                res['entries'] = entries[:n]
                 return res
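
For context, the rewritten loop pages through Google video-search results ten hits at a time, skips hits that lack a matching id="vidthumbN" anchor (playlists), and stops once enough entries have been collected or the "next" link disappears. A minimal standalone sketch of that logic follows; fetch_page is a hypothetical stand-in for self._download_webpage, and only the regexes from the diff are assumed:

    import itertools
    import re


    def collect_results(fetch_page, query, n):
        """Sketch of the paging logic above; not part of this commit."""
        entries = []
        for pagenum in itertools.count():
            webpage = fetch_page(query, start=pagenum * 10)

            for hit_idx, mobj in enumerate(re.finditer(
                    r'<h3 class="r"><a href="([^"]+)"', webpage)):
                # Per the diff, plain video hits carry an id="vidthumbN"
                # anchor while playlist hits do not, so a missing anchor
                # means the hit should be skipped.
                if not re.search(r'id="vidthumb%d"' % (hit_idx + 1), webpage):
                    continue
                entries.append({'_type': 'url', 'url': mobj.group(1)})

            # Done when enough entries are collected or no next page exists.
            if len(entries) >= n or not re.search(
                    r'class="pn" id="pnnext"', webpage):
                return entries[:n]

With _SEARCH_KEY = 'gvsearch', the extractor is invoked through youtube-dl's search-key convention, e.g. youtube-dl "gvsearch5:cute kittens", where the 5 becomes the n passed to _get_n_results.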