path: root/youtube_dl/extractor/googlesearch.py
from __future__ import unicode_literals

import itertools
import re

from .common import SearchInfoExtractor
from ..utils import (
    compat_urllib_parse,
)


class GoogleSearchIE(SearchInfoExtractor):
    IE_DESC = 'Google Video search'
    _MAX_RESULTS = 1000
    IE_NAME = 'video.google:search'
    _SEARCH_KEY = 'gvsearch'

    def _get_n_results(self, query, n):
        """Get a specified number of results for a query"""

        entries = []
        res = {
            '_type': 'playlist',
            'id': query,
            'title': query,
        }

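        # Google video search paginates its results 10 per page.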
        for pagenum in itertools.count():
            result_url = (
                'http://www.google.com/search?tbm=vid&q=%s&start=%s&hl=en'
                % (compat_urllib_parse.quote_plus(query), pagenum * 10))

            webpage = self._download_webpage(
                result_url, 'gvsearch:' + query,
                note='Downloading result page ' + str(pagenum + 1))

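            # Every organic hit is rendered as an <h3 class="r"> anchor;
            # pull the target URL out of each one.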
            for hit_idx, mobj in enumerate(re.finditer(
                    r'<h3 class="r"><a href="([^"]+)"', webpage)):

                # Skip playlists: hits without a matching video thumbnail
                # (id="vidthumbN") are playlist/non-video results.
                if not re.search(r'id="vidthumb%d"' % (hit_idx + 1), webpage):
                    continue

                entries.append({
                    '_type': 'url',
                    'url': mobj.group(1)
                })

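            # Stop once enough entries have been collected or the results
            # page offers no "next" link (id="pnnext").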
            if (len(entries) >= n) or not re.search(r'class="pn" id="pnnext"', webpage):
                res['entries'] = entries[:n]
                return res
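

# Usage sketch (not part of the original file): SearchInfoExtractor resolves
# queries of the form "<_SEARCH_KEY><count>:<terms>", so a command such as
#
#     youtube-dl "gvsearch5:big buck bunny"
#
# should end up calling _get_n_results('big buck bunny', 5) above, while
# "gvsearchall:..." queries are capped at _MAX_RESULTS entries.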