path: root/youtube_dl/extractor/safari.py
# encoding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from .brightcove import BrightcoveIE

from ..compat import (
    compat_urllib_parse,
    compat_urllib_request,
)
from ..utils import (
    ExtractorError,
    smuggle_url,
    std_headers,
)


class SafariBaseIE(InfoExtractor):
    _LOGIN_URL = 'https://www.safaribooksonline.com/accounts/login/'
    _SUCCESSFUL_LOGIN_REGEX = r'<a href="/accounts/logout/"[^>]+>Sign Out</a>'
    _ACCOUNT_CREDENTIALS_HINT = ('Use --username and --password options to '
                                 'supply credentials for safaribooksonline.com')
    _NETRC_MACHINE = 'safaribooksonline'

    LOGGED_IN = False

    def _real_initialize(self):
        # We only need to log in once for courses or individual videos
        if not SafariBaseIE.LOGGED_IN:
            self._login()
            SafariBaseIE.LOGGED_IN = True

    def _login(self):
        (username, password) = self._get_login_info()
        if username is None:
            raise ExtractorError(
                self._ACCOUNT_CREDENTIALS_HINT,
                expected=True)

        # Work on a copy so that the module-wide std_headers dict is not
        # modified as a side effect.
        headers = std_headers.copy()
        if 'Referer' not in headers:
            headers['Referer'] = self._LOGIN_URL

        login_page = self._download_webpage(
            self._LOGIN_URL, None,
            'Downloading login form')

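        # The login form is CSRF-protected; pull the Django csrfmiddlewaretoken
        # out of the form so it can be sent back along with the credentials.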
        csrf = self._html_search_regex(
            r"<input +type='hidden' +name='csrfmiddlewaretoken' +value='([^']+)' +/>",
            login_page, 'csrf token')

        login_form = {
            'csrfmiddlewaretoken': csrf,
            'email': username,
            'password1': password,
            'login': 'Sign In',
            'next': '',
        }

        request = compat_urllib_request.Request(
            self._LOGIN_URL,
            compat_urllib_parse.urlencode(login_form).encode('utf-8'),
            headers=headers)
        login_page = self._download_webpage(
            request, None, 'Logging in as %s' % username)

        if re.search(self._SUCCESSFUL_LOGIN_REGEX, login_page) is None:
            raise ExtractorError('Login failed; make sure your credentials are correct and '
                                 'try again.', expected=True)

        self.to_screen('Login successful')


class SafariIE(SafariBaseIE):
    IE_NAME = 'safari'
    IE_DESC = 'safaribooksonline.com online video'
    _VALID_URL = (r'https?://(?:www\.)?safaribooksonline\.com/library/view/[^/]+/'
                  r'(?P<id>\d+)/(?P<part>part\d+)\.html')
    _TEST = {
        'url': ('https://www.safaribooksonline.com/library/view/'
                'hadoop-fundamentals-livelessons/9780133392838/part00.html'),
        'md5': '5b0c4cc1b3c1ba15dda7344085aa5592',
        'info_dict': {
            'id': '9780133392838',
            'ext': 'mp4',
            'title': 'Introduction',
        }
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        part = mobj.group('part')

        webpage = self._download_webpage(url, part)
        bc_url = BrightcoveIE._extract_brightcove_url(webpage)
        if not bc_url:
            raise ExtractorError('Could not extract Brightcove URL from %s' % url, expected=True)

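        # Pass the original page URL along via smuggle_url so the Brightcove
        # extractor can use it (e.g. as a Referer) when fetching the video data.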
        return {
            '_type': 'url',
            'url': smuggle_url(bc_url, {'Referer': url}),
            'ie_key': 'Brightcove'
        }


class SafariCourseIE(SafariBaseIE):
    IE_NAME = 'safari:course'
    IE_DESC = 'safaribooksonline.com online courses'

    _VALID_URL = (r'https?://(?:www\.)?safaribooksonline\.com/library/view/'
                  r'(?P<course_path>[^/]+)/(?P<id>\d+)/?$')

    _API_BASE = 'https://www.safaribooksonline.com/api/v1/book'
    _API_FORMAT = 'json'

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        course_path = mobj.group('course_path')
        course_id = mobj.group('id')

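        # Ask the book API for the course metadata; override_format=json (see
        # _API_FORMAT above) requests a JSON response.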
        course_json = self._download_json(
            '%s/%s/?override_format=%s' % (self._API_BASE, course_id, self._API_FORMAT),
            course_path, 'Downloading course JSON')

        if 'chapters' not in course_json:
            raise ExtractorError('No chapters found for course %s' % course_id, expected=True)

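        # Each chapter is exposed as its own zero-padded partNN.html page.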
        num_parts = len(course_json['chapters'])
        parts = ['%02d' % part for part in range(num_parts)]

        entries = [
            self.url_result(
                'https://www.safaribooksonline.com/library/view/%s/%s/part%s.html'
                % (course_path, course_id, part_id),
                'Safari')
            for part_id in parts]

        course_title = course_json['title']

        return self.playlist_result(entries, course_id, course_title)
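

# A minimal usage sketch (not part of the extractor): the credentials below are
# placeholders and a valid safaribooksonline.com account is assumed; the URL is
# the test URL from SafariIE._TEST above.
#
#   from youtube_dl import YoutubeDL
#
#   with YoutubeDL({'username': 'user@example.com', 'password': 'secret'}) as ydl:
#       ydl.download(['https://www.safaribooksonline.com/library/view/'
#                     'hadoop-fundamentals-livelessons/9780133392838/part00.html'])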