Diffstat (limited to 'youtube_dl/extractor/dailymotion.py')
| -rw-r--r-- | youtube_dl/extractor/dailymotion.py | 236 | 
1 files changed, 179 insertions, 57 deletions
diff --git a/youtube_dl/extractor/dailymotion.py b/youtube_dl/extractor/dailymotion.py
index 1a41c0db1..0c5b6617f 100644
--- a/youtube_dl/extractor/dailymotion.py
+++ b/youtube_dl/extractor/dailymotion.py
@@ -7,14 +7,13 @@ import itertools
 
 from .common import InfoExtractor
 
-from ..compat import (
-    compat_str,
-    compat_urllib_request,
-)
 from ..utils import (
+    determine_ext,
+    error_to_compat_str,
     ExtractorError,
     int_or_none,
-    orderedSet,
+    parse_iso8601,
+    sanitized_Request,
     str_to_int,
     unescapeHTML,
 )
@@ -24,14 +23,20 @@ class DailymotionBaseInfoExtractor(InfoExtractor):
     @staticmethod
     def _build_request(url):
         """Build a request with the family filter disabled"""
-        request = compat_urllib_request.Request(url)
+        request = sanitized_Request(url)
         request.add_header('Cookie', 'family_filter=off; ff=off')
         return request
 
+    def _download_webpage_handle_no_ff(self, url, *args, **kwargs):
+        request = self._build_request(url)
+        return self._download_webpage_handle(request, *args, **kwargs)
+
+    def _download_webpage_no_ff(self, url, *args, **kwargs):
+        request = self._build_request(url)
+        return self._download_webpage(request, *args, **kwargs)
 
-class DailymotionIE(DailymotionBaseInfoExtractor):
-    """Information Extractor for Dailymotion"""
+class DailymotionIE(DailymotionBaseInfoExtractor):
     _VALID_URL = r'(?i)(?:https?://)?(?:(www|touch)\.)?dailymotion\.[a-z]{2,3}/(?:(embed|#)/)?video/(?P<id>[^/?_]+)'
     IE_NAME = 'dailymotion'
@@ -50,10 +55,17 @@ class DailymotionIE(DailymotionBaseInfoExtractor):
             'info_dict': {
                 'id': 'x2iuewm',
                 'ext': 'mp4',
-                'uploader': 'IGN',
                 'title': 'Steam Machine Models, Pricing Listed on Steam Store - IGN News',
-                'upload_date': '20150306',
+                'description': 'Several come bundled with the Steam Controller.',
+                'thumbnail': 're:^https?:.*\.(?:jpg|png)$',
                 'duration': 74,
+                'timestamp': 1425657362,
+                'upload_date': '20150306',
+                'uploader': 'IGN',
+                'uploader_id': 'xijv66',
+                'age_limit': 0,
+                'view_count': int,
+                'comment_count': int,
             }
         },
         # Vevo video
@@ -82,46 +94,137 @@ class DailymotionIE(DailymotionBaseInfoExtractor):
                 'uploader': 'HotWaves1012',
                 'age_limit': 18,
             }
+        },
+        # geo-restricted, player v5
+        {
+            'url': 'http://www.dailymotion.com/video/xhza0o',
+            'only_matching': True,
+        },
+        # with subtitles
+        {
+            'url': 'http://www.dailymotion.com/video/x20su5f_the-power-of-nightmares-1-the-rise-of-the-politics-of-fear-bbc-2004_news',
+            'only_matching': True,
         }
     ]
 
     def _real_extract(self, url):
         video_id = self._match_id(url)
-        url = 'https://www.dailymotion.com/video/%s' % video_id
-        # Retrieve video webpage to extract further information
-        request = self._build_request(url)
-        webpage = self._download_webpage(request, video_id)
+        webpage = self._download_webpage_no_ff(
+            'https://www.dailymotion.com/video/%s' % video_id, video_id)
 
-        # Extract URL, uploader and title from webpage
-        self.report_extraction(video_id)
+        age_limit = self._rta_search(webpage)
+
+        description = self._og_search_description(webpage) or self._html_search_meta(
+            'description', webpage, 'description')
 
-        # It may just embed a vevo video:
-        m_vevo = re.search(
+        view_count = str_to_int(self._search_regex(
+            [r'<meta[^>]+itemprop="interactionCount"[^>]+content="UserPlays:(\d+)"',
+             r'video_views_count[^>]+>\s+([\d\.,]+)'],
+            webpage, 'view count', fatal=False))
+        comment_count = int_or_none(self._search_regex(
+            r'<meta[^>]+itemprop="interactionCount"[^>]+content="UserComments:(\d+)"',
+            webpage, 'comment count', fatal=False))
+
+        player_v5 = self._search_regex(
+            [r'buildPlayer\(({.+?})\);\n',  # See https://github.com/rg3/youtube-dl/issues/7826
+             r'playerV5\s*=\s*dmp\.create\([^,]+?,\s*({.+?})\);',
+             r'buildPlayer\(({.+?})\);'],
+            webpage, 'player v5', default=None)
+        if player_v5:
+            player = self._parse_json(player_v5, video_id)
+            metadata = player['metadata']
+
+            self._check_error(metadata)
+
+            formats = []
+            for quality, media_list in metadata['qualities'].items():
+                for media in media_list:
+                    media_url = media.get('url')
+                    if not media_url:
+                        continue
+                    type_ = media.get('type')
+                    if type_ == 'application/vnd.lumberjack.manifest':
+                        continue
+                    ext = determine_ext(media_url)
+                    if type_ == 'application/x-mpegURL' or ext == 'm3u8':
+                        m3u8_formats = self._extract_m3u8_formats(
+                            media_url, video_id, 'mp4', m3u8_id='hls', fatal=False)
+                        if m3u8_formats:
+                            formats.extend(m3u8_formats)
+                    elif type_ == 'application/f4m' or ext == 'f4m':
+                        f4m_formats = self._extract_f4m_formats(
+                            media_url, video_id, preference=-1, f4m_id='hds', fatal=False)
+                        if f4m_formats:
+                            formats.extend(f4m_formats)
+                    else:
+                        f = {
+                            'url': media_url,
+                            'format_id': quality,
+                        }
+                        m = re.search(r'H264-(?P<width>\d+)x(?P<height>\d+)', media_url)
+                        if m:
+                            f.update({
+                                'width': int(m.group('width')),
+                                'height': int(m.group('height')),
+                            })
+                        formats.append(f)
+            self._sort_formats(formats)
+
+            title = metadata['title']
+            duration = int_or_none(metadata.get('duration'))
+            timestamp = int_or_none(metadata.get('created_time'))
+            thumbnail = metadata.get('poster_url')
+            uploader = metadata.get('owner', {}).get('screenname')
+            uploader_id = metadata.get('owner', {}).get('id')
+
+            subtitles = {}
+            subtitles_data = metadata.get('subtitles', {}).get('data', {})
+            if subtitles_data and isinstance(subtitles_data, dict):
+                for subtitle_lang, subtitle in subtitles_data.items():
+                    subtitles[subtitle_lang] = [{
+                        'ext': determine_ext(subtitle_url),
+                        'url': subtitle_url,
+                    } for subtitle_url in subtitle.get('urls', [])]
+
+            return {
+                'id': video_id,
+                'title': title,
+                'description': description,
+                'thumbnail': thumbnail,
+                'duration': duration,
+                'timestamp': timestamp,
+                'uploader': uploader,
+                'uploader_id': uploader_id,
+                'age_limit': age_limit,
+                'view_count': view_count,
+                'comment_count': comment_count,
+                'formats': formats,
+                'subtitles': subtitles,
+            }
+
+        # vevo embed
+        vevo_id = self._search_regex(
             r'<link rel="video_src" href="[^"]*?vevo.com[^"]*?video=(?P<id>[\w]*)',
-            webpage)
-        if m_vevo is not None:
-            vevo_id = m_vevo.group('id')
-            self.to_screen('Vevo video detected: %s' % vevo_id)
-            return self.url_result('vevo:%s' % vevo_id, ie='Vevo')
+            webpage, 'vevo embed', default=None)
+        if vevo_id:
+            return self.url_result('vevo:%s' % vevo_id, 'Vevo')
 
-        age_limit = self._rta_search(webpage)
+        # fallback old player
+        embed_page = self._download_webpage_no_ff(
+            'https://www.dailymotion.com/embed/video/%s' % video_id,
+            video_id, 'Downloading embed page')
 
-        video_upload_date = None
-        mobj = re.search(r'<meta property="video:release_date" content="([0-9]{4})-([0-9]{2})-([0-9]{2}).+?"/>', webpage)
-        if mobj is not None:
-            video_upload_date = mobj.group(1) + mobj.group(2) + mobj.group(3)
-
-        embed_url = 'https://www.dailymotion.com/embed/video/%s' % video_id
-        embed_request = self._build_request(embed_url)
-        embed_page = self._download_webpage(
-            embed_request, video_id, 'Downloading embed page')
-        info = self._search_regex(r'var info = ({.*?}),$', embed_page,
-                                  'video info', flags=re.MULTILINE)
-        info = json.loads(info)
-        if info.get('error') is not None:
-            msg = 'Couldn\'t get video, Dailymotion says: %s' % info['error']['title']
-            raise ExtractorError(msg, expected=True)
+        timestamp = parse_iso8601(self._html_search_meta(
+            'video:release_date', webpage, 'upload date'))
+
+        info = self._parse_json(
+            self._search_regex(
+                r'var info = ({.*?}),$', embed_page,
+                'video info', flags=re.MULTILINE),
+            video_id)
+
+        self._check_error(info)
 
         formats = []
         for (key, format_id) in self._FORMATS:
@@ -139,16 +242,11 @@ class DailymotionIE(DailymotionBaseInfoExtractor):
                     'width': width,
                     'height': height,
                 })
-        if not formats:
-            raise ExtractorError('Unable to extract video URL')
+        self._sort_formats(formats)
 
         # subtitles
         video_subtitles = self.extract_subtitles(video_id, webpage)
 
-        view_count = str_to_int(self._search_regex(
-            r'video_views_count[^>]+>\s+([\d\.,]+)',
-            webpage, 'view count', fatal=False))
-
         title = self._og_search_title(webpage, default=None)
         if title is None:
             title = self._html_search_regex(
@@ -159,8 +257,9 @@ class DailymotionIE(DailymotionBaseInfoExtractor):
             'id': video_id,
             'formats': formats,
             'uploader': info['owner.screenname'],
-            'upload_date': video_upload_date,
+            'timestamp': timestamp,
             'title': title,
+            'description': description,
             'subtitles': video_subtitles,
             'thumbnail': info['thumbnail_url'],
             'age_limit': age_limit,
@@ -168,13 +267,18 @@ class DailymotionIE(DailymotionBaseInfoExtractor):
             'duration': info['duration']
         }
 
+    def _check_error(self, info):
+        if info.get('error') is not None:
+            raise ExtractorError(
+                '%s said: %s' % (self.IE_NAME, info['error']['title']), expected=True)
+
     def _get_subtitles(self, video_id, webpage):
         try:
             sub_list = self._download_webpage(
                 'https://api.dailymotion.com/video/%s/subtitles?fields=id,language,url' % video_id,
                 video_id, note=False)
         except ExtractorError as err:
-            self._downloader.report_warning('unable to download video subtitles: %s' % compat_str(err))
+            self._downloader.report_warning('unable to download video subtitles: %s' % error_to_compat_str(err))
             return {}
         info = json.loads(sub_list)
         if (info['total'] > 0):
@@ -199,18 +303,26 @@ class DailymotionPlaylistIE(DailymotionBaseInfoExtractor):
     }]
 
     def _extract_entries(self, id):
-        video_ids = []
+        video_ids = set()
+        processed_urls = set()
         for pagenum in itertools.count(1):
-            request = self._build_request(self._PAGE_TEMPLATE % (id, pagenum))
-            webpage = self._download_webpage(request,
-                                             id, 'Downloading page %s' % pagenum)
+            page_url = self._PAGE_TEMPLATE % (id, pagenum)
+            webpage, urlh = self._download_webpage_handle_no_ff(
+                page_url, id, 'Downloading page %s' % pagenum)
+            if urlh.geturl() in processed_urls:
+                self.report_warning('Stopped at duplicated page %s, which is the same as %s' % (
+                    page_url, urlh.geturl()), id)
+                break
+
+            processed_urls.add(urlh.geturl())
 
-            video_ids.extend(re.findall(r'data-xid="(.+?)"', webpage))
+            for video_id in re.findall(r'data-xid="(.+?)"', webpage):
+                if video_id not in video_ids:
+                    yield self.url_result('http://www.dailymotion.com/video/%s' % video_id, 'Dailymotion')
+                    video_ids.add(video_id)
 
             if re.search(self._MORE_PAGES_INDICATOR, webpage) is None:
                 break
-        return [self.url_result('http://www.dailymotion.com/video/%s' % video_id, 'Dailymotion')
-                for video_id in orderedSet(video_ids)]
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
@@ -227,7 +339,7 @@ class DailymotionPlaylistIE(DailymotionBaseInfoExtractor):
 
 class DailymotionUserIE(DailymotionPlaylistIE):
     IE_NAME = 'dailymotion:user'
-    _VALID_URL = r'https?://(?:www\.)?dailymotion\.[a-z]{2,3}/(?:(?:old/)?user/)?(?P<user>[^/]+)$'
+    _VALID_URL = r'https?://(?:www\.)?dailymotion\.[a-z]{2,3}/(?!(?:embed|#|video|playlist)/)(?:(?:old/)?user/)?(?P<user>[^/]+)'
     _PAGE_TEMPLATE = 'http://www.dailymotion.com/user/%s/%s'
     _TESTS = [{
        'url': 'https://www.dailymotion.com/user/nqtv',
@@ -236,6 +348,17 @@ class DailymotionUserIE(DailymotionPlaylistIE):
             'title': 'Rémi Gaillard',
         },
         'playlist_mincount': 100,
+    }, {
+        'url': 'http://www.dailymotion.com/user/UnderProject',
+        'info_dict': {
+            'id': 'UnderProject',
+            'title': 'UnderProject',
+        },
+        'playlist_mincount': 1800,
+        'expected_warnings': [
+            'Stopped at duplicated page',
+        ],
+        'skip': 'Takes too long time',
     }]
 
     def _real_extract(self, url):
@@ -286,8 +409,7 @@ class DailymotionCloudIE(DailymotionBaseInfoExtractor):
     def _real_extract(self, url):
         video_id = self._match_id(url)
 
-        request = self._build_request(url)
-        webpage = self._download_webpage(request, video_id)
+        webpage = self._download_webpage_no_ff(url, video_id)
 
         title = self._html_search_regex(r'<title>([^>]+)</title>', webpage, 'title')
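
The new _download_webpage_no_ff and _download_webpage_handle_no_ff helpers wrap every page fetch so the family-filter cookie built by _build_request() is always sent. As a rough standalone sketch of what that request amounts to (illustrative only: plain urllib instead of youtube-dl's download machinery, and the function name is made up; only the Cookie value comes from the patch):

import sys

if sys.version_info[0] >= 3:
    from urllib.request import Request, urlopen
else:
    from urllib2 import Request, urlopen  # Python 2

def fetch_without_family_filter(url):
    # Same Cookie header that _build_request() adds in the extractor
    request = Request(url)
    request.add_header('Cookie', 'family_filter=off; ff=off')
    return urlopen(request).read()

# e.g. html = fetch_without_family_filter('https://www.dailymotion.com/video/x2iuewm')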

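The pagination change in DailymotionPlaylistIE._extract_entries is the other behavioural point: the method is now a generator that remembers the final URL of every page it downloads and stops as soon as one repeats, which is what the new 'Stopped at duplicated page' warning (and the UnderProject test's expected_warnings) covers. A minimal sketch of that guard, assuming a hypothetical fetch(url) callable that follows redirects and returns (final_url, html) in place of _download_webpage_handle_no_ff:

import itertools

def iter_pages(page_template, fetch):
    # fetch(url) is assumed to follow redirects and return (final_url, html)
    processed_urls = set()
    for pagenum in itertools.count(1):
        page_url = page_template % pagenum
        final_url, html = fetch(page_url)
        if final_url in processed_urls:
            # mirrors the extractor's 'Stopped at duplicated page' warning
            break
        processed_urls.add(final_url)
        yield pagenum, html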