| field | value | date |
|---|---|---|
| author | remitamine <remitamine@gmail.com> | 2015-12-17 20:56:54 +0100 |
| committer | remitamine <remitamine@gmail.com> | 2015-12-17 20:56:54 +0100 |
| commit | 64ccbf18c0e4849618d82add2ff13879f9db67ab (patch) | |
| tree | 07063475dda7bd75ce84d9a6f75b521cd72589bb | |
| parent | 35e22b6b32d0e662d486c9be8c76f6ea86f7cdd4 (diff) | |
[livestream] improve extraction, add support for live streams and extract more info and formats
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | youtube_dl/extractor/livestream.py | 278 |

1 file changed, 168 insertions(+), 110 deletions(-)
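
The rewritten `_VALID_URL` in the diff below resolves a Livestream URL to an account, an event, and an optional video id, which `_real_extract` then plugs into `_API_URL_TEMPLATE` instead of scraping the watch page. A minimal sketch of that mapping, using the regex and template exactly as they appear in the patch (the `resolve` helper itself is only illustrative, not part of the extractor):

```python
import re

# Regex and API template as introduced by this commit.
_VALID_URL = (
    r'https?://(?:new\.)?livestream\.com/'
    r'(?:accounts/(?P<account_id>\d+)|(?P<account_name>[^/]+))/'
    r'(?:events/(?P<event_id>\d+)|(?P<event_name>[^/]+))'
    r'(?:/videos/(?P<id>\d+))?'
)
_API_URL_TEMPLATE = 'http://livestream.com/api/accounts/%s/events/%s'


def resolve(url):
    """Map a livestream.com URL to the API endpoint the extractor would query."""
    mobj = re.match(_VALID_URL, url)
    video_id = mobj.group('id')
    event = mobj.group('event_id') or mobj.group('event_name')
    account = mobj.group('account_id') or mobj.group('account_name')
    api_url = _API_URL_TEMPLATE % (account, event)
    if video_id:
        # A concrete video id selects the single-video endpoint.
        return api_url + '/videos/%s' % video_id
    # Otherwise the event endpoint is used (playlist or live stream).
    return api_url


print(resolve('http://new.livestream.com/CoheedandCambria/WebsterHall/videos/4719370'))
# -> http://livestream.com/api/accounts/CoheedandCambria/events/WebsterHall/videos/4719370
```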
```diff
diff --git a/youtube_dl/extractor/livestream.py b/youtube_dl/extractor/livestream.py
index 6d7733e41..b95c23c8c 100644
--- a/youtube_dl/extractor/livestream.py
+++ b/youtube_dl/extractor/livestream.py
@@ -1,27 +1,29 @@
 from __future__ import unicode_literals
 
 import re
-import json
 import itertools
 
 from .common import InfoExtractor
 from ..compat import (
     compat_str,
-    compat_urllib_parse_urlparse,
     compat_urlparse,
 )
 from ..utils import (
-    ExtractorError,
     find_xpath_attr,
-    int_or_none,
-    orderedSet,
+    xpath_attr,
     xpath_with_ns,
+    xpath_text,
+    orderedSet,
+    int_or_none,
+    float_or_none,
+    parse_iso8601,
+    determine_ext,
 )
 
 
 class LivestreamIE(InfoExtractor):
     IE_NAME = 'livestream'
-    _VALID_URL = r'https?://(?:new\.)?livestream\.com/.*?/(?P<event_name>.*?)(/videos/(?P<id>[0-9]+)(?:/player)?)?/?(?:$|[?#])'
+    _VALID_URL = r'https?://(?:new\.)?livestream\.com/(?:accounts/(?P<account_id>\d+)|(?P<account_name>[^/]+))/(?:events/(?P<event_id>\d+)|(?P<event_name>[^/]+))(?:/videos/(?P<id>\d+))?'
     _TESTS = [{
         'url': 'http://new.livestream.com/CoheedandCambria/WebsterHall/videos/4719370',
         'md5': '53274c76ba7754fb0e8d072716f2292b',
@@ -29,7 +31,9 @@ class LivestreamIE(InfoExtractor):
             'id': '4719370',
             'ext': 'mp4',
             'title': 'Live from Webster Hall NYC',
+            'timestamp': 1350008072,
             'upload_date': '20121012',
+            'duration': 5968.0,
             'like_count': int,
             'view_count': int,
             'thumbnail': 're:^http://.*\.jpg$'
@@ -55,39 +59,19 @@ class LivestreamIE(InfoExtractor):
         'url': 'http://livestream.com/bsww/concacafbeachsoccercampeonato2015',
         'only_matching': True,
     }]
+    _API_URL_TEMPLATE = 'http://livestream.com/api/accounts/%s/events/%s'
+
+    def _parse_smil_formats(self, smil, smil_url, video_id, namespace=None, f4m_params=None, transform_rtmp_url=None):
+        base_ele = find_xpath_attr(smil, self._xpath_ns('.//meta', namespace), 'name', 'httpBase')
+        base = base_ele.get('content') if base_ele else 'http://livestreamvod-f.akamaihd.net/'
 
-    def _parse_smil(self, video_id, smil_url):
         formats = []
-        _SWITCH_XPATH = (
-            './/{http://www.w3.org/2001/SMIL20/Language}body/'
-            '{http://www.w3.org/2001/SMIL20/Language}switch')
-        smil_doc = self._download_xml(
-            smil_url, video_id,
-            note='Downloading SMIL information',
-            errnote='Unable to download SMIL information',
-            fatal=False)
-        if smil_doc is False:  # Download failed
-            return formats
-        title_node = find_xpath_attr(
-            smil_doc, './/{http://www.w3.org/2001/SMIL20/Language}meta',
-            'name', 'title')
-        if title_node is None:
-            self.report_warning('Cannot find SMIL id')
-            switch_node = smil_doc.find(_SWITCH_XPATH)
-        else:
-            title_id = title_node.attrib['content']
-            switch_node = find_xpath_attr(
-                smil_doc, _SWITCH_XPATH, 'id', title_id)
-        if switch_node is None:
-            raise ExtractorError('Cannot find switch node')
-        video_nodes = switch_node.findall(
-            '{http://www.w3.org/2001/SMIL20/Language}video')
+        video_nodes = smil.findall(self._xpath_ns('.//video', namespace))
 
         for vn in video_nodes:
-            tbr = int_or_none(vn.attrib.get('system-bitrate'))
+            tbr = int_or_none(vn.attrib.get('system-bitrate'), 1000)
             furl = (
-                'http://livestream-f.akamaihd.net/%s?v=3.0.3&fp=WIN%%2014,0,0,145' %
-                (vn.attrib['src']))
+                '%s%s?v=3.0.3&fp=WIN%%2014,0,0,145' % (base, vn.attrib['src']))
             if 'clipBegin' in vn.attrib:
                 furl += '&ssek=' + vn.attrib['clipBegin']
             formats.append({
@@ -106,89 +90,136 @@ class LivestreamIE(InfoExtractor):
             ('sd', 'progressive_url'),
             ('hd', 'progressive_url_hd'),
         )
-        formats = [{
-            'format_id': format_id,
-            'url': video_data[key],
-            'quality': i + 1,
-        } for i, (format_id, key) in enumerate(FORMAT_KEYS)
-            if video_data.get(key)]
+
+        formats = []
+        for format_id, key in FORMAT_KEYS:
+            video_url = video_data.get(key)
+            if video_url:
+                ext = determine_ext(video_url)
+                bitrate = int_or_none(self._search_regex(r'(\d+)\.%s' % ext, video_url, 'bitrate', default=None))
+                formats.append({
+                    'url': video_url,
+                    'format_id': format_id,
+                    'tbr': bitrate,
+                    'ext': ext,
+                })
 
         smil_url = video_data.get('smil_url')
         if smil_url:
-            formats.extend(self._parse_smil(video_id, smil_url))
+            smil_formats = self._extract_smil_formats(smil_url, video_id)
+            if smil_formats:
+                formats.extend(smil_formats)
+
+        m3u8_url = video_data.get('m3u8_url')
+        if m3u8_url:
+            m3u8_formats = self._extract_m3u8_formats(
+                m3u8_url, video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)
+            if m3u8_formats:
+                formats.extend(m3u8_formats)
+
+        f4m_url = video_data.get('f4m_url')
+        if f4m_url:
+            f4m_formats = self._extract_f4m_formats(f4m_url, video_id, f4m_id='hds', fatal=False)
+            if f4m_formats:
+                formats.extend(f4m_formats)
 
         self._sort_formats(formats)
 
+        comments = [{
+            'author_id': comment.get('author_id'),
+            'author': comment.get('author', {}).get('full_name'),
+            'id': comment.get('id'),
+            'text': comment['text'],
+            'timestamp': parse_iso8601(comment.get('created_at')),
+        } for comment in video_data.get('comments', {}).get('data', [])]
+
         return {
             'id': video_id,
             'formats': formats,
             'title': video_data['caption'],
+            'description': video_data.get('description'),
             'thumbnail': video_data.get('thumbnail_url'),
-            'upload_date': video_data['updated_at'].replace('-', '')[:8],
+            'duration': float_or_none(video_data.get('duration'), 1000),
+            'timestamp': parse_iso8601(video_data.get('publish_at')),
             'like_count': video_data.get('likes', {}).get('total'),
+            'comment_count': video_data.get('comments', {}).get('total'),
             'view_count': video_data.get('views'),
+            'comments': comments,
+        }
+
+    def _extract_stream_info(self, stream_info):
+        broadcast_id = stream_info['broadcast_id']
+        is_live = stream_info.get('is_live')
+
+        formats = []
+        smil_url = stream_info.get('play_url')
+        if smil_url:
+            smil_formats = self._extract_smil_formats(smil_url, broadcast_id)
+            if smil_formats:
+                formats.extend(smil_formats)
+
+        m3u8_url = stream_info.get('m3u8_url')
+        if m3u8_url:
+            m3u8_formats = self._extract_m3u8_formats(
+                m3u8_url, broadcast_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)
+            if m3u8_formats:
+                formats.extend(m3u8_formats)
+
+        rtsp_url = stream_info.get('rtsp_url')
+        if rtsp_url:
+            formats.append({
+                'url': rtsp_url,
+                'format_id': 'rtsp',
+            })
+        self._sort_formats(formats)
+
+        return {
+            'id': broadcast_id,
+            'formats': formats,
+            'title': self._live_title(stream_info['stream_title']) if is_live else stream_info['stream_title'],
+            'thumbnail': stream_info.get('thumbnail_url'),
+            'is_live': is_live,
         }
 
-    def _extract_event(self, info):
-        event_id = compat_str(info['id'])
-        account = compat_str(info['owner_account_id'])
-        root_url = (
-            'https://new.livestream.com/api/accounts/{account}/events/{event}/'
-            'feed.json'.format(account=account, event=event_id))
-
-        def _extract_videos():
-            last_video = None
-            for i in itertools.count(1):
-                if last_video is None:
-                    info_url = root_url
-                else:
-                    info_url = '{root}?&id={id}&newer=-1&type=video'.format(
-                        root=root_url, id=last_video)
-                videos_info = self._download_json(info_url, event_id, 'Downloading page {0}'.format(i))['data']
-                videos_info = [v['data'] for v in videos_info if v['type'] == 'video']
-                if not videos_info:
-                    break
-                for v in videos_info:
-                    yield self._extract_video_info(v)
-                last_video = videos_info[-1]['id']
-        return self.playlist_result(_extract_videos(), event_id, info['full_name'])
+    def _extract_event(self, event_data):
+        event_id = compat_str(event_data['id'])
+        account_id = compat_str(event_data['owner_account_id'])
+        feed_root_url = self._API_URL_TEMPLATE % (account_id, event_id) + '/feed.json'
+
+        stream_info = event_data.get('stream_info')
+        if stream_info:
+            return self._extract_stream_info(stream_info)
+
+        last_video = None
+        entries = []
+        for i in itertools.count(1):
+            if last_video is None:
+                info_url = feed_root_url
+            else:
+                info_url = '{root}?&id={id}&newer=-1&type=video'.format(
                    root=feed_root_url, id=last_video)
+            videos_info = self._download_json(info_url, event_id, 'Downloading page {0}'.format(i))['data']
+            videos_info = [v['data'] for v in videos_info if v['type'] == 'video']
+            if not videos_info:
+                break
+            for v in videos_info:
+                entries.append(self.url_result(
+                    'http://livestream.com/accounts/%s/events/%s/videos/%s' % (account_id, event_id, v['id']),
+                    'Livestream', v['id'], v['caption']))
+            last_video = videos_info[-1]['id']
+        return self.playlist_result(entries, event_id, event_data['full_name'])
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         video_id = mobj.group('id')
-        event_name = mobj.group('event_name')
-        webpage = self._download_webpage(url, video_id or event_name)
-
-        og_video = self._og_search_video_url(
-            webpage, 'player url', fatal=False, default=None)
-        if og_video is not None:
-            query_str = compat_urllib_parse_urlparse(og_video).query
-            query = compat_urlparse.parse_qs(query_str)
-            if 'play_url' in query:
-                api_url = query['play_url'][0].replace('.smil', '')
-                info = json.loads(self._download_webpage(
-                    api_url, video_id, 'Downloading video info'))
-                return self._extract_video_info(info)
-
-        config_json = self._search_regex(
-            r'window.config = ({.*?});', webpage, 'window config')
-        info = json.loads(config_json)['event']
-
-        def is_relevant(vdata, vid):
-            result = vdata['type'] == 'video'
-            if video_id is not None:
-                result = result and compat_str(vdata['data']['id']) == vid
-            return result
-
-        if video_id is None:
-            # This is an event page:
-            return self._extract_event(info)
+        event = mobj.group('event_id') or mobj.group('event_name')
+        account = mobj.group('account_id') or mobj.group('account_name')
+        api_url = self._API_URL_TEMPLATE % (account, event)
+        if video_id:
+            video_data = self._download_json(api_url + '/videos/%s' % video_id, video_id)
+            return self._extract_video_info(video_data)
         else:
-            videos = [self._extract_video_info(video_data['data'])
-                      for video_data in info['feed']['data']
-                      if is_relevant(video_data, video_id)]
-            if not videos:
-                raise ExtractorError('Cannot find video %s' % video_id)
-            return videos[0]
+            event_data = self._download_json(api_url, video_id)
+            return self._extract_event(event_data)
 
 
 # The original version of Livestream uses a different system
@@ -204,6 +235,8 @@ class LivestreamOriginalIE(InfoExtractor):
             'id': 'pla_8aa4a3f1-ba15-46a4-893b-902210e138fb',
             'ext': 'mp4',
             'title': 'Spark 1 (BitCoin) with Cameron Winklevoss & Tyler Winklevoss of Winklevoss Capital',
+            'duration': 771.301,
+            'view_count': int,
         },
     }, {
         'url': 'https://original.livestream.com/newplay/folder?dirId=a07bf706-d0e4-4e75-a747-b021d84f2fd3',
@@ -220,15 +253,42 @@ class LivestreamOriginalIE(InfoExtractor):
         # this url is used on mobile devices
         stream_url = 'http://x{0}x.api.channel.livestream.com/3.0/getstream.json?id={1}'.format(user, video_id)
         stream_info = self._download_json(stream_url, video_id)
+        is_live = stream_info.get('isLive')
 
         item = info.find('channel').find('item')
-        ns = {'media': 'http://search.yahoo.com/mrss'}
-        thumbnail_url = item.find(xpath_with_ns('media:thumbnail', ns)).attrib['url']
+        media_ns = {'media': 'http://search.yahoo.com/mrss'}
+        thumbnail_url = xpath_attr(item, xpath_with_ns('media:thumbnail', media_ns), 'url')
+        duration = float_or_none(xpath_attr(item, xpath_with_ns('media:content', media_ns), 'duration'))
+        ls_ns = {'ls': 'http://api.channel.livestream.com/2.0'}
+        view_count = int_or_none(xpath_text(item, xpath_with_ns('ls:viewsCount', ls_ns)))
+
+        formats = [{
+            'url': stream_info['progressiveUrl'],
+            'format_id': 'http',
+        }]
+
+        m3u8_url = stream_info.get('httpUrl')
+        if m3u8_url:
+            m3u8_formats = self._extract_m3u8_formats(
+                m3u8_url, video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)
+            if m3u8_formats:
+                formats.extend(m3u8_formats)
+
+        rtsp_url = stream_info.get('rtspUrl')
+        if rtsp_url:
+            formats.append({
+                'url': rtsp_url,
+                'format_id': 'rtsp',
+            })
+        self._sort_formats(formats)
 
         return {
             'id': video_id,
-            'title': item.find('title').text,
-            'url': stream_info['progressiveUrl'],
+            'title': self._live_title(xpath_text(item, 'title')) if is_live else xpath_text(item, 'title'),
+            'formats': formats,
             'thumbnail': thumbnail_url,
+            'duration': duration,
+            'view_count': view_count,
+            'is_live': is_live,
         }
 
     def _extract_folder(self, url, folder_id):
@@ -239,14 +299,12 @@ class LivestreamOriginalIE(InfoExtractor):
                 <a\s+href="(?=https?://livestre\.am/)
             )([^"]+)"''', webpage))
 
-        return {
-            '_type': 'playlist',
-            'id': folder_id,
-            'entries': [{
-                '_type': 'url',
-                'url': compat_urlparse.urljoin(url, p),
-            } for p in paths],
-        }
+        entries = [{
+            '_type': 'url',
+            'url': compat_urlparse.urljoin(url, p),
+        } for p in paths]
+
+        return self.playlist_result(entries, folder_id)
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
```
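
For an event that is not currently live (no `stream_info`), the new `_extract_event` pages through the account's `feed.json` endpoint, repeatedly asking for items older than the last video id it has seen and turning each entry into a `url_result`. A rough standalone sketch of that pagination loop, with `requests` standing in for the extractor's `_download_json` helper (the endpoint and query parameters are copied from the patch):

```python
import itertools

import requests  # stand-in for InfoExtractor._download_json in this sketch

API_URL_TEMPLATE = 'http://livestream.com/api/accounts/%s/events/%s'


def iter_event_videos(account_id, event_id):
    """Yield the video entries of an event feed page by page,
    mirroring the loop in LivestreamIE._extract_event."""
    feed_root_url = API_URL_TEMPLATE % (account_id, event_id) + '/feed.json'
    last_video = None
    for page in itertools.count(1):
        # 'page' is only a counter; the real extractor uses it for log messages.
        if last_video is None:
            info_url = feed_root_url
        else:
            # Later pages ask for videos older than the last id already seen.
            info_url = '{root}?&id={id}&newer=-1&type=video'.format(
                root=feed_root_url, id=last_video)
        data = requests.get(info_url).json()['data']
        videos = [item['data'] for item in data if item['type'] == 'video']
        if not videos:
            break  # an empty page ends the feed
        for video in videos:
            yield video  # each dict carries at least 'id' and 'caption'
        last_video = videos[-1]['id']
```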
