Diffstat (limited to 'youtube_dl/extractor/mixcloud.py')
-rw-r--r--  youtube_dl/extractor/mixcloud.py  69
1 file changed, 47 insertions, 22 deletions
diff --git a/youtube_dl/extractor/mixcloud.py b/youtube_dl/extractor/mixcloud.py
index 1831c6749..21aea0c55 100644
--- a/youtube_dl/extractor/mixcloud.py
+++ b/youtube_dl/extractor/mixcloud.py
@@ -1,6 +1,7 @@
 from __future__ import unicode_literals
 
 import re
+import itertools
 
 from .common import InfoExtractor
 from ..compat import (
@@ -10,7 +11,6 @@ from ..utils import (
     ExtractorError,
     HEADRequest,
     str_to_int,
-    parse_iso8601,
 )
 
 
@@ -27,8 +27,6 @@ class MixcloudIE(InfoExtractor):
             'description': 'After quite a long silence from myself, finally another Drum\'n\'Bass mix with my favourite current dance floor bangers.',
             'uploader': 'Daniel Holbach',
             'uploader_id': 'dholbach',
-            'upload_date': '20111115',
-            'timestamp': 1321359578,
             'thumbnail': 're:https?://.*\.jpg',
             'view_count': int,
             'like_count': int,
@@ -37,31 +35,30 @@ class MixcloudIE(InfoExtractor):
         'url': 'http://www.mixcloud.com/gillespeterson/caribou-7-inch-vinyl-mix-chat/',
         'info_dict': {
             'id': 'gillespeterson-caribou-7-inch-vinyl-mix-chat',
-            'ext': 'm4a',
-            'title': 'Electric Relaxation vol. 3',
+            'ext': 'mp3',
+            'title': 'Caribou 7 inch Vinyl Mix & Chat',
             'description': 'md5:2b8aec6adce69f9d41724647c65875e8',
-            'uploader': 'Daniel Drumz',
+            'uploader': 'Gilles Peterson Worldwide',
             'uploader_id': 'gillespeterson',
-            'thumbnail': 're:https?://.*\.jpg',
+            'thumbnail': 're:https?://.*/images/',
             'view_count': int,
             'like_count': int,
         },
     }]
 
-    def _get_url(self, track_id, template_url):
-        server_count = 30
-        for i in range(server_count):
-            url = template_url % i
+    def _get_url(self, track_id, template_url, server_number):
+        boundaries = (1, 30)
+        for nr in server_numbers(server_number, boundaries):
+            url = template_url % nr
             try:
                 # We only want to know if the request succeed
                 # don't download the whole file
                 self._request_webpage(
                     HEADRequest(url), track_id,
-                    'Checking URL %d/%d ...' % (i + 1, server_count + 1))
+                    'Checking URL %d/%d ...' % (nr, boundaries[-1]))
                 return url
             except ExtractorError:
                 pass
-
         return None
 
     def _real_extract(self, url):
@@ -75,17 +72,18 @@ class MixcloudIE(InfoExtractor):
         preview_url = self._search_regex(
             r'\s(?:data-preview-url|m-preview)="([^"]+)"', webpage, 'preview url')
         song_url = preview_url.replace('/previews/', '/c/originals/')
+        server_number = int(self._search_regex(r'stream(\d+)', song_url, 'server number'))
         template_url = re.sub(r'(stream\d*)', 'stream%d', song_url)
-        final_song_url = self._get_url(track_id, template_url)
+        final_song_url = self._get_url(track_id, template_url, server_number)
         if final_song_url is None:
             self.to_screen('Trying with m4a extension')
             template_url = template_url.replace('.mp3', '.m4a').replace('originals/', 'm4a/64/')
-            final_song_url = self._get_url(track_id, template_url)
+            final_song_url = self._get_url(track_id, template_url, server_number)
         if final_song_url is None:
             raise ExtractorError('Unable to extract track url')
 
         PREFIX = (
-            r'<span class="play-button[^"]*?"'
+            r'm-play-on-spacebar[^>]+'
             r'(?:\s+[a-zA-Z0-9-]+(?:="[^"]+")?)*?\s+')
         title = self._html_search_regex(
             PREFIX + r'm-title="([^"]+)"', webpage, 'title')
@@ -99,16 +97,12 @@ class MixcloudIE(InfoExtractor):
             r'\s+"profile": "([^"]+)",', webpage, 'uploader id', fatal=False)
         description = self._og_search_description(webpage)
         like_count = str_to_int(self._search_regex(
-            [r'<meta itemprop="interactionCount" content="UserLikes:([0-9]+)"',
-             r'/favorites/?">([0-9]+)<'],
+            r'\bbutton-favorite\b.+m-ajax-toggle-count="([^"]+)"',
             webpage, 'like count', fatal=False))
         view_count = str_to_int(self._search_regex(
             [r'<meta itemprop="interactionCount" content="UserPlays:([0-9]+)"',
              r'/listeners/?">([0-9,.]+)</a>'],
             webpage, 'play count', fatal=False))
-        timestamp = parse_iso8601(self._search_regex(
-            r'<time itemprop="dateCreated" datetime="([^"]+)">',
-            webpage, 'upload date', default=None))
 
         return {
             'id': track_id,
@@ -118,7 +112,38 @@
             'thumbnail': thumbnail,
             'uploader': uploader,
             'uploader_id': uploader_id,
-            'timestamp': timestamp,
             'view_count': view_count,
             'like_count': like_count,
         }
+
+
+def server_numbers(first, boundaries):
+    """ Server numbers to try in descending order of probable availability.
+    Starting from first (i.e. the number of the server hosting the preview file)
+    and going further and further up to the higher boundary and down to the
+    lower one in an alternating fashion. Namely:
+
+        server_numbers(2, (1, 5))
+
+        # Where the preview server is 2, min number is 1 and max is 5.
+        # Yields: 2, 3, 1, 4, 5
+
+    Why not random numbers or increasing sequences? Since from what I've seen,
+    full length files seem to be hosted on servers whose number is closer to
+    that of the preview; to be confirmed.
+    """
+    zip_longest = getattr(itertools, 'zip_longest', None)
+    if zip_longest is None:
+        # python 2.x
+        zip_longest = itertools.izip_longest
+
+    if len(boundaries) != 2:
+        raise ValueError("boundaries should be a two-element tuple")
+    min, max = boundaries
+    highs = range(first + 1, max + 1)
+    lows = range(first - 1, min - 1, -1)
+    rest = filter(
+        None, itertools.chain.from_iterable(zip_longest(highs, lows)))
+    yield first
+    for n in rest:
+        yield n
