-rw-r--r--   test/test_youtube_lists.py      20
-rwxr-xr-x   youtube_dl/InfoExtractors.py   103
2 files changed, 71 insertions, 52 deletions
diff --git a/test/test_youtube_lists.py b/test/test_youtube_lists.py
index 3044e0852..69b0f4447 100644
--- a/test/test_youtube_lists.py
+++ b/test/test_youtube_lists.py
@@ -8,7 +8,7 @@ import json
 import os
 sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 
-from youtube_dl.InfoExtractors import YoutubeUserIE,YoutubePlaylistIE
+from youtube_dl.InfoExtractors import YoutubeUserIE, YoutubePlaylistIE, YoutubeIE
 from youtube_dl.utils import *
 
 PARAMETERS_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), "parameters.json")
@@ -38,11 +38,8 @@ class TestYoutubeLists(unittest.TestCase):
         DL = FakeDownloader()
         IE = YoutubePlaylistIE(DL)
         IE.extract('https://www.youtube.com/playlist?list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re')
-        self.assertEqual(DL.result, [
-            ['http://www.youtube.com/watch?v=bV9L5Ht9LgY'],
-            ['http://www.youtube.com/watch?v=FXxLjLQi3Fg'],
-            ['http://www.youtube.com/watch?v=tU3Bgo5qJZE']
-        ])
+        self.assertEqual(map(lambda x: YoutubeIE()._extract_id(x[0]), DL.result),
+            [ 'bV9L5Ht9LgY', 'FXxLjLQi3Fg', 'tU3Bgo5qJZE' ])
 
     def test_youtube_playlist_long(self):
         DL = FakeDownloader()
@@ -50,14 +47,21 @@ class TestYoutubeLists(unittest.TestCase):
         IE.extract('https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q')
         self.assertTrue(len(DL.result) >= 799)
 
+    def test_youtube_playlist_with_deleted(self):
+        DL = FakeDownloader()
+        IE = YoutubePlaylistIE(DL)
+        IE.extract('https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC')
+        self.assertFalse('pElCt5oNDuI' in map(lambda x: YoutubeIE()._extract_id(x[0]), DL.result))
+        self.assertFalse('KdPEApIVdWM' in map(lambda x: YoutubeIE()._extract_id(x[0]), DL.result))
+
     def test_youtube_course(self):
         DL = FakeDownloader()
         IE = YoutubePlaylistIE(DL)
         # TODO find a > 100 (paginating?) videos course
         IE.extract('https://www.youtube.com/course?list=ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8')
-        self.assertEqual(DL.result[0], ['http://www.youtube.com/watch?v=j9WZyLZCBzs'])
+        self.assertEqual(YoutubeIE()._extract_id(DL.result[0][0]), 'j9WZyLZCBzs')
         self.assertEqual(len(DL.result), 25)
-        self.assertEqual(DL.result[-1], ['http://www.youtube.com/watch?v=rYefUsYuEp0'])
+        self.assertEqual(YoutubeIE()._extract_id(DL.result[-1][0]), 'rYefUsYuEp0')
 
     def test_youtube_channel(self):
         # I give up, please find a channel that does paginate and test this like test_youtube_playlist_long
diff --git a/youtube_dl/InfoExtractors.py b/youtube_dl/InfoExtractors.py
index d661d517d..021579ce0 100755
--- a/youtube_dl/InfoExtractors.py
+++ b/youtube_dl/InfoExtractors.py
@@ -15,6 +15,7 @@ import email.utils
 import xml.etree.ElementTree
 import random
 import math
+import operator
 
 from .utils import *
 
@@ -1662,22 +1663,40 @@ class YahooSearchIE(InfoExtractor):
 class YoutubePlaylistIE(InfoExtractor):
     """Information Extractor for YouTube playlists."""
 
-    _VALID_URL = r'(?:(?:https?://)?(?:\w+\.)?youtube\.com/(?:(?:course|view_play_list|my_playlists|artist|playlist)\?.*?(p|a|list)=|user/.*?/user/|p/|user/.*?#[pg]/c/)(?:PL|EC)?|PL|EC)([0-9A-Za-z-_]{10,})(?:/.*?/([0-9A-Za-z_-]+))?.*'
-    _TEMPLATE_URL = 'http://www.youtube.com/%s?%s=%s&page=%s&gl=US&hl=en'
-    _VIDEO_INDICATOR_TEMPLATE = r'/watch\?v=(.+?)&([^&"]+&)*list=.*?%s'
-    _MORE_PAGES_INDICATOR = u"Next \N{RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK}"
+    _VALID_URL = r"""(?:
+                        (?:https?://)?
+                        (?:\w+\.)?
+                        youtube\.com/
+                        (?:
+                           (?:course|view_play_list|my_playlists|artist|playlist)
+                           \? .*? (p|a|list)=
+                        |  user/.*?/user/
+                        |  p/
+                        |  user/.*?#[pg]/c/
+                        )
+                        (?:PL|EC)?
+                     |PL|EC)
+                     ([0-9A-Za-z-_]{10,})
+                     (?:/.*?/([0-9A-Za-z_-]+))?
+                     .*"""
+    _TEMPLATE_URL = 'https://gdata.youtube.com/feeds/api/playlists/%s?max-results=%i&start-index=%i&v=2&alt=json'
+    _MAX_RESULTS = 50
     IE_NAME = u'youtube:playlist'
 
     def __init__(self, downloader=None):
         InfoExtractor.__init__(self, downloader)
 
+    def suitable(self, url):
+        """Receives a URL and returns True if suitable for this IE."""
+        return re.match(self._VALID_URL, url, re.VERBOSE) is not None
+
     def report_download_page(self, playlist_id, pagenum):
         """Report attempt to download playlist page with given number."""
         self._downloader.to_screen(u'[youtube] PL %s: Downloading page #%s' % (playlist_id, pagenum))
 
     def _real_extract(self, url):
         # Extract playlist id
-        mobj = re.match(self._VALID_URL, url)
+        mobj = re.match(self._VALID_URL, url, re.VERBOSE)
         if mobj is None:
             self._downloader.trouble(u'ERROR: invalid url: %s' % url)
             return
@@ -1687,55 +1706,51 @@ class YoutubePlaylistIE(InfoExtractor):
             self._downloader.download([mobj.group(3)])
             return
 
-        # Download playlist pages
-        # prefix is 'p' as default for playlists but there are other types that need extra care
-        playlist_prefix = mobj.group(1)
-        if playlist_prefix == 'a':
-            playlist_access = 'artist'
-        else:
-            playlist_prefix = 'p'
-            playlist_access = 'view_play_list'
+        # Download playlist videos from API
         playlist_id = mobj.group(2)
-        video_ids = []
-        pagenum = 1
+        page_num = 1
+        videos = []
 
         while True:
-            self.report_download_page(playlist_id, pagenum)
-            url = self._TEMPLATE_URL % (playlist_access, playlist_prefix, playlist_id, pagenum)
-            request = compat_urllib_request.Request(url)
+            self.report_download_page(playlist_id, page_num)
+
+            url = self._TEMPLATE_URL % (playlist_id, self._MAX_RESULTS, self._MAX_RESULTS * (page_num - 1) + 1)
             try:
-                page = compat_urllib_request.urlopen(request).read().decode('utf-8')
+                page = compat_urllib_request.urlopen(url).read().decode('utf8')
             except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                 self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
                 return
 
-            # Extract video identifiers
-            ids_in_page = []
-            for mobj in re.finditer(self._VIDEO_INDICATOR_TEMPLATE % playlist_id, page):
-                if mobj.group(1) not in ids_in_page:
-                    ids_in_page.append(mobj.group(1))
-            video_ids.extend(ids_in_page)
+            try:
+                response = json.loads(page)
+            except ValueError as err:
+                self._downloader.trouble(u'ERROR: Invalid JSON in API response: ' + compat_str(err))
+                return
 
-            if self._MORE_PAGES_INDICATOR not in page:
+            videos += [(entry['yt$position']['$t'], entry['content']['src']) for entry in response['feed']['entry']]
+
+            if len(response['feed']['entry']) < self._MAX_RESULTS:
                 break
-            pagenum = pagenum + 1
+            page_num += 1
 
-        total = len(video_ids)
+        videos = map(operator.itemgetter(1), sorted(videos))
+
+        total = len(videos)
 
         playliststart = self._downloader.params.get('playliststart', 1) - 1
         playlistend = self._downloader.params.get('playlistend', -1)
         if playlistend == -1:
-            video_ids = video_ids[playliststart:]
+            videos = videos[playliststart:]
         else:
-            video_ids = video_ids[playliststart:playlistend]
+            videos = videos[playliststart:playlistend]
 
-        if len(video_ids) == total:
+        if len(videos) == total:
             self._downloader.to_screen(u'[youtube] PL %s: Found %i videos' % (playlist_id, total))
         else:
-            self._downloader.to_screen(u'[youtube] PL %s: Found %i videos, downloading %i' % (playlist_id, total, len(video_ids)))
+            self._downloader.to_screen(u'[youtube] PL %s: Found %i videos, downloading %i' % (playlist_id, total, len(videos)))
 
-        for id in video_ids:
-            self._downloader.download(['http://www.youtube.com/watch?v=%s' % id])
+        for video in videos:
+            self._downloader.download([video])
 
         return
 
@@ -3605,9 +3620,9 @@ class TweetReelIE(InfoExtractor):
             'upload_date': upload_date
         }
         return [info]
-        
+
 class SteamIE(InfoExtractor):
-    _VALID_URL = r"""http://store.steampowered.com/ 
+    _VALID_URL = r"""http://store.steampowered.com/
                 (?P<urltype>video|app)/ #If the page is only for videos or for a game
                 (?P<gameID>\d+)/?
                 (?P<videoID>\d*)(?P<extra>\??) #For urltype == video we sometimes get the videoID
@@ -3707,7 +3722,7 @@ class RBMARadioIE(InfoExtractor):
 class YouPornIE(InfoExtractor):
     """Information extractor for youporn.com."""
     _VALID_URL = r'^(?:https?://)?(?:\w+\.)?youporn\.com/watch/(?P<videoid>[0-9]+)/(?P<title>[^/]+)'
-    
+
     def _print_formats(self, formats):
         """Print all available formats"""
         print(u'Available formats:')
@@ -3769,8 +3784,8 @@ class YouPornIE(InfoExtractor):
         links = re.findall(LINK_RE, download_list_html)
         if(len(links) == 0):
             raise ExtractorError(u'ERROR: no known formats available for video')
-        
-        self._downloader.to_screen(u'[youporn] Links found: %d' % len(links))   
+
+        self._downloader.to_screen(u'[youporn] Links found: %d' % len(links))
 
         formats = []
         for link in links:
@@ -3821,7 +3836,7 @@ class YouPornIE(InfoExtractor):
                 return
 
             return [format]
-        
+
 
 class PornotubeIE(InfoExtractor):
     """Information extractor for pornotube.com."""
@@ -3893,7 +3908,7 @@ class YouJizzIE(InfoExtractor):
         embed_page_url = result.group(0).strip()
         video_id = result.group('videoid')
-    
+
         webpage = self._download_webpage(embed_page_url, video_id)
 
         # Get the video URL
@@ -4053,7 +4068,7 @@ class TEDIE(InfoExtractor):
 
 class MySpassIE(InfoExtractor):
     _VALID_URL = r'http://www.myspass.de/.*'
-    
+
     def _real_extract(self, url):
         META_DATA_URL_TEMPLATE = 'http://www.myspass.de/myspass/includes/apps/video/getvideometadataxml.php?id=%s'
 
@@ -4063,12 +4078,12 @@ class MySpassIE(InfoExtractor):
         url_parent_path, video_id = os.path.split(url_path)
         if not video_id:
             _, video_id = os.path.split(url_parent_path)
-        
+
         # get metadata
         metadata_url = META_DATA_URL_TEMPLATE % video_id
         metadata_text = self._download_webpage(metadata_url, video_id)
         metadata = xml.etree.ElementTree.fromstring(metadata_text.encode('utf-8'))
-        
+
         # extract values from metadata
         url_flv_el = metadata.find('url_flv')
         if url_flv_el is None:
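For context on the InfoExtractors.py change: instead of scraping HTML playlist pages and matching /watch?v= links against _VIDEO_INDICATOR_TEMPLATE, the new _real_extract walks the gdata playlist feed 50 entries at a time, collects (position, url) pairs, and sorts them by position, so deleted videos (which simply have no feed entry) never reach the downloader. Below is a minimal standalone sketch of that pagination logic; the playlist_video_urls helper name and the use of plain urllib instead of youtube-dl's compat_* wrappers are illustrative simplifications, not part of the commit, and the gdata v2 feed it targets has since been retired, so treat it as a reading aid rather than something to run against today's YouTube.

    import json
    import operator

    try:
        from urllib.request import urlopen   # Python 3
    except ImportError:
        from urllib2 import urlopen          # Python 2

    # Mirrors the patch: 50 entries per page, 1-based start-index.
    TEMPLATE_URL = ('https://gdata.youtube.com/feeds/api/playlists/%s'
                    '?max-results=%i&start-index=%i&v=2&alt=json')
    MAX_RESULTS = 50

    def playlist_video_urls(playlist_id):
        """Return the playlist's video URLs ordered by playlist position."""
        videos = []
        page_num = 1
        while True:
            url = TEMPLATE_URL % (playlist_id, MAX_RESULTS,
                                  MAX_RESULTS * (page_num - 1) + 1)
            response = json.loads(urlopen(url).read().decode('utf8'))
            entries = response['feed'].get('entry', [])
            # Deleted videos have no entry, so they never produce a download.
            videos += [(e['yt$position']['$t'], e['content']['src'])
                       for e in entries]
            if len(entries) < MAX_RESULTS:
                break  # a short page means the feed is exhausted
            page_num += 1
        # Same ordering step as the patch: sort by position, keep only the URLs.
        return list(map(operator.itemgetter(1), sorted(videos)))

The explicit sort by yt$position is what the updated tests rely on when they compare extracted ids positionally via DL.result[0] and DL.result[-1].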
