 youtube_dl/extractor/youtube.py | 22 +++++++++++++---------
 1 file changed, 13 insertions(+), 9 deletions(-)
diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py
index c860eedda..a68576547 100644
--- a/youtube_dl/extractor/youtube.py
+++ b/youtube_dl/extractor/youtube.py
@@ -1717,7 +1717,7 @@ class YoutubeUserIE(InfoExtractor):
         # page by page until there are no video ids - it means we got
         # all of them.
 
-        video_ids = []
+        url_results = []
 
         for pagenum in itertools.count(0):
             start_index = pagenum * self._GDATA_PAGE_SIZE + 1
@@ -1735,10 +1735,17 @@ class YoutubeUserIE(InfoExtractor):
                 break
 
             # Extract video identifiers
-            ids_in_page = []
-            for entry in response['feed']['entry']:
-                ids_in_page.append(entry['id']['$t'].split('/')[-1])
-            video_ids.extend(ids_in_page)
+            entries = response['feed']['entry']
+            for entry in entries:
+                title = entry['title']['$t']
+                video_id = entry['id']['$t'].split('/')[-1]
+                url_results.append({
+                    '_type': 'url',
+                    'url': video_id,
+                    'ie_key': 'Youtube',
+                    'id': video_id,
+                    'title': title,
+                })
 
             # A little optimization - if current page is not
             # "full", ie. does not contain PAGE_SIZE video ids then
@@ -1746,12 +1753,9 @@ class YoutubeUserIE(InfoExtractor):
             # are no more ids on further pages - no need to query
             # again.
 
-            if len(ids_in_page) < self._GDATA_PAGE_SIZE:
+            if len(entries) < self._GDATA_PAGE_SIZE:
                 break
 
-        url_results = [
-            self.url_result(video_id, 'Youtube', video_id=video_id)
-            for video_id in video_ids]
         return self.playlist_result(url_results, playlist_title=username)
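
For context, the dicts built inline by the new loop have essentially the same shape as the return value of InfoExtractor.url_result(); the difference is that the title from the GData feed entry is now attached as well. The following is a minimal standalone sketch, not part of the patch: the helper name and the sample feed (with made-up video ids and titles) are hypothetical stand-ins for one page of response['feed']['entry'], used only to show how a page maps to the '_type': 'url' results that playlist_result() receives.

# Standalone illustration only, not youtube-dl code: build url-type result
# dicts from one page of a GData-style feed, mirroring the loop in the patch.
def entries_to_url_results(feed):
    url_results = []
    for entry in feed['feed']['entry']:
        title = entry['title']['$t']
        video_id = entry['id']['$t'].split('/')[-1]
        url_results.append({
            '_type': 'url',
            'url': video_id,      # bare video id, as in the pre-existing url_result() call
            'ie_key': 'Youtube',
            'id': video_id,
            'title': title,
        })
    return url_results

# Hand-built sample resembling one page of the GData API response (hypothetical data).
sample_feed = {
    'feed': {
        'entry': [
            {'id': {'$t': 'http://gdata.youtube.com/feeds/api/videos/abc123def45'},
             'title': {'$t': 'First video'}},
            {'id': {'$t': 'http://gdata.youtube.com/feeds/api/videos/ghi678jkl90'},
             'title': {'$t': 'Second video'}},
        ]
    }
}

for result in entries_to_url_results(sample_feed):
    print(result['id'], result['title'])

The only behavioral addition over the old list-comprehension approach is that each deferred result now carries the 'title' taken from the feed, so it is available without resolving every video up front.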
