From 3182f3e2dc62cf918fa12f578e2bbfc952862320 Mon Sep 17 00:00:00 2001
From: Philipp Hagemeister
Date: Fri, 25 Jul 2014 11:46:51 +0200
Subject: [justin.tv] Fix page reporting (#3352)

youtube-dl -j http://www.twitch.tv/fang_i3anger still fails though.
---
 youtube_dl/extractor/justintv.py | 17 +++++++++--------
 1 file changed, 9 insertions(+), 8 deletions(-)

diff --git a/youtube_dl/extractor/justintv.py b/youtube_dl/extractor/justintv.py
index 7083db12e..27017e89f 100644
--- a/youtube_dl/extractor/justintv.py
+++ b/youtube_dl/extractor/justintv.py
@@ -1,5 +1,6 @@
 from __future__ import unicode_literals
 
+import itertools
 import json
 import os
 import re
@@ -43,10 +44,11 @@ class JustinTVIE(InfoExtractor):
     }
 
     # Return count of items, list of *valid* items
-    def _parse_page(self, url, video_id):
-        info_json = self._download_webpage(url, video_id,
-                                           'Downloading video info JSON',
-                                           'unable to download video info JSON')
+    def _parse_page(self, url, video_id, counter):
+        info_json = self._download_webpage(
+            url, video_id,
+            'Downloading video info JSON on page %d' % counter,
+            'Unable to download video info JSON %d' % counter)
 
         response = json.loads(info_json)
         if type(response) != list:
@@ -138,11 +140,10 @@ class JustinTVIE(InfoExtractor):
         entries = []
         offset = 0
         limit = self._JUSTIN_PAGE_LIMIT
-        while True:
-            if paged:
-                self.report_download_page(video_id, offset)
+        for counter in itertools.count(1):
             page_url = api + ('?offset=%d&limit=%d' % (offset, limit))
-            page_count, page_info = self._parse_page(page_url, video_id)
+            page_count, page_info = self._parse_page(
+                page_url, video_id, counter)
             entries.extend(page_info)
             if not paged or page_count != limit:
                 break
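
For context, the snippet below is a minimal, self-contained sketch of the pagination pattern the patch switches to: itertools.count(1) supplies a 1-based page number for the progress notes, and the loop stops on the first short page. It is not part of the patch; fetch_page, PAGE_LIMIT, and the item counts are hypothetical stand-ins for _download_webpage, _JUSTIN_PAGE_LIMIT, and the real API.

    import itertools

    PAGE_LIMIT = 100  # stand-in for _JUSTIN_PAGE_LIMIT; value assumed for illustration


    def fetch_page(offset, limit):
        # Hypothetical stand-in for _download_webpage + json.loads: returns up
        # to `limit` fake items starting at `offset` from a 250-item "API".
        total = 250
        return list(range(offset, min(offset + limit, total)))


    def fetch_all(paged=True):
        entries = []
        offset = 0
        # itertools.count(1) replaces a bare `while True` loop so every
        # iteration carries a 1-based page number for progress messages.
        for counter in itertools.count(1):
            print('Downloading video info JSON on page %d' % counter)
            page_info = fetch_page(offset, PAGE_LIMIT)
            entries.extend(page_info)
            # A short page (or an unpaged endpoint) means we reached the end.
            if not paged or len(page_info) != PAGE_LIMIT:
                break
            offset += PAGE_LIMIT
        return entries


    if __name__ == '__main__':
        print('fetched %d entries' % len(fetch_all()))

Note that the patch keeps the extractor's termination test (page_count != limit) unchanged; only the counter used in the download note is new.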