77 files changed, 227 insertions, 223 deletions
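The hunks that follow are almost entirely PEP8 continuation-line alignment fixes: arguments on wrapped lines are re-indented to line up with the opening delimiter, a few _TESTS lists are reflowed, and an unused import and an unused local variable are dropped. A minimal, self-contained sketch of the re-indentation pattern (hypothetical helper, not taken from the diff):

    # Hypothetical helper standing in for youtube-dl's logging/extraction calls.
    def report(message, extractor, video_id):
        print('[%s] %s: %s' % (extractor, video_id, message))

    # Before: the continuation line uses an arbitrary hanging indent,
    # which pycodestyle flags as a continuation-line error.
    report('Downloading webpage',
        'youtube', 'BaW_jenozKc')

    # After: the continuation line is aligned with the opening parenthesis,
    # matching the style applied throughout this commit.
    report('Downloading webpage',
           'youtube', 'BaW_jenozKc')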
| diff --git a/devscripts/buildserver.py b/devscripts/buildserver.py index 42ee2b5cb..7c2f49f8b 100644 --- a/devscripts/buildserver.py +++ b/devscripts/buildserver.py @@ -142,7 +142,7 @@ def win_service_set_status(handle, status_code):  def win_service_main(service_name, real_main, argc, argv_raw):      try: -        #args = [argv_raw[i].value for i in range(argc)] +        # args = [argv_raw[i].value for i in range(argc)]          stop_event = threading.Event()          handler = HandlerEx(functools.partial(stop_event, win_service_handler))          h = advapi32.RegisterServiceCtrlHandlerExW(service_name, handler, None) diff --git a/devscripts/fish-completion.py b/devscripts/fish-completion.py index 2185d5522..f2445984f 100755 --- a/devscripts/fish-completion.py +++ b/devscripts/fish-completion.py @@ -30,7 +30,7 @@ def build_completion(opt_parser):      for group in opt_parser.option_groups:          for option in group.option_list:              long_option = option.get_opt_string().strip('-') -            help_msg = shell_quote([option.help]) +            shell_quote([option.help])              complete_cmd = ['complete', '--command', 'youtube-dl', '--long-option', long_option]              if option._short_opts:                  complete_cmd += ['--short-option', option._short_opts[0].strip('-')] @@ -4,7 +4,6 @@  from __future__ import print_function  import os.path -import pkg_resources  import warnings  import sys diff --git a/test/helper.py b/test/helper.py index 91822935f..2e320e2cf 100644 --- a/test/helper.py +++ b/test/helper.py @@ -116,14 +116,14 @@ def expect_info_dict(self, expected_dict, got_dict):          elif isinstance(expected, type):              got = got_dict.get(info_field)              self.assertTrue(isinstance(got, expected), -                'Expected type %r for field %s, but got value %r of type %r' % (expected, info_field, got, type(got))) +                            'Expected type %r for field %s, but got value %r of type %r' % (expected, info_field, got, type(got)))          else:              if isinstance(expected, compat_str) and expected.startswith('md5:'):                  got = 'md5:' + md5(got_dict.get(info_field))              else:                  got = got_dict.get(info_field)              self.assertEqual(expected, got, -                'invalid value for field %s, expected %r, got %r' % (info_field, expected, got)) +                             'invalid value for field %s, expected %r, got %r' % (info_field, expected, got))      # Check for the presence of mandatory fields      if got_dict.get('_type') != 'playlist': @@ -135,8 +135,8 @@ def expect_info_dict(self, expected_dict, got_dict):      # Are checkable fields missing from the test case definition?      
test_info_dict = dict((key, value if not isinstance(value, compat_str) or len(value) < 250 else 'md5:' + md5(value)) -        for key, value in got_dict.items() -        if value and key in ('title', 'description', 'uploader', 'upload_date', 'timestamp', 'uploader_id', 'location')) +                          for key, value in got_dict.items() +                          if value and key in ('title', 'description', 'uploader', 'upload_date', 'timestamp', 'uploader_id', 'location'))      missing_keys = set(test_info_dict.keys()) - set(expected_dict.keys())      if missing_keys:          def _repr(v): diff --git a/youtube_dl/YoutubeDL.py b/youtube_dl/YoutubeDL.py index d9e93f5d2..21c7c298a 100755 --- a/youtube_dl/YoutubeDL.py +++ b/youtube_dl/YoutubeDL.py @@ -314,7 +314,7 @@ class YoutubeDL(object):          self._output_process.stdin.write((message + '\n').encode('utf-8'))          self._output_process.stdin.flush()          res = ''.join(self._output_channel.readline().decode('utf-8') -                       for _ in range(line_count)) +                      for _ in range(line_count))          return res[:-len('\n')]      def to_screen(self, message, skip_eol=False): @@ -701,13 +701,15 @@ class YoutubeDL(object):                  'It needs to be updated.' % ie_result.get('extractor'))              def _fixup(r): -                self.add_extra_info(r, +                self.add_extra_info( +                    r,                      {                          'extractor': ie_result['extractor'],                          'webpage_url': ie_result['webpage_url'],                          'webpage_url_basename': url_basename(ie_result['webpage_url']),                          'extractor_key': ie_result['extractor_key'], -                    }) +                    } +                )                  return r              ie_result['entries'] = [                  self.process_ie_result(_fixup(r), download, extra_info) @@ -857,14 +859,14 @@ class YoutubeDL(object):                          # Two formats have been requested like '137+139'                          format_1, format_2 = rf.split('+')                          formats_info = (self.select_format(format_1, formats), -                            self.select_format(format_2, formats)) +                                        self.select_format(format_2, formats))                          if all(formats_info):                              # The first format must contain the video and the                              # second the audio                              if formats_info[0].get('vcodec') == 'none':                                  self.report_error('The first format must ' -                                    'contain the video, try using ' -                                    '"-f %s+%s"' % (format_2, format_1)) +                                                  'contain the video, try using ' +                                                  '"-f %s+%s"' % (format_2, format_1))                                  return                              selected_format = {                                  'requested_formats': formats_info, @@ -1042,10 +1044,10 @@ class YoutubeDL(object):                          with open(thumb_filename, 'wb') as thumbf:                              shutil.copyfileobj(uf, thumbf)                          self.to_screen('[%s] %s: Writing thumbnail to: %s' % -                            (info_dict['extractor'], info_dict['id'], thumb_filename)) +                                       
(info_dict['extractor'], info_dict['id'], thumb_filename))                      except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:                          self.report_warning('Unable to download thumbnail "%s": %s' % -                            (info_dict['thumbnail'], compat_str(err))) +                                            (info_dict['thumbnail'], compat_str(err)))          if not self.params.get('skip_download', False):              if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(filename)): @@ -1066,8 +1068,8 @@ class YoutubeDL(object):                          if not merger._executable:                              postprocessors = []                              self.report_warning('You have requested multiple ' -                                'formats but ffmpeg or avconv are not installed.' -                                ' The formats won\'t be merged') +                                                'formats but ffmpeg or avconv are not installed.' +                                                ' The formats won\'t be merged')                          else:                              postprocessors = [merger]                          for f in info_dict['requested_formats']: diff --git a/youtube_dl/compat.py b/youtube_dl/compat.py index 9087b4f85..2983501ae 100644 --- a/youtube_dl/compat.py +++ b/youtube_dl/compat.py @@ -116,7 +116,7 @@ except ImportError:  # Python 2      # Python 2's version is apparently totally broken      def _parse_qsl(qs, keep_blank_values=False, strict_parsing=False, -                encoding='utf-8', errors='replace'): +                   encoding='utf-8', errors='replace'):          qs, _coerce_result = qs, unicode          pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]          r = [] @@ -145,10 +145,10 @@ except ImportError:  # Python 2          return r      def compat_parse_qs(qs, keep_blank_values=False, strict_parsing=False, -                encoding='utf-8', errors='replace'): +                        encoding='utf-8', errors='replace'):          parsed_result = {}          pairs = _parse_qsl(qs, keep_blank_values, strict_parsing, -                        encoding=encoding, errors=errors) +                           encoding=encoding, errors=errors)          for name, value in pairs:              if name in parsed_result:                  parsed_result[name].append(value) diff --git a/youtube_dl/downloader/f4m.py b/youtube_dl/downloader/f4m.py index c752e8e24..7cd22c504 100644 --- a/youtube_dl/downloader/f4m.py +++ b/youtube_dl/downloader/f4m.py @@ -225,13 +225,15 @@ class F4mFD(FileDownloader):          self.to_screen('[download] Downloading f4m manifest')          manifest = self.ydl.urlopen(man_url).read()          self.report_destination(filename) -        http_dl = HttpQuietDownloader(self.ydl, +        http_dl = HttpQuietDownloader( +            self.ydl,              {                  'continuedl': True,                  'quiet': True,                  'noprogress': True,                  'test': self.params.get('test', False), -            }) +            } +        )          doc = etree.fromstring(manifest)          formats = [(int(f.attrib.get('bitrate', -1)), f) for f in doc.findall(_add_ns('media'))] @@ -277,7 +279,7 @@ class F4mFD(FileDownloader):          def frag_progress_hook(status):              frag_total_bytes = status.get('total_bytes', 0)              estimated_size = (state['downloaded_bytes'] + -                (total_frags 
- state['frag_counter']) * frag_total_bytes) +                              (total_frags - state['frag_counter']) * frag_total_bytes)              if status['status'] == 'finished':                  state['downloaded_bytes'] += frag_total_bytes                  state['frag_counter'] += 1 @@ -287,13 +289,13 @@ class F4mFD(FileDownloader):                  frag_downloaded_bytes = status['downloaded_bytes']                  byte_counter = state['downloaded_bytes'] + frag_downloaded_bytes                  frag_progress = self.calc_percent(frag_downloaded_bytes, -                    frag_total_bytes) +                                                  frag_total_bytes)                  progress = self.calc_percent(state['frag_counter'], total_frags)                  progress += frag_progress / float(total_frags)              eta = self.calc_eta(start, time.time(), estimated_size, byte_counter)              self.report_progress(progress, format_bytes(estimated_size), -                status.get('speed'), eta) +                                 status.get('speed'), eta)          http_dl.add_progress_hook(frag_progress_hook)          frags_filenames = [] diff --git a/youtube_dl/extractor/appletrailers.py b/youtube_dl/extractor/appletrailers.py index 567a76cf0..87580147d 100644 --- a/youtube_dl/extractor/appletrailers.py +++ b/youtube_dl/extractor/appletrailers.py @@ -88,7 +88,7 @@ class AppleTrailersIE(InfoExtractor):          for li in doc.findall('./div/ul/li'):              on_click = li.find('.//a').attrib['onClick']              trailer_info_json = self._search_regex(self._JSON_RE, -                on_click, 'trailer info') +                                                   on_click, 'trailer info')              trailer_info = json.loads(trailer_info_json)              title = trailer_info['title']              video_id = movie + '-' + re.sub(r'[^a-zA-Z0-9]', '', title).lower() diff --git a/youtube_dl/extractor/bambuser.py b/youtube_dl/extractor/bambuser.py index eab99faaa..928ea61a3 100644 --- a/youtube_dl/extractor/bambuser.py +++ b/youtube_dl/extractor/bambuser.py @@ -38,7 +38,7 @@ class BambuserIE(InfoExtractor):          mobj = re.match(self._VALID_URL, url)          video_id = mobj.group('id')          info_url = ('http://player-c.api.bambuser.com/getVideo.json?' 
-            '&api_key=%s&vid=%s' % (self._API_KEY, video_id)) +                    '&api_key=%s&vid=%s' % (self._API_KEY, video_id))          info_json = self._download_webpage(info_url, video_id)          info = json.loads(info_json)['result'] @@ -74,8 +74,8 @@ class BambuserChannelIE(InfoExtractor):          last_id = ''          for i in itertools.count(1):              req_url = ('http://bambuser.com/xhr-api/index.php?username={user}' -                '&sort=created&access_mode=0%2C1%2C2&limit={count}' -                '&method=broadcast&format=json&vid_older_than={last}' +                       '&sort=created&access_mode=0%2C1%2C2&limit={count}' +                       '&method=broadcast&format=json&vid_older_than={last}'                  ).format(user=user, count=self._STEP, last=last_id)              req = compat_urllib_request.Request(req_url)              # Without setting this header, we wouldn't get any result diff --git a/youtube_dl/extractor/bbccouk.py b/youtube_dl/extractor/bbccouk.py index fa15bf19c..6a507e113 100644 --- a/youtube_dl/extractor/bbccouk.py +++ b/youtube_dl/extractor/bbccouk.py @@ -165,10 +165,10 @@ class BBCCoUkIE(SubtitlesInfoExtractor):          webpage = self._download_webpage(url, group_id, 'Downloading video page')          if re.search(r'id="emp-error" class="notinuk">', webpage):              raise ExtractorError('Currently BBC iPlayer TV programmes are available to play in the UK only', -                expected=True) +                                 expected=True)          playlist = self._download_xml('http://www.bbc.co.uk/iplayer/playlist/%s' % group_id, group_id, -            'Downloading playlist XML') +                                      'Downloading playlist XML')          no_items = playlist.find('./{http://bbc.co.uk/2008/emp/playlist}noItems')          if no_items is not None: diff --git a/youtube_dl/extractor/cnn.py b/youtube_dl/extractor/cnn.py index 3826ce7e1..81142ee41 100644 --- a/youtube_dl/extractor/cnn.py +++ b/youtube_dl/extractor/cnn.py @@ -25,8 +25,7 @@ class CNNIE(InfoExtractor):              'duration': 135,              'upload_date': '20130609',          }, -    }, -    { +    }, {          "url": "http://edition.cnn.com/video/?/video/us/2013/08/21/sot-student-gives-epic-speech.georgia-institute-of-technology&utm_source=feedburner&utm_medium=feed&utm_campaign=Feed%3A+rss%2Fcnn_topstories+%28RSS%3A+Top+Stories%29",          "md5": "b5cc60c60a3477d185af8f19a2a26f4e",          "info_dict": { diff --git a/youtube_dl/extractor/collegehumor.py b/youtube_dl/extractor/collegehumor.py index 6f866e7fc..002b24037 100644 --- a/youtube_dl/extractor/collegehumor.py +++ b/youtube_dl/extractor/collegehumor.py @@ -10,47 +10,46 @@ from ..utils import int_or_none  class CollegeHumorIE(InfoExtractor):      _VALID_URL = r'^(?:https?://)?(?:www\.)?collegehumor\.com/(video|embed|e)/(?P<videoid>[0-9]+)/?(?P<shorttitle>.*)$' -    _TESTS = [{ -        'url': 'http://www.collegehumor.com/video/6902724/comic-con-cosplay-catastrophe', -        'md5': 'dcc0f5c1c8be98dc33889a191f4c26bd', -        'info_dict': { -            'id': '6902724', -            'ext': 'mp4', -            'title': 'Comic-Con Cosplay Catastrophe', -            'description': "Fans get creative this year at San Diego.  Too creative.  
And yes, that's really Joss Whedon.", -            'age_limit': 13, -            'duration': 187, +    _TESTS = [ +        { +            'url': 'http://www.collegehumor.com/video/6902724/comic-con-cosplay-catastrophe', +            'md5': 'dcc0f5c1c8be98dc33889a191f4c26bd', +            'info_dict': { +                'id': '6902724', +                'ext': 'mp4', +                'title': 'Comic-Con Cosplay Catastrophe', +                'description': "Fans get creative this year at San Diego.  Too creative.  And yes, that's really Joss Whedon.", +                'age_limit': 13, +                'duration': 187, +            }, +        }, { +            'url': 'http://www.collegehumor.com/video/3505939/font-conference', +            'md5': '72fa701d8ef38664a4dbb9e2ab721816', +            'info_dict': { +                'id': '3505939', +                'ext': 'mp4', +                'title': 'Font Conference', +                'description': "This video wasn't long enough, so we made it double-spaced.", +                'age_limit': 10, +                'duration': 179, +            }, +        }, { +            # embedded youtube video +            'url': 'http://www.collegehumor.com/embed/6950306', +            'info_dict': { +                'id': 'Z-bao9fg6Yc', +                'ext': 'mp4', +                'title': 'Young Americans Think President John F. Kennedy Died THIS MORNING IN A CAR ACCIDENT!!!', +                'uploader': 'Mark Dice', +                'uploader_id': 'MarkDice', +                'description': 'md5:62c3dab9351fac7bb44b53b69511d87f', +                'upload_date': '20140127', +            }, +            'params': { +                'skip_download': True, +            }, +            'add_ie': ['Youtube'],          }, -    }, -    { -        'url': 'http://www.collegehumor.com/video/3505939/font-conference', -        'md5': '72fa701d8ef38664a4dbb9e2ab721816', -        'info_dict': { -            'id': '3505939', -            'ext': 'mp4', -            'title': 'Font Conference', -            'description': "This video wasn't long enough, so we made it double-spaced.", -            'age_limit': 10, -            'duration': 179, -        }, -    }, -    # embedded youtube video -    { -        'url': 'http://www.collegehumor.com/embed/6950306', -        'info_dict': { -            'id': 'Z-bao9fg6Yc', -            'ext': 'mp4', -            'title': 'Young Americans Think President John F. 
Kennedy Died THIS MORNING IN A CAR ACCIDENT!!!', -            'uploader': 'Mark Dice', -            'uploader_id': 'MarkDice', -            'description': 'md5:62c3dab9351fac7bb44b53b69511d87f', -            'upload_date': '20140127', -        }, -        'params': { -            'skip_download': True, -        }, -        'add_ie': ['Youtube'], -    },      ]      def _real_extract(self, url): diff --git a/youtube_dl/extractor/common.py b/youtube_dl/extractor/common.py index f0489ede4..3c2d46dd5 100644 --- a/youtube_dl/extractor/common.py +++ b/youtube_dl/extractor/common.py @@ -478,7 +478,7 @@ class InfoExtractor(object):              raise RegexNotFoundError('Unable to extract %s' % _name)          else:              self._downloader.report_warning('unable to extract %s; ' -                'please report this issue on http://yt-dl.org/bug' % _name) +                                            'please report this issue on http://yt-dl.org/bug' % _name)              return None      def _html_search_regex(self, pattern, string, name, default=_NO_DEFAULT, fatal=True, flags=0, group=None): @@ -612,7 +612,7 @@ class InfoExtractor(object):      def _twitter_search_player(self, html):          return self._html_search_meta('twitter:player', html, -            'twitter card player') +                                      'twitter card player')      def _sort_formats(self, formats):          if not formats: diff --git a/youtube_dl/extractor/dailymotion.py b/youtube_dl/extractor/dailymotion.py index 22cdcdfa5..936c13cd6 100644 --- a/youtube_dl/extractor/dailymotion.py +++ b/youtube_dl/extractor/dailymotion.py @@ -114,7 +114,7 @@ class DailymotionIE(DailymotionBaseInfoExtractor, SubtitlesInfoExtractor):          embed_page = self._download_webpage(embed_url, video_id,                                              'Downloading embed page')          info = self._search_regex(r'var info = ({.*?}),$', embed_page, -            'video info', flags=re.MULTILINE) +                                  'video info', flags=re.MULTILINE)          info = json.loads(info)          if info.get('error') is not None:              msg = 'Couldn\'t get video, Dailymotion says: %s' % info['error']['title'] @@ -208,7 +208,7 @@ class DailymotionPlaylistIE(DailymotionBaseInfoExtractor):              if re.search(self._MORE_PAGES_INDICATOR, webpage) is None:                  break          return [self.url_result('http://www.dailymotion.com/video/%s' % video_id, 'Dailymotion') -                   for video_id in orderedSet(video_ids)] +                for video_id in orderedSet(video_ids)]      def _real_extract(self, url):          mobj = re.match(self._VALID_URL, url) diff --git a/youtube_dl/extractor/defense.py b/youtube_dl/extractor/defense.py index 3ffed3d44..5e50c63d9 100644 --- a/youtube_dl/extractor/defense.py +++ b/youtube_dl/extractor/defense.py @@ -9,7 +9,7 @@ from .common import InfoExtractor  class DefenseGouvFrIE(InfoExtractor):      IE_NAME = 'defense.gouv.fr'      _VALID_URL = (r'http://.*?\.defense\.gouv\.fr/layout/set/' -        r'ligthboxvideo/base-de-medias/webtv/(.*)') +                  r'ligthboxvideo/base-de-medias/webtv/(.*)')      _TEST = {          'url': 'http://www.defense.gouv.fr/layout/set/ligthboxvideo/base-de-medias/webtv/attaque-chimique-syrienne-du-21-aout-2013-1', @@ -28,9 +28,9 @@ class DefenseGouvFrIE(InfoExtractor):              webpage, 'ID')          json_url = ('http://static.videos.gouv.fr/brightcovehub/export/json/' -            + video_id) +                    + video_id)          
info = self._download_webpage(json_url, title, -                                                  'Downloading JSON config') +                                      'Downloading JSON config')          video_url = json.loads(info)['renditions'][0]['url']          return {'id': video_id, diff --git a/youtube_dl/extractor/discovery.py b/youtube_dl/extractor/discovery.py index 554df6735..52c2d7ddf 100644 --- a/youtube_dl/extractor/discovery.py +++ b/youtube_dl/extractor/discovery.py @@ -16,9 +16,9 @@ class DiscoveryIE(InfoExtractor):              'ext': 'mp4',              'title': 'MythBusters: Mission Impossible Outtakes',              'description': ('Watch Jamie Hyneman and Adam Savage practice being' -                ' each other -- to the point of confusing Jamie\'s dog -- and ' -                'don\'t miss Adam moon-walking as Jamie ... behind Jamie\'s' -                ' back.'), +                            ' each other -- to the point of confusing Jamie\'s dog -- and ' +                            'don\'t miss Adam moon-walking as Jamie ... behind Jamie\'s' +                            ' back.'),              'duration': 156,          },      } @@ -29,7 +29,7 @@ class DiscoveryIE(InfoExtractor):          webpage = self._download_webpage(url, video_id)          video_list_json = self._search_regex(r'var videoListJSON = ({.*?});', -            webpage, 'video list', flags=re.DOTALL) +                                             webpage, 'video list', flags=re.DOTALL)          video_list = json.loads(video_list_json)          info = video_list['clips'][0]          formats = [] diff --git a/youtube_dl/extractor/dropbox.py b/youtube_dl/extractor/dropbox.py index aefca848a..14b6c00b0 100644 --- a/youtube_dl/extractor/dropbox.py +++ b/youtube_dl/extractor/dropbox.py @@ -11,18 +11,18 @@ from ..utils import url_basename  class DropboxIE(InfoExtractor):      _VALID_URL = r'https?://(?:www\.)?dropbox[.]com/sh?/(?P<id>[a-zA-Z0-9]{15})/.*' -    _TESTS = [{ -        'url': 'https://www.dropbox.com/s/nelirfsxnmcfbfh/youtube-dl%20test%20video%20%27%C3%A4%22BaW_jenozKc.mp4?dl=0', -        'info_dict': { -            'id': 'nelirfsxnmcfbfh', -            'ext': 'mp4', -            'title': 'youtube-dl test video \'ä"BaW_jenozKc' -        } -    }, -    { -        'url': 'https://www.dropbox.com/sh/662glsejgzoj9sr/AAByil3FGH9KFNZ13e08eSa1a/Pregame%20Ceremony%20Program%20PA%2020140518.m4v', -        'only_matching': True, -    }, +    _TESTS = [ +        { +            'url': 'https://www.dropbox.com/s/nelirfsxnmcfbfh/youtube-dl%20test%20video%20%27%C3%A4%22BaW_jenozKc.mp4?dl=0', +            'info_dict': { +                'id': 'nelirfsxnmcfbfh', +                'ext': 'mp4', +                'title': 'youtube-dl test video \'ä"BaW_jenozKc' +            } +        }, { +            'url': 'https://www.dropbox.com/sh/662glsejgzoj9sr/AAByil3FGH9KFNZ13e08eSa1a/Pregame%20Ceremony%20Program%20PA%2020140518.m4v', +            'only_matching': True, +        },      ]      def _real_extract(self, url): diff --git a/youtube_dl/extractor/ehow.py b/youtube_dl/extractor/ehow.py index f8f49a013..b766e17f2 100644 --- a/youtube_dl/extractor/ehow.py +++ b/youtube_dl/extractor/ehow.py @@ -28,7 +28,7 @@ class EHowIE(InfoExtractor):          video_id = mobj.group('id')          webpage = self._download_webpage(url, video_id)          video_url = self._search_regex(r'(?:file|source)=(http[^\'"&]*)', -            webpage, 'video URL') +                                       webpage, 'video URL')          final_url = 
compat_urllib_parse.unquote(video_url)          uploader = self._html_search_meta('uploader', webpage)          title = self._og_search_title(webpage).replace(' | eHow', '') diff --git a/youtube_dl/extractor/facebook.py b/youtube_dl/extractor/facebook.py index 104803563..c989879ba 100644 --- a/youtube_dl/extractor/facebook.py +++ b/youtube_dl/extractor/facebook.py @@ -60,8 +60,8 @@ class FacebookIE(InfoExtractor):          login_page_req = compat_urllib_request.Request(self._LOGIN_URL)          login_page_req.add_header('Cookie', 'locale=en_US')          login_page = self._download_webpage(login_page_req, None, -            note='Downloading login page', -            errnote='Unable to download login page') +                                            note='Downloading login page', +                                            errnote='Unable to download login page')          lsd = self._search_regex(              r'<input type="hidden" name="lsd" value="([^"]*)"',              login_page, 'lsd') @@ -82,7 +82,7 @@ class FacebookIE(InfoExtractor):          request.add_header('Content-Type', 'application/x-www-form-urlencoded')          try:              login_results = self._download_webpage(request, None, -                note='Logging in', errnote='unable to fetch login page') +                                                   note='Logging in', errnote='unable to fetch login page')              if re.search(r'<form(.*)name="login"(.*)</form>', login_results) is not None:                  self._downloader.report_warning('unable to log in: bad username/password, or exceded login rate limit (~3/min). Check credentials or wait.')                  return @@ -96,7 +96,7 @@ class FacebookIE(InfoExtractor):              check_req = compat_urllib_request.Request(self._CHECKPOINT_URL, urlencode_postdata(check_form))              check_req.add_header('Content-Type', 'application/x-www-form-urlencoded')              check_response = self._download_webpage(check_req, None, -                note='Confirming login') +                                                    note='Confirming login')              if re.search(r'id="checkpointSubmitButton"', check_response) is not None:                  self._downloader.report_warning('Unable to confirm login, you have to login in your brower and authorize the login.')          except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: diff --git a/youtube_dl/extractor/firsttv.py b/youtube_dl/extractor/firsttv.py index 3410daa98..08ceee4ed 100644 --- a/youtube_dl/extractor/firsttv.py +++ b/youtube_dl/extractor/firsttv.py @@ -44,9 +44,9 @@ class FirstTVIE(InfoExtractor):          duration = self._og_search_property('video:duration', webpage, 'video duration', fatal=False)          like_count = self._html_search_regex(r'title="Понравилось".*?/></label> \[(\d+)\]', -            webpage, 'like count', fatal=False) +                                             webpage, 'like count', fatal=False)          dislike_count = self._html_search_regex(r'title="Не понравилось".*?/></label> \[(\d+)\]', -            webpage, 'dislike count', fatal=False) +                                                webpage, 'dislike count', fatal=False)          return {              'id': video_id, diff --git a/youtube_dl/extractor/fivemin.py b/youtube_dl/extractor/fivemin.py index 3a50bab5c..f9c127ce6 100644 --- a/youtube_dl/extractor/fivemin.py +++ b/youtube_dl/extractor/fivemin.py @@ -50,7 +50,7 @@ class FiveMinIE(InfoExtractor):          video_id = 
mobj.group('id')          embed_url = 'https://embed.5min.com/playerseed/?playList=%s' % video_id          embed_page = self._download_webpage(embed_url, video_id, -            'Downloading embed page') +                                            'Downloading embed page')          sid = self._search_regex(r'sid=(\d+)', embed_page, 'sid')          query = compat_urllib_parse.urlencode({              'func': 'GetResults', diff --git a/youtube_dl/extractor/fktv.py b/youtube_dl/extractor/fktv.py index 21b89142c..d09d1c13a 100644 --- a/youtube_dl/extractor/fktv.py +++ b/youtube_dl/extractor/fktv.py @@ -32,9 +32,9 @@ class FKTVIE(InfoExtractor):          server = random.randint(2, 4)          video_thumbnail = 'http://fernsehkritik.tv/images/magazin/folge%d.jpg' % episode          start_webpage = self._download_webpage('http://fernsehkritik.tv/folge-%d/Start' % episode, -            episode) +                                               episode)          playlist = self._search_regex(r'playlist = (\[.*?\]);', start_webpage, -            'playlist', flags=re.DOTALL) +                                      'playlist', flags=re.DOTALL)          files = json.loads(re.sub('{[^{}]*?}', '{}', playlist))          # TODO: return a single multipart video          videos = [] diff --git a/youtube_dl/extractor/flickr.py b/youtube_dl/extractor/flickr.py index 5b0bc9d21..0c858b654 100644 --- a/youtube_dl/extractor/flickr.py +++ b/youtube_dl/extractor/flickr.py @@ -37,7 +37,7 @@ class FlickrIE(InfoExtractor):          first_xml = self._download_webpage(first_url, video_id, 'Downloading first data webpage')          node_id = self._html_search_regex(r'<Item id="id">(\d+-\d+)</Item>', -            first_xml, 'node_id') +                                          first_xml, 'node_id')          second_url = 'https://secure.flickr.com/video_playlist.gne?node_id=' + node_id + '&tech=flash&mode=playlist&bitrate=700&secret=' + secret + '&rd=video.yahoo.com&noad=1'          second_xml = self._download_webpage(second_url, video_id, 'Downloading second data webpage') diff --git a/youtube_dl/extractor/fourtube.py b/youtube_dl/extractor/fourtube.py index 24d4e9754..701241bb0 100644 --- a/youtube_dl/extractor/fourtube.py +++ b/youtube_dl/extractor/fourtube.py @@ -55,7 +55,7 @@ class FourTubeIE(InfoExtractor):          description = self._html_search_meta('description', webpage, 'description')          if description:              upload_date = self._search_regex(r'Published Date: (\d{2} [a-zA-Z]{3} \d{4})', description, 'upload date', -                fatal=False) +                                             fatal=False)              if upload_date:                  upload_date = unified_strdate(upload_date)              view_count = self._search_regex(r'Views: ([\d,\.]+)', description, 'view count', fatal=False) diff --git a/youtube_dl/extractor/francetv.py b/youtube_dl/extractor/francetv.py index 35d7d15e1..d7e9aef90 100644 --- a/youtube_dl/extractor/francetv.py +++ b/youtube_dl/extractor/francetv.py @@ -234,7 +234,7 @@ class GenerationQuoiIE(InfoExtractor):          info_json = self._download_webpage(info_url, name)          info = json.loads(info_json)          return self.url_result('http://www.dailymotion.com/video/%s' % info['id'], -            ie='Dailymotion') +                               ie='Dailymotion')  class CultureboxIE(FranceTVBaseInfoExtractor): diff --git a/youtube_dl/extractor/generic.py b/youtube_dl/extractor/generic.py index 109dd20db..d224aa8e1 100644 --- a/youtube_dl/extractor/generic.py +++ 
b/youtube_dl/extractor/generic.py @@ -784,7 +784,7 @@ class GenericIE(InfoExtractor):          # Look for Ooyala videos          mobj = (re.search(r'player.ooyala.com/[^"?]+\?[^"]*?(?:embedCode|ec)=(?P<ec>[^"&]+)', webpage) or -             re.search(r'OO.Player.create\([\'"].*?[\'"],\s*[\'"](?P<ec>.{32})[\'"]', webpage)) +                re.search(r'OO.Player.create\([\'"].*?[\'"],\s*[\'"](?P<ec>.{32})[\'"]', webpage))          if mobj is not None:              return OoyalaIE._build_url_result(mobj.group('ec')) diff --git a/youtube_dl/extractor/howcast.py b/youtube_dl/extractor/howcast.py index 4ddf06409..3f7d6666c 100644 --- a/youtube_dl/extractor/howcast.py +++ b/youtube_dl/extractor/howcast.py @@ -27,10 +27,10 @@ class HowcastIE(InfoExtractor):          self.report_extraction(video_id)          video_url = self._search_regex(r'\'?file\'?: "(http://mobile-media\.howcast\.com/[0-9]+\.mp4)', -            webpage, 'video URL') +                                       webpage, 'video URL')          video_description = self._html_search_regex(r'<meta content=(?:"([^"]+)"|\'([^\']+)\') name=\'description\'', -            webpage, 'description', fatal=False) +                                                    webpage, 'description', fatal=False)          return {              'id': video_id, diff --git a/youtube_dl/extractor/ign.py b/youtube_dl/extractor/ign.py index c80185b53..3555f98a5 100644 --- a/youtube_dl/extractor/ign.py +++ b/youtube_dl/extractor/ign.py @@ -99,7 +99,7 @@ class IGNIE(InfoExtractor):          video_id = self._find_video_id(webpage)          result = self._get_video_info(video_id)          description = self._html_search_regex(self._DESCRIPTION_RE, -            webpage, 'video description', flags=re.DOTALL) +                                              webpage, 'video description', flags=re.DOTALL)          result['description'] = description          return result diff --git a/youtube_dl/extractor/instagram.py b/youtube_dl/extractor/instagram.py index 5109f26ce..b020e2621 100644 --- a/youtube_dl/extractor/instagram.py +++ b/youtube_dl/extractor/instagram.py @@ -27,9 +27,9 @@ class InstagramIE(InfoExtractor):          video_id = mobj.group('id')          webpage = self._download_webpage(url, video_id)          uploader_id = self._search_regex(r'"owner":{"username":"(.+?)"', -            webpage, 'uploader id', fatal=False) +                                         webpage, 'uploader id', fatal=False)          desc = self._search_regex(r'"caption":"(.*?)"', webpage, 'description', -            fatal=False) +                                  fatal=False)          return {              'id': video_id, diff --git a/youtube_dl/extractor/internetvideoarchive.py b/youtube_dl/extractor/internetvideoarchive.py index c137f4a5d..1e4799187 100644 --- a/youtube_dl/extractor/internetvideoarchive.py +++ b/youtube_dl/extractor/internetvideoarchive.py @@ -45,22 +45,26 @@ class InternetVideoArchiveIE(InfoExtractor):          url = self._build_url(query)          flashconfiguration = self._download_xml(url, video_id, -            'Downloading flash configuration') +                                                'Downloading flash configuration')          file_url = flashconfiguration.find('file').text          file_url = file_url.replace('/playlist.aspx', '/mrssplaylist.aspx')          # Replace some of the parameters in the query to get the best quality          # and http links (no m3u8 manifests)          file_url = re.sub(r'(?<=\?)(.+)$', -            lambda m: 
self._clean_query(m.group()), -            file_url) +                          lambda m: self._clean_query(m.group()), +                          file_url)          info = self._download_xml(file_url, video_id, -            'Downloading video info') +                                  'Downloading video info')          item = info.find('channel/item')          def _bp(p): -            return xpath_with_ns(p, -                {'media': 'http://search.yahoo.com/mrss/', -                'jwplayer': 'http://developer.longtailvideo.com/trac/wiki/FlashFormats'}) +            return xpath_with_ns( +                p, +                { +                    'media': 'http://search.yahoo.com/mrss/', +                    'jwplayer': 'http://developer.longtailvideo.com/trac/wiki/FlashFormats', +                } +            )          formats = []          for content in item.findall(_bp('media:group/media:content')):              attr = content.attrib diff --git a/youtube_dl/extractor/jukebox.py b/youtube_dl/extractor/jukebox.py index 5aa32bf09..da8068efc 100644 --- a/youtube_dl/extractor/jukebox.py +++ b/youtube_dl/extractor/jukebox.py @@ -36,7 +36,7 @@ class JukeboxIE(InfoExtractor):          try:              video_url = self._search_regex(r'"config":{"file":"(?P<video_url>http:[^"]+\?mdtk=[0-9]+)"', -                iframe_html, 'video url') +                                           iframe_html, 'video url')              video_url = unescapeHTML(video_url).replace('\/', '/')          except RegexNotFoundError:              youtube_url = self._search_regex( @@ -47,9 +47,9 @@ class JukeboxIE(InfoExtractor):              return self.url_result(youtube_url, ie='Youtube')          title = self._html_search_regex(r'<h1 class="inline">([^<]+)</h1>', -            html, 'title') +                                        html, 'title')          artist = self._html_search_regex(r'<span id="infos_article_artist">([^<]+)</span>', -            html, 'artist') +                                         html, 'artist')          return {              'id': video_id, diff --git a/youtube_dl/extractor/kickstarter.py b/youtube_dl/extractor/kickstarter.py index 827091e60..7d4b57056 100644 --- a/youtube_dl/extractor/kickstarter.py +++ b/youtube_dl/extractor/kickstarter.py @@ -13,8 +13,10 @@ class KickStarterIE(InfoExtractor):              'id': '1404461844',              'ext': 'mp4',              'title': 'Intersection: The Story of Josh Grant by Kyle Cowling', -            'description': 'A unique motocross documentary that examines the ' -                'life and mind of one of sports most elite athletes: Josh Grant.', +            'description': ( +                'A unique motocross documentary that examines the ' +                'life and mind of one of sports most elite athletes: Josh Grant.' 
+            ),          },      }, {          'note': 'Embedded video (not using the native kickstarter video service)', diff --git a/youtube_dl/extractor/lynda.py b/youtube_dl/extractor/lynda.py index 97ca4337b..2160d6cb0 100644 --- a/youtube_dl/extractor/lynda.py +++ b/youtube_dl/extractor/lynda.py @@ -45,7 +45,7 @@ class LyndaIE(SubtitlesInfoExtractor):          video_id = mobj.group(1)          page = self._download_webpage('http://www.lynda.com/ajax/player?videoId=%s&type=video' % video_id, video_id, -            'Downloading video JSON') +                                      'Downloading video JSON')          video_json = json.loads(page)          if 'Status' in video_json: diff --git a/youtube_dl/extractor/m6.py b/youtube_dl/extractor/m6.py index 3d806323a..7e025831b 100644 --- a/youtube_dl/extractor/m6.py +++ b/youtube_dl/extractor/m6.py @@ -27,7 +27,7 @@ class M6IE(InfoExtractor):          video_id = mobj.group('id')          rss = self._download_xml('http://ws.m6.fr/v1/video/info/m6/bonus/%s' % video_id, video_id, -            'Downloading video RSS') +                                 'Downloading video RSS')          title = rss.find('./channel/item/title').text          description = rss.find('./channel/item/description').text diff --git a/youtube_dl/extractor/metacafe.py b/youtube_dl/extractor/metacafe.py index f68add6c0..858c1c0c3 100644 --- a/youtube_dl/extractor/metacafe.py +++ b/youtube_dl/extractor/metacafe.py @@ -219,8 +219,8 @@ class MetacafeIE(InfoExtractor):          description = self._og_search_description(webpage)          thumbnail = self._og_search_thumbnail(webpage)          video_uploader = self._html_search_regex( -                r'submitter=(.*?);|googletag\.pubads\(\)\.setTargeting\("(?:channel|submiter)","([^"]+)"\);', -                webpage, 'uploader nickname', fatal=False) +            r'submitter=(.*?);|googletag\.pubads\(\)\.setTargeting\("(?:channel|submiter)","([^"]+)"\);', +            webpage, 'uploader nickname', fatal=False)          duration = int_or_none(              self._html_search_meta('video:duration', webpage)) diff --git a/youtube_dl/extractor/metacritic.py b/youtube_dl/extractor/metacritic.py index 07f072924..e30320569 100644 --- a/youtube_dl/extractor/metacritic.py +++ b/youtube_dl/extractor/metacritic.py @@ -28,7 +28,7 @@ class MetacriticIE(InfoExtractor):          webpage = self._download_webpage(url, video_id)          # The xml is not well formatted, there are raw '&'          info = self._download_xml('http://www.metacritic.com/video_data?video=' + video_id, -            video_id, 'Downloading info xml', transform_source=fix_xml_ampersands) +                                  video_id, 'Downloading info xml', transform_source=fix_xml_ampersands)          clip = next(c for c in info.findall('playList/clip') if c.find('id').text == video_id)          formats = [] @@ -44,7 +44,7 @@ class MetacriticIE(InfoExtractor):          self._sort_formats(formats)          description = self._html_search_regex(r'<b>Description:</b>(.*?)</p>', -            webpage, 'description', flags=re.DOTALL) +                                              webpage, 'description', flags=re.DOTALL)          return {              'id': video_id, diff --git a/youtube_dl/extractor/mtv.py b/youtube_dl/extractor/mtv.py index b6755ff01..506d2d5a0 100644 --- a/youtube_dl/extractor/mtv.py +++ b/youtube_dl/extractor/mtv.py @@ -53,7 +53,7 @@ class MTVServicesInfoExtractor(InfoExtractor):          # Otherwise we get a webpage that would execute some javascript         
 req.add_header('Youtubedl-user-agent', 'curl/7')          webpage = self._download_webpage(req, mtvn_id, -            'Downloading mobile page') +                                         'Downloading mobile page')          metrics_url = unescapeHTML(self._search_regex(r'<a href="(http://metrics.+?)"', webpage, 'url'))          req = HEADRequest(metrics_url)          response = self._request_webpage(req, mtvn_id, 'Resolving url') @@ -66,10 +66,10 @@ class MTVServicesInfoExtractor(InfoExtractor):          if re.match(r'.*/(error_country_block\.swf|geoblock\.mp4)$', mdoc.find('.//src').text) is not None:              if mtvn_id is not None and self._MOBILE_TEMPLATE is not None:                  self.to_screen('The normal version is not available from your ' -                    'country, trying with the mobile version') +                               'country, trying with the mobile version')                  return self._extract_mobile_video_formats(mtvn_id)              raise ExtractorError('This video is not available from your country.', -                expected=True) +                                 expected=True)          formats = []          for rendition in mdoc.findall('.//rendition'): @@ -98,7 +98,7 @@ class MTVServicesInfoExtractor(InfoExtractor):              mediagen_url += '&acceptMethods=fms'          mediagen_doc = self._download_xml(mediagen_url, video_id, -            'Downloading video urls') +                                          'Downloading video urls')          description_node = itemdoc.find('description')          if description_node is not None: @@ -126,7 +126,7 @@ class MTVServicesInfoExtractor(InfoExtractor):          # This a short id that's used in the webpage urls          mtvn_id = None          mtvn_id_node = find_xpath_attr(itemdoc, './/{http://search.yahoo.com/mrss/}category', -                'scheme', 'urn:mtvn:id') +                                       'scheme', 'urn:mtvn:id')          if mtvn_id_node is not None:              mtvn_id = mtvn_id_node.text @@ -188,7 +188,7 @@ class MTVServicesEmbeddedIE(MTVServicesInfoExtractor):          video_id = self._id_from_uri(uri)          site_id = uri.replace(video_id, '')          config_url = ('http://media.mtvnservices.com/pmt/e1/players/{0}/' -            'context4/context5/config.xml'.format(site_id)) +                      'context4/context5/config.xml'.format(site_id))          config_doc = self._download_xml(config_url, video_id)          feed_node = config_doc.find('.//feed')          feed_url = feed_node.text.strip().split('?')[0] diff --git a/youtube_dl/extractor/myspace.py b/youtube_dl/extractor/myspace.py index c16939f54..553ded56d 100644 --- a/youtube_dl/extractor/myspace.py +++ b/youtube_dl/extractor/myspace.py @@ -53,7 +53,7 @@ class MySpaceIE(InfoExtractor):              # songs don't store any useful info in the 'context' variable              def search_data(name):                  return self._search_regex(r'data-%s="(.*?)"' % name, webpage, -                    name) +                                          name)              streamUrl = search_data('stream-url')              info = {                  'id': video_id, @@ -63,7 +63,7 @@ class MySpaceIE(InfoExtractor):              }          else:              context = json.loads(self._search_regex(r'context = ({.*?});', webpage, -                u'context')) +                                                    u'context'))              video = context['video']              streamUrl = video['streamUrl']              info = { diff 
--git a/youtube_dl/extractor/myvideo.py b/youtube_dl/extractor/myvideo.py index 956cf8b86..9195e53bd 100644 --- a/youtube_dl/extractor/myvideo.py +++ b/youtube_dl/extractor/myvideo.py @@ -72,7 +72,7 @@ class MyVideoIE(InfoExtractor):              video_url = mobj.group(1) + '.flv'              video_title = self._html_search_regex('<title>([^<]+)</title>', -                webpage, 'title') +                                                  webpage, 'title')              return {                  'id': video_id, @@ -162,7 +162,7 @@ class MyVideoIE(InfoExtractor):          video_swfobj = compat_urllib_parse.unquote(video_swfobj)          video_title = self._html_search_regex("<h1(?: class='globalHd')?>(.*?)</h1>", -            webpage, 'title') +                                              webpage, 'title')          return {              'id': video_id, diff --git a/youtube_dl/extractor/naver.py b/youtube_dl/extractor/naver.py index 5ce35dbf5..fbe34defd 100644 --- a/youtube_dl/extractor/naver.py +++ b/youtube_dl/extractor/naver.py @@ -30,7 +30,7 @@ class NaverIE(InfoExtractor):          video_id = mobj.group(1)          webpage = self._download_webpage(url, video_id)          m_id = re.search(r'var rmcPlayer = new nhn.rmcnmv.RMCVideoPlayer\("(.+?)", "(.+?)"', -            webpage) +                         webpage)          if m_id is None:              m_error = re.search(                  r'(?s)<div class="nation_error">\s*(?:<!--.*?-->)?\s*<p class="[^"]+">(?P<msg>.+?)</p>\s*</div>', diff --git a/youtube_dl/extractor/nfb.py b/youtube_dl/extractor/nfb.py index 09dcc8c84..7ce1d481d 100644 --- a/youtube_dl/extractor/nfb.py +++ b/youtube_dl/extractor/nfb.py @@ -38,12 +38,12 @@ class NFBIE(InfoExtractor):          page = self._download_webpage('https://www.nfb.ca/film/%s' % video_id, video_id, 'Downloading film page')          uploader_id = self._html_search_regex(r'<a class="director-link" href="/explore-all-directors/([^/]+)/"', -            page, 'director id', fatal=False) +                                              page, 'director id', fatal=False)          uploader = self._html_search_regex(r'<em class="director-name" itemprop="name">([^<]+)</em>', -            page, 'director name', fatal=False) +                                           page, 'director name', fatal=False)          request = compat_urllib_request.Request('https://www.nfb.ca/film/%s/player_config' % video_id, -            compat_urllib_parse.urlencode({'getConfig': 'true'}).encode('ascii')) +                                                compat_urllib_parse.urlencode({'getConfig': 'true'}).encode('ascii'))          request.add_header('Content-Type', 'application/x-www-form-urlencoded')          request.add_header('X-NFB-Referer', 'http://www.nfb.ca/medias/flash/NFBVideoPlayer.swf') diff --git a/youtube_dl/extractor/nhl.py b/youtube_dl/extractor/nhl.py index 31813eb68..719eb51a4 100644 --- a/youtube_dl/extractor/nhl.py +++ b/youtube_dl/extractor/nhl.py @@ -125,7 +125,7 @@ class NHLVideocenterIE(NHLBaseInfoExtractor):              self._downloader.report_warning(u'Got an empty reponse, trying '                                              'adding the "newvideos" parameter')              response = self._download_webpage(request_url + '&newvideos=true', -                playlist_title) +                                              playlist_title)              response = self._fix_json(response)          videos = json.loads(response) diff --git a/youtube_dl/extractor/niconico.py b/youtube_dl/extractor/niconico.py 
index 3b5784e8f..1d9c1a096 100644 --- a/youtube_dl/extractor/niconico.py +++ b/youtube_dl/extractor/niconico.py @@ -111,7 +111,7 @@ class NiconicoIE(InfoExtractor):          if 'deleted=' in flv_info_webpage:              raise ExtractorError('The video has been deleted.', -                expected=True) +                                 expected=True)          video_real_url = compat_urlparse.parse_qs(flv_info_webpage)['url'][0]          # Start extracting information @@ -170,13 +170,13 @@ class NiconicoPlaylistIE(InfoExtractor):          webpage = self._download_webpage(url, list_id)          entries_json = self._search_regex(r'Mylist\.preload\(\d+, (\[.*\])\);', -            webpage, 'entries') +                                          webpage, 'entries')          entries = json.loads(entries_json)          entries = [{              '_type': 'url',              'ie_key': NiconicoIE.ie_key(),              'url': ('http://www.nicovideo.jp/watch/%s' % -                entry['item_data']['video_id']), +                    entry['item_data']['video_id']),          } for entry in entries]          return { diff --git a/youtube_dl/extractor/ninegag.py b/youtube_dl/extractor/ninegag.py index 33daa0dec..16a02ad79 100644 --- a/youtube_dl/extractor/ninegag.py +++ b/youtube_dl/extractor/ninegag.py @@ -27,8 +27,7 @@ class NineGagIE(InfoExtractor):              "thumbnail": "re:^https?://",          },          'add_ie': ['Youtube'] -    }, -    { +    }, {          'url': 'http://9gag.tv/p/KklwM/alternate-banned-opening-scene-of-gravity?ref=fsidebar',          'info_dict': {              'id': 'KklwM', diff --git a/youtube_dl/extractor/normalboots.py b/youtube_dl/extractor/normalboots.py index 25e71a56e..3d35b11ac 100644 --- a/youtube_dl/extractor/normalboots.py +++ b/youtube_dl/extractor/normalboots.py @@ -31,9 +31,9 @@ class NormalbootsIE(InfoExtractor):          webpage = self._download_webpage(url, video_id)          video_uploader = self._html_search_regex(r'Posted\sby\s<a\shref="[A-Za-z0-9/]*">(?P<uploader>[A-Za-z]*)\s</a>', -            webpage, 'uploader') +                                                 webpage, 'uploader')          raw_upload_date = self._html_search_regex('<span style="text-transform:uppercase; font-size:inherit;">[A-Za-z]+, (?P<date>.*)</span>', -            webpage, 'date') +                                                  webpage, 'date')          video_upload_date = unified_strdate(raw_upload_date)          player_url = self._html_search_regex(r'<iframe\swidth="[0-9]+"\sheight="[0-9]+"\ssrc="(?P<url>[\S]+)"', webpage, 'url') diff --git a/youtube_dl/extractor/ooyala.py b/youtube_dl/extractor/ooyala.py index 30d80fa6b..f17a52858 100644 --- a/youtube_dl/extractor/ooyala.py +++ b/youtube_dl/extractor/ooyala.py @@ -43,7 +43,7 @@ class OoyalaIE(InfoExtractor):      @classmethod      def _build_url_result(cls, embed_code):          return cls.url_result(cls._url_for_embed_code(embed_code), -            ie=cls.ie_key()) +                              ie=cls.ie_key())      def _extract_result(self, info, more_info):          return { diff --git a/youtube_dl/extractor/photobucket.py b/youtube_dl/extractor/photobucket.py index 8aa69c46e..b4389e0b6 100644 --- a/youtube_dl/extractor/photobucket.py +++ b/youtube_dl/extractor/photobucket.py @@ -31,7 +31,7 @@ class PhotobucketIE(InfoExtractor):          # Extract URL, uploader, and title from webpage          self.report_extraction(video_id)          info_json = self._search_regex(r'Pb\.Data\.Shared\.put\(Pb\.Data\.Shared\.MEDIA, 
(.*?)\);', -            webpage, 'info json') +                                       webpage, 'info json')          info = json.loads(info_json)          url = compat_urllib_parse.unquote(self._html_search_regex(r'file=(.+\.mp4)', info['linkcodes']['html'], 'url'))          return { diff --git a/youtube_dl/extractor/rbmaradio.py b/youtube_dl/extractor/rbmaradio.py index 2c53ed2e1..0f8f3ebde 100644 --- a/youtube_dl/extractor/rbmaradio.py +++ b/youtube_dl/extractor/rbmaradio.py @@ -33,7 +33,7 @@ class RBMARadioIE(InfoExtractor):          webpage = self._download_webpage(url, video_id)          json_data = self._search_regex(r'window\.gon.*?gon\.show=(.+?);$', -            webpage, 'json data', flags=re.MULTILINE) +                                       webpage, 'json data', flags=re.MULTILINE)          try:              data = json.loads(json_data) diff --git a/youtube_dl/extractor/sbs.py b/youtube_dl/extractor/sbs.py index 409f8540a..b8775c2f9 100644 --- a/youtube_dl/extractor/sbs.py +++ b/youtube_dl/extractor/sbs.py @@ -27,8 +27,7 @@ class SBSIE(InfoExtractor):              'thumbnail': 're:http://.*\.jpg',          },          'add_ies': ['generic'], -    }, -    { +    }, {          'url': 'http://www.sbs.com.au/ondemand/video/320403011771/Dingo-Conservation-The-Feed',          'only_matching': True,      }] diff --git a/youtube_dl/extractor/screencast.py b/youtube_dl/extractor/screencast.py index 306869e6a..c145f6fc7 100644 --- a/youtube_dl/extractor/screencast.py +++ b/youtube_dl/extractor/screencast.py @@ -96,7 +96,7 @@ class ScreencastIE(InfoExtractor):          if title is None:              title = self._html_search_regex(                  [r'<b>Title:</b> ([^<]*)</div>', -                r'class="tabSeperator">></span><span class="tabText">(.*?)<'], +                 r'class="tabSeperator">></span><span class="tabText">(.*?)<'],                  webpage, 'title')          thumbnail = self._og_search_thumbnail(webpage)          description = self._og_search_description(webpage, default=None) diff --git a/youtube_dl/extractor/sina.py b/youtube_dl/extractor/sina.py index 2909ef18b..5eadbb7ea 100644 --- a/youtube_dl/extractor/sina.py +++ b/youtube_dl/extractor/sina.py @@ -46,7 +46,7 @@ class SinaIE(InfoExtractor):      def _extract_video(self, video_id):          data = compat_urllib_parse.urlencode({'vid': video_id})          url_doc = self._download_xml('http://v.iask.com/v_play.php?%s' % data, -            video_id, 'Downloading video url') +                                     video_id, 'Downloading video url')          image_page = self._download_webpage(              'http://interface.video.sina.com.cn/interface/common/getVideoImage.php?%s' % data,              video_id, 'Downloading thumbnail info') diff --git a/youtube_dl/extractor/slutload.py b/youtube_dl/extractor/slutload.py index e6e7d0865..3df71304d 100644 --- a/youtube_dl/extractor/slutload.py +++ b/youtube_dl/extractor/slutload.py @@ -26,7 +26,7 @@ class SlutloadIE(InfoExtractor):          webpage = self._download_webpage(url, video_id)          video_title = self._html_search_regex(r'<h1><strong>([^<]+)</strong>', -            webpage, 'title').strip() +                                              webpage, 'title').strip()          video_url = self._html_search_regex(              r'(?s)<div id="vidPlayer"\s+data-url="([^"]+)"', diff --git a/youtube_dl/extractor/smotri.py b/youtube_dl/extractor/smotri.py index b6a71305f..0751efc61 100644 --- a/youtube_dl/extractor/smotri.py +++ b/youtube_dl/extractor/smotri.py @@ 
-282,7 +282,7 @@ class SmotriBroadcastIE(InfoExtractor):              (username, password) = self._get_login_info()              if username is None:                  raise ExtractorError('Erotic broadcasts allowed only for registered users, ' -                    'use --username and --password options to provide account credentials.', expected=True) +                                     'use --username and --password options to provide account credentials.', expected=True)              login_form = {                  'login-hint53': '1', diff --git a/youtube_dl/extractor/soundcloud.py b/youtube_dl/extractor/soundcloud.py index f92f7fa24..410477d74 100644 --- a/youtube_dl/extractor/soundcloud.py +++ b/youtube_dl/extractor/soundcloud.py @@ -159,7 +159,7 @@ class SoundcloudIE(InfoExtractor):          # We have to retrieve the url          streams_url = ('http://api.soundcloud.com/i1/tracks/{0}/streams?' -            'client_id={1}&secret_token={2}'.format(track_id, self._IPHONE_CLIENT_ID, secret_token)) +                       'client_id={1}&secret_token={2}'.format(track_id, self._IPHONE_CLIENT_ID, secret_token))          format_dict = self._download_json(              streams_url,              track_id, 'Downloading track url') diff --git a/youtube_dl/extractor/stanfordoc.py b/youtube_dl/extractor/stanfordoc.py index 5feb4ff83..4a3d8bb8f 100644 --- a/youtube_dl/extractor/stanfordoc.py +++ b/youtube_dl/extractor/stanfordoc.py @@ -82,7 +82,7 @@ class StanfordOpenClassroomIE(InfoExtractor):              rootURL = 'http://openclassroom.stanford.edu/MainFolder/HomePage.php'              rootpage = self._download_webpage(rootURL, info['id'], -                errnote='Unable to download course info page') +                                              errnote='Unable to download course info page')              links = orderedSet(re.findall('<a href="(CoursePage.php\?[^"]+)">', rootpage))              info['entries'] = [self.url_result( diff --git a/youtube_dl/extractor/teamcoco.py b/youtube_dl/extractor/teamcoco.py index d5dda34f6..5fa67eb8d 100644 --- a/youtube_dl/extractor/teamcoco.py +++ b/youtube_dl/extractor/teamcoco.py @@ -8,24 +8,23 @@ from .common import InfoExtractor  class TeamcocoIE(InfoExtractor):      _VALID_URL = r'http://teamcoco\.com/video/(?P<video_id>[0-9]+)?/?(?P<display_id>.*)'      _TESTS = [ -    { -        'url': 'http://teamcoco.com/video/80187/conan-becomes-a-mary-kay-beauty-consultant', -        'file': '80187.mp4', -        'md5': '3f7746aa0dc86de18df7539903d399ea', -        'info_dict': { -            'title': 'Conan Becomes A Mary Kay Beauty Consultant', -            'description': 'Mary Kay is perhaps the most trusted name in female beauty, so of course Conan is a natural choice to sell their products.' +        { +            'url': 'http://teamcoco.com/video/80187/conan-becomes-a-mary-kay-beauty-consultant', +            'file': '80187.mp4', +            'md5': '3f7746aa0dc86de18df7539903d399ea', +            'info_dict': { +                'title': 'Conan Becomes A Mary Kay Beauty Consultant', +                'description': 'Mary Kay is perhaps the most trusted name in female beauty, so of course Conan is a natural choice to sell their products.' +            } +        }, { +            'url': 'http://teamcoco.com/video/louis-ck-interview-george-w-bush', +            'file': '19705.mp4', +            'md5': 'cde9ba0fa3506f5f017ce11ead928f9a', +            'info_dict': { +                "description": "Louis C.K. got starstruck by George W. Bush, so what? 
Part one.", +                "title": "Louis C.K. Interview Pt. 1 11/3/11" +            }          } -    }, -    { -        'url': 'http://teamcoco.com/video/louis-ck-interview-george-w-bush', -        'file': '19705.mp4', -        'md5': 'cde9ba0fa3506f5f017ce11ead928f9a', -        'info_dict': { -            "description": "Louis C.K. got starstruck by George W. Bush, so what? Part one.", -            "title": "Louis C.K. Interview Pt. 1 11/3/11" -        } -    }      ]      def _real_extract(self, url): diff --git a/youtube_dl/extractor/ted.py b/youtube_dl/extractor/ted.py index 855038077..f8a87afda 100644 --- a/youtube_dl/extractor/ted.py +++ b/youtube_dl/extractor/ted.py @@ -33,9 +33,9 @@ class TEDIE(SubtitlesInfoExtractor):              'ext': 'mp4',              'title': 'The illusion of consciousness',              'description': ('Philosopher Dan Dennett makes a compelling ' -                'argument that not only don\'t we understand our own ' -                'consciousness, but that half the time our brains are ' -                'actively fooling us.'), +                            'argument that not only don\'t we understand our own ' +                            'consciousness, but that half the time our brains are ' +                            'actively fooling us.'),              'uploader': 'Dan Dennett',              'width': 854,              'duration': 1308, @@ -93,7 +93,7 @@ class TEDIE(SubtitlesInfoExtractor):      def _extract_info(self, webpage):          info_json = self._search_regex(r'q\("\w+.init",({.+})\)</script>', -            webpage, 'info json') +                                       webpage, 'info json')          return json.loads(info_json)      def _real_extract(self, url): @@ -113,7 +113,7 @@ class TEDIE(SubtitlesInfoExtractor):          '''Returns the videos of the playlist'''          webpage = self._download_webpage(url, name, -            'Downloading playlist webpage') +                                         'Downloading playlist webpage')          info = self._extract_info(webpage)          playlist_info = info['playlist'] diff --git a/youtube_dl/extractor/tf1.py b/youtube_dl/extractor/tf1.py index fdae17b1b..6e61cc9e2 100644 --- a/youtube_dl/extractor/tf1.py +++ b/youtube_dl/extractor/tf1.py @@ -30,7 +30,7 @@ class TF1IE(InfoExtractor):          embed_url = self._html_search_regex(              r'"(https://www.wat.tv/embedframe/.*?)"', webpage, 'embed url')          embed_page = self._download_webpage(embed_url, video_id, -            'Downloading embed player page') +                                            'Downloading embed player page')          wat_id = self._search_regex(r'UVID=(.*?)&', embed_page, 'wat id')          wat_info = self._download_json(              'http://www.wat.tv/interface/contentv3/%s' % wat_id, video_id) diff --git a/youtube_dl/extractor/theplatform.py b/youtube_dl/extractor/theplatform.py index 522a095a2..e2653d62d 100644 --- a/youtube_dl/extractor/theplatform.py +++ b/youtube_dl/extractor/theplatform.py @@ -47,7 +47,7 @@ class ThePlatformIE(InfoExtractor):              smil_url = config['releaseUrl'] + '&format=SMIL&formats=MPEG4&manifest=f4m'          else:              smil_url = ('http://link.theplatform.com/s/dJ5BDC/{0}/meta.smil?' 
-                'format=smil&mbr=true'.format(video_id)) +                        'format=smil&mbr=true'.format(video_id))          meta = self._download_xml(smil_url, video_id)          try: diff --git a/youtube_dl/extractor/tinypic.py b/youtube_dl/extractor/tinypic.py index 6ba5dc5f1..4fe89dbe5 100644 --- a/youtube_dl/extractor/tinypic.py +++ b/youtube_dl/extractor/tinypic.py @@ -28,7 +28,7 @@ class TinyPicIE(InfoExtractor):          webpage = self._download_webpage(url, video_id, 'Downloading page')          mobj = re.search(r'(?m)fo\.addVariable\("file",\s"(?P<fileid>[\da-z]+)"\);\n' -            '\s+fo\.addVariable\("s",\s"(?P<serverid>\d+)"\);', webpage) +                         '\s+fo\.addVariable\("s",\s"(?P<serverid>\d+)"\);', webpage)          if mobj is None:              raise ExtractorError('Video %s does not exist' % video_id, expected=True) diff --git a/youtube_dl/extractor/traileraddict.py b/youtube_dl/extractor/traileraddict.py index b1a440e79..1c53a3fd0 100644 --- a/youtube_dl/extractor/traileraddict.py +++ b/youtube_dl/extractor/traileraddict.py @@ -25,7 +25,7 @@ class TrailerAddictIE(InfoExtractor):          webpage = self._download_webpage(url, name)          title = self._search_regex(r'<title>(.+?)</title>', -                webpage, 'video title').replace(' - Trailer Addict', '') +                                   webpage, 'video title').replace(' - Trailer Addict', '')          view_count_str = self._search_regex(              r'<span class="views_n">([0-9,.]+)</span>',              webpage, 'view count', fatal=False) @@ -46,9 +46,9 @@ class TrailerAddictIE(InfoExtractor):          info_webpage = self._download_webpage(info_url, video_id, "Downloading the info webpage")          final_url = self._search_regex(r'&fileurl=(.+)', -                info_webpage, 'Download url').replace('%3F', '?') +                                       info_webpage, 'Download url').replace('%3F', '?')          thumbnail_url = self._search_regex(r'&image=(.+?)&', -                info_webpage, 'thumbnail url') +                                           info_webpage, 'thumbnail url')          description = self._html_search_regex(              r'(?s)<div class="synopsis">.*?<div class="movie_label_info"[^>]*>(.*?)</div>', diff --git a/youtube_dl/extractor/tumblr.py b/youtube_dl/extractor/tumblr.py index 40c53ff17..2a1ae5a71 100644 --- a/youtube_dl/extractor/tumblr.py +++ b/youtube_dl/extractor/tumblr.py @@ -43,7 +43,7 @@ class TumblrIE(InfoExtractor):              webpage, 'iframe url')          iframe = self._download_webpage(iframe_url, video_id)          video_url = self._search_regex(r'<source src="([^"]+)"', -            iframe, 'video url') +                                       iframe, 'video url')          # The only place where you can get a title, it's not complete,          # but searching in other places doesn't work for all videos diff --git a/youtube_dl/extractor/udemy.py b/youtube_dl/extractor/udemy.py index 7df11fc19..1a7d01c67 100644 --- a/youtube_dl/extractor/udemy.py +++ b/youtube_dl/extractor/udemy.py @@ -154,7 +154,7 @@ class UdemyCourseIE(UdemyIE):              self.to_screen('%s: Already enrolled in' % course_id)          response = self._download_json('https://www.udemy.com/api-1.1/courses/%s/curriculum' % course_id, -            course_id, 'Downloading course curriculum') +                                       course_id, 'Downloading course curriculum')          entries = [              self.url_result('https://www.udemy.com/%s/#/lecture/%s' % (course_path, 
asset['id']), 'Udemy') diff --git a/youtube_dl/extractor/ustream.py b/youtube_dl/extractor/ustream.py index 875450908..53dc3a496 100644 --- a/youtube_dl/extractor/ustream.py +++ b/youtube_dl/extractor/ustream.py @@ -45,13 +45,13 @@ class UstreamIE(InfoExtractor):          self.report_extraction(video_id)          video_title = self._html_search_regex(r'data-title="(?P<title>.+)"', -            webpage, 'title') +                                              webpage, 'title')          uploader = self._html_search_regex(r'data-content-type="channel".*?>(?P<uploader>.*?)</a>', -            webpage, 'uploader', fatal=False, flags=re.DOTALL) +                                           webpage, 'uploader', fatal=False, flags=re.DOTALL)          thumbnail = self._html_search_regex(r'<link rel="image_src" href="(?P<thumb>.*?)"', -            webpage, 'thumbnail', fatal=False) +                                            webpage, 'thumbnail', fatal=False)          return {              'id': video_id, diff --git a/youtube_dl/extractor/vbox7.py b/youtube_dl/extractor/vbox7.py index ebd64f0f5..455b6d9da 100644 --- a/youtube_dl/extractor/vbox7.py +++ b/youtube_dl/extractor/vbox7.py @@ -30,13 +30,13 @@ class Vbox7IE(InfoExtractor):          redirect_page, urlh = self._download_webpage_handle(url, video_id)          new_location = self._search_regex(r'window\.location = \'(.*)\';', -            redirect_page, 'redirect location') +                                          redirect_page, 'redirect location')          redirect_url = urlh.geturl() + new_location          webpage = self._download_webpage(redirect_url, video_id, -            'Downloading redirect page') +                                         'Downloading redirect page')          title = self._html_search_regex(r'<title>(.*)</title>', -            webpage, 'title').split('/')[0].strip() +                                        webpage, 'title').split('/')[0].strip()          info_url = "http://vbox7.com/play/magare.do"          data = compat_urllib_parse.urlencode({'as3': '1', 'vid': video_id}) diff --git a/youtube_dl/extractor/veehd.py b/youtube_dl/extractor/veehd.py index 77b1f91ce..94647d1c8 100644 --- a/youtube_dl/extractor/veehd.py +++ b/youtube_dl/extractor/veehd.py @@ -48,11 +48,11 @@ class VeeHDIE(InfoExtractor):          video_url = compat_urlparse.unquote(config['clip']['url'])          title = clean_html(get_element_by_id('videoName', webpage).rpartition('|')[0])          uploader_id = self._html_search_regex(r'<a href="/profile/\d+">(.+?)</a>', -            webpage, 'uploader') +                                              webpage, 'uploader')          thumbnail = self._search_regex(r'<img id="veehdpreview" src="(.+?)"', -            webpage, 'thumbnail') +                                       webpage, 'thumbnail')          description = self._html_search_regex(r'<td class="infodropdown".*?<div>(.*?)<ul', -            webpage, 'description', flags=re.DOTALL) +                                              webpage, 'description', flags=re.DOTALL)          return {              '_type': 'video', diff --git a/youtube_dl/extractor/vesti.py b/youtube_dl/extractor/vesti.py index 5aebcecd7..a0c59a2e0 100644 --- a/youtube_dl/extractor/vesti.py +++ b/youtube_dl/extractor/vesti.py @@ -112,7 +112,7 @@ class VestiIE(InfoExtractor):          if mobj:              video_id = mobj.group('id')              page = self._download_webpage('http://www.vesti.ru/only_video.html?vid=%s' % video_id, video_id, -                'Downloading video 
page') +                                          'Downloading video page')          rutv_url = RUTVIE._extract_url(page)          if rutv_url: diff --git a/youtube_dl/extractor/videofyme.py b/youtube_dl/extractor/videofyme.py index d69fe1e77..d764e60fb 100644 --- a/youtube_dl/extractor/videofyme.py +++ b/youtube_dl/extractor/videofyme.py @@ -28,11 +28,11 @@ class VideofyMeIE(InfoExtractor):          mobj = re.match(self._VALID_URL, url)          video_id = mobj.group('id')          config = self._download_xml('http://sunshine.videofy.me/?videoId=%s' % video_id, -                                            video_id) +                                    video_id)          video = config.find('video')          sources = video.find('sources')          url_node = next(node for node in [find_xpath_attr(sources, 'source', 'id', 'HQ %s' % key) -            for key in ['on', 'av', 'off']] if node is not None) +                                          for key in ['on', 'av', 'off']] if node is not None)          video_url = url_node.find('url').text          return {'id': video_id, diff --git a/youtube_dl/extractor/vimeo.py b/youtube_dl/extractor/vimeo.py index c744d4f04..06b0bed41 100644 --- a/youtube_dl/extractor/vimeo.py +++ b/youtube_dl/extractor/vimeo.py @@ -260,7 +260,7 @@ class VimeoIE(VimeoBaseInfoExtractor, SubtitlesInfoExtractor):                  else:                      config_re = [r' = {config:({.+?}),assets:', r'(?:[abc])=({.+?});']                  config = self._search_regex(config_re, webpage, 'info section', -                    flags=re.DOTALL) +                                            flags=re.DOTALL)                  config = json.loads(config)          except Exception as e:              if re.search('The creator of this video has not given you permission to embed it on this domain.', webpage): diff --git a/youtube_dl/extractor/vk.py b/youtube_dl/extractor/vk.py index 9d6ec4382..d9acafd70 100644 --- a/youtube_dl/extractor/vk.py +++ b/youtube_dl/extractor/vk.py @@ -121,7 +121,7 @@ class VKIE(InfoExtractor):          }          request = compat_urllib_request.Request('https://login.vk.com/?act=login', -            compat_urllib_parse.urlencode(login_form).encode('utf-8')) +                                                compat_urllib_parse.urlencode(login_form).encode('utf-8'))          login_page = self._download_webpage(request, None, note='Logging in as %s' % username)          if re.search(r'onLoginFailed', login_page): @@ -175,7 +175,7 @@ class VKIE(InfoExtractor):          upload_date = None          mobj = re.search(r'id="mv_date_wrap".*?Added ([a-zA-Z]+ [0-9]+), ([0-9]+) at', info_page)          if mobj is not None: -            x = mobj.group(1) + ' ' + mobj.group(2) +            mobj.group(1) + ' ' + mobj.group(2)              upload_date = unified_strdate(mobj.group(1) + ' ' + mobj.group(2))          formats = [{ diff --git a/youtube_dl/extractor/weibo.py b/youtube_dl/extractor/weibo.py index b24297a40..20bb039d3 100644 --- a/youtube_dl/extractor/weibo.py +++ b/youtube_dl/extractor/weibo.py @@ -41,7 +41,7 @@ class WeiboIE(InfoExtractor):          videos_urls = sorted(videos_urls, key=lambda u: 'video.sina.com' in u)          player_url = videos_urls[-1]          m_sina = re.match(r'https?://video\.sina\.com\.cn/v/b/(\d+)-\d+\.html', -            player_url) +                          player_url)          if m_sina is not None:              self.to_screen('Sina video detected')              sina_id = m_sina.group(1) diff --git a/youtube_dl/extractor/xhamster.py 
b/youtube_dl/extractor/xhamster.py index 1e6c94623..6b37bcbc9 100644 --- a/youtube_dl/extractor/xhamster.py +++ b/youtube_dl/extractor/xhamster.py @@ -67,17 +67,17 @@ class XHamsterIE(InfoExtractor):          description = mobj.group(1) if mobj else None          upload_date = self._html_search_regex(r'hint=\'(\d{4}-\d{2}-\d{2}) \d{2}:\d{2}:\d{2} [A-Z]{3,4}\'', -            webpage, 'upload date', fatal=False) +                                              webpage, 'upload date', fatal=False)          if upload_date:              upload_date = unified_strdate(upload_date)          uploader_id = self._html_search_regex(r'<a href=\'/user/[^>]+>(?P<uploader_id>[^<]+)', -            webpage, 'uploader id', default='anonymous') +                                              webpage, 'uploader id', default='anonymous')          thumbnail = self._html_search_regex(r'<video\s+.*?poster="([^"]+)".*?>', webpage, 'thumbnail', fatal=False)          duration = parse_duration(self._html_search_regex(r'<span>Runtime:</span> (\d+:\d+)</div>', -            webpage, 'duration', fatal=False)) +                                                          webpage, 'duration', fatal=False))          view_count = self._html_search_regex(r'<span>Views:</span> ([^<]+)</div>', webpage, 'view count', fatal=False)          if view_count: diff --git a/youtube_dl/extractor/xnxx.py b/youtube_dl/extractor/xnxx.py index 7a73b2430..53ed7ef5a 100644 --- a/youtube_dl/extractor/xnxx.py +++ b/youtube_dl/extractor/xnxx.py @@ -30,14 +30,14 @@ class XNXXIE(InfoExtractor):          webpage = self._download_webpage(url, video_id)          video_url = self._search_regex(r'flv_url=(.*?)&', -            webpage, 'video URL') +                                       webpage, 'video URL')          video_url = compat_urllib_parse.unquote(video_url)          video_title = self._html_search_regex(r'<title>(.*?)\s+-\s+XNXX.COM', -            webpage, 'title') +                                              webpage, 'title')          video_thumbnail = self._search_regex(r'url_bigthumb=(.*?)&', -            webpage, 'thumbnail', fatal=False) +                                             webpage, 'thumbnail', fatal=False)          return {              'id': video_id, diff --git a/youtube_dl/extractor/yahoo.py b/youtube_dl/extractor/yahoo.py index 39caf60f2..0fdb12243 100644 --- a/youtube_dl/extractor/yahoo.py +++ b/youtube_dl/extractor/yahoo.py @@ -229,7 +229,7 @@ class YahooSearchIE(SearchInfoExtractor):          for pagenum in itertools.count(0):              result_url = 'http://video.search.yahoo.com/search/?p=%s&fr=screen&o=js&gs=0&b=%d' % (compat_urllib_parse.quote_plus(query), pagenum * 30)              info = self._download_json(result_url, query, -                note='Downloading results page ' + str(pagenum + 1)) +                                       note='Downloading results page ' + str(pagenum + 1))              m = info['m']              results = info['results'] diff --git a/youtube_dl/extractor/youku.py b/youtube_dl/extractor/youku.py index 830ae6cd9..97b98bbe8 100644 --- a/youtube_dl/extractor/youku.py +++ b/youtube_dl/extractor/youku.py @@ -74,7 +74,7 @@ class YoukuIE(InfoExtractor):              # -8 means blocked outside China.              error = config['data'][0].get('error')  # Chinese and English, separated by newline.              
raise ExtractorError(error or 'Server reported error %i' % error_code, -                expected=True) +                                 expected=True)          video_title = config['data'][0]['title']          seed = config['data'][0]['seed'] diff --git a/youtube_dl/extractor/youporn.py b/youtube_dl/extractor/youporn.py index ee61e2381..97ef9c17e 100644 --- a/youtube_dl/extractor/youporn.py +++ b/youtube_dl/extractor/youporn.py @@ -64,7 +64,7 @@ class YouPornIE(InfoExtractor):          # Get all of the links from the page          DOWNLOAD_LIST_RE = r'(?s)<ul class="downloadList">(?P<download_list>.*?)</ul>'          download_list_html = self._search_regex(DOWNLOAD_LIST_RE, -            webpage, 'download list').strip() +                                                webpage, 'download list').strip()          LINK_RE = r'<a href="([^"]+)">'          links = re.findall(LINK_RE, download_list_html) diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py index 8711b06d4..fa3ead95b 100644 --- a/youtube_dl/extractor/youtube.py +++ b/youtube_dl/extractor/youtube.py @@ -950,7 +950,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):                          parts_sizes = self._signature_cache_id(encrypted_sig)                          self.to_screen('{%s} signature length %s, %s' % -                            (format_id, parts_sizes, player_desc)) +                                       (format_id, parts_sizes, player_desc))                      signature = self._decrypt_signature(                          encrypted_sig, video_id, player_url, age_gate) @@ -1214,7 +1214,7 @@ class YoutubePlaylistIE(YoutubeBaseInfoExtractor):  class YoutubeTopListIE(YoutubePlaylistIE):      IE_NAME = 'youtube:toplist'      IE_DESC = ('YouTube.com top lists, "yttoplist:{channel}:{list title}"' -        ' (Example: "yttoplist:music:Top Tracks")') +               ' (Example: "yttoplist:music:Top Tracks")')      _VALID_URL = r'yttoplist:(?P<chann>.*?):(?P<title>.*?)$'      _TESTS = [{          'url': 'yttoplist:music:Trending', | 
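
For reference, the indentation style applied throughout these hunks is PEP 8 / flake8 continuation-line alignment (E127/E128): arguments that wrap onto a following line are indented so they line up under the opening parenthesis of the call, instead of a fixed four- or eight-space indent. A minimal runnable sketch of that alignment, using a hypothetical search_regex() stand-in for the real InfoExtractor._search_regex() (names and sample HTML are illustrative only, not taken from the patch):

    import re

    def search_regex(pattern, string, name):
        # Hypothetical stand-in for InfoExtractor._search_regex, shown only to
        # illustrate the continuation-line alignment (flake8 E127/E128).
        mobj = re.search(pattern, string)
        if mobj is None:
            raise ValueError('Unable to extract %s' % name)
        return mobj.group(1)

    webpage = '<title>Some Example Video</title>'

    # Continuation arguments aligned under the opening parenthesis of the call:
    title = search_regex(r'<title>(.+?)</title>',
                         webpage, 'title')
    print(title)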
