author | Jouke Waleson <jouke.waleson@mendix.com> | 2014-11-23 21:20:46 +0100
---|---|---
committer | Jouke Waleson <jouke.waleson@mendix.com> | 2014-11-23 21:20:46 +0100
commit | 8bcc875676b56c062a4fdd81763a6adb0fb1390c (patch) |
tree | 429ecb8bb9ee320a0dd72e064ec251f588d479bf |
parent | 5f6a1245ffa9276c1af59b0835afeef67e2fb5b1 (diff) |
PEP8: more applied
34 files changed, 124 insertions, 133 deletions
diff --git a/devscripts/gh-pages/generate-download.py b/devscripts/gh-pages/generate-download.py
index 55912e12c..e90c787fd 100755
--- a/devscripts/gh-pages/generate-download.py
+++ b/devscripts/gh-pages/generate-download.py
@@ -1,8 +1,5 @@
 #!/usr/bin/env python3
 import hashlib
-import shutil
-import subprocess
-import tempfile
 import urllib.request
 import json
diff --git a/devscripts/transition_helper_exe/youtube-dl.py b/devscripts/transition_helper_exe/youtube-dl.py
index edb449fb3..d75316cfa 100644
--- a/devscripts/transition_helper_exe/youtube-dl.py
+++ b/devscripts/transition_helper_exe/youtube-dl.py
@@ -32,7 +32,7 @@ def rsa_verify(message, signature, key):
     signature = signature[2:]
     if not b('\x00') in signature:
         return False
-    signature = signature[signature.index(b('\x00'))+1:]
+    signature = signature[signature.index(b('\x00')) +1:]
     if not signature.startswith(b('\x30\x31\x30\x0D\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01\x05\x00\x04\x20')):
         return False
     signature = signature[19:]
diff --git a/test/test_utils.py b/test/test_utils.py
index 380d1059d..8307599b3 100644
--- a/test/test_utils.py
+++ b/test/test_utils.py
@@ -45,7 +45,6 @@ from youtube_dl.utils import (
     escape_rfc3986,
     escape_url,
     js_to_json,
-    get_filesystem_encoding,
     intlist_to_bytes,
     args_to_str,
 )
diff --git a/test/test_youtube_lists.py b/test/test_youtube_lists.py
index 452d342ae..4ceba64ae 100644
--- a/test/test_youtube_lists.py
+++ b/test/test_youtube_lists.py
@@ -12,10 +12,6 @@ from test.helper import FakeYDL
 from youtube_dl.extractor import (
     YoutubePlaylistIE,
     YoutubeIE,
-    YoutubeChannelIE,
-    YoutubeShowIE,
-    YoutubeTopListIE,
-    YoutubeSearchURLIE,
 )
diff --git a/youtube_dl/YoutubeDL.py b/youtube_dl/YoutubeDL.py
index c1a529f13..d9e93f5d2 100755
--- a/youtube_dl/YoutubeDL.py
+++ b/youtube_dl/YoutubeDL.py
@@ -29,7 +29,6 @@ from .compat import (
     compat_str,
     compat_urllib_error,
     compat_urllib_request,
-    shlex_quote,
 )
 from .utils import (
     escape_url,
diff --git a/youtube_dl/__init__.py b/youtube_dl/__init__.py
index ee3067134..427b6ad27 100644
--- a/youtube_dl/__init__.py
+++ b/youtube_dl/__init__.py
@@ -76,10 +76,10 @@ def _real_main(argv=None):
     if opts.headers is not None:
         for h in opts.headers:
             if h.find(':', 1) < 0:
-                parser.error('wrong header formatting, it should be key:value, not "%s"'%h)
+                parser.error('wrong header formatting, it should be key:value, not "%s"' % h)
             key, value = h.split(':', 2)
             if opts.verbose:
-                write_string('[debug] Adding header from command line option %s:%s\n'%(key, value))
+                write_string('[debug] Adding header from command line option %s:%s\n' % (key, value))
             std_headers[key] = value

     # Dump user agent
@@ -197,13 +197,13 @@ def _real_main(argv=None):
         if opts.outtmpl is not None:
             opts.outtmpl = opts.outtmpl.decode(preferredencoding())
     outtmpl = ((opts.outtmpl is not None and opts.outtmpl)
-               or (opts.format == '-1' and opts.usetitle and '%(title)s-%(id)s-%(format)s.%(ext)s')
-               or (opts.format == '-1' and '%(id)s-%(format)s.%(ext)s')
-               or (opts.usetitle and opts.autonumber and '%(autonumber)s-%(title)s-%(id)s.%(ext)s')
-               or (opts.usetitle and '%(title)s-%(id)s.%(ext)s')
-               or (opts.useid and '%(id)s.%(ext)s')
-               or (opts.autonumber and '%(autonumber)s-%(id)s.%(ext)s')
-               or DEFAULT_OUTTMPL)
+               or (opts.format == '-1' and opts.usetitle and '%(title)s-%(id)s-%(format)s.%(ext)s')
+               or (opts.format == '-1' and '%(id)s-%(format)s.%(ext)s')
+               or (opts.usetitle and opts.autonumber and '%(autonumber)s-%(title)s-%(id)s.%(ext)s')
+               or (opts.usetitle and '%(title)s-%(id)s.%(ext)s')
+               or (opts.useid and '%(id)s.%(ext)s')
+               or (opts.autonumber and '%(autonumber)s-%(id)s.%(ext)s')
+               or DEFAULT_OUTTMPL)
     if not os.path.splitext(outtmpl)[1] and opts.extractaudio:
         parser.error('Cannot download a video and extract audio into the same'
                      ' file! Use "{0}.%(ext)s" instead of "{0}" as the output'
diff --git a/youtube_dl/aes.py b/youtube_dl/aes.py
index ccfd73770..2db8ab3a1 100644
--- a/youtube_dl/aes.py
+++ b/youtube_dl/aes.py
@@ -24,8 +24,8 @@ def aes_ctr_decrypt(data, key, counter):
     decrypted_data = []
     for i in range(block_count):
         counter_block = counter.next_value()
-        block = data[i*BLOCK_SIZE_BYTES: (i+1)*BLOCK_SIZE_BYTES]
-        block += [0]*(BLOCK_SIZE_BYTES - len(block))
+        block = data[i *BLOCK_SIZE_BYTES: (i +1) *BLOCK_SIZE_BYTES]
+        block += [0] *(BLOCK_SIZE_BYTES - len(block))
         cipher_counter_block = aes_encrypt(counter_block, expanded_key)
         decrypted_data += xor(block, cipher_counter_block)
@@ -49,8 +49,8 @@ def aes_cbc_decrypt(data, key, iv):
     decrypted_data = []
     previous_cipher_block = iv
     for i in range(block_count):
-        block = data[i*BLOCK_SIZE_BYTES: (i+1)*BLOCK_SIZE_BYTES]
-        block += [0]*(BLOCK_SIZE_BYTES - len(block))
+        block = data[i *BLOCK_SIZE_BYTES: (i +1) *BLOCK_SIZE_BYTES]
+        block += [0] *(BLOCK_SIZE_BYTES - len(block))
         decrypted_block = aes_decrypt(block, expanded_key)
         decrypted_data += xor(decrypted_block, previous_cipher_block)
@@ -76,20 +76,20 @@ def key_expansion(data):
         temp = data[-4:]
         temp = key_schedule_core(temp, rcon_iteration)
         rcon_iteration += 1
-        data += xor(temp, data[-key_size_bytes: 4-key_size_bytes])
+        data += xor(temp, data[-key_size_bytes: 4 -key_size_bytes])
         for _ in range(3):
             temp = data[-4:]
-            data += xor(temp, data[-key_size_bytes: 4-key_size_bytes])
+            data += xor(temp, data[-key_size_bytes: 4 -key_size_bytes])
         if key_size_bytes == 32:
             temp = data[-4:]
             temp = sub_bytes(temp)
-            data += xor(temp, data[-key_size_bytes: 4-key_size_bytes])
+            data += xor(temp, data[-key_size_bytes: 4 -key_size_bytes])
         for _ in range(3 if key_size_bytes == 32 else 2 if key_size_bytes == 24 else 0):
             temp = data[-4:]
-            data += xor(temp, data[-key_size_bytes: 4-key_size_bytes])
+            data += xor(temp, data[-key_size_bytes: 4 -key_size_bytes])
     data = data[:expanded_key_size_bytes]
     return data
@@ -106,12 +106,12 @@ def aes_encrypt(data, expanded_key):
     rounds = len(expanded_key) // BLOCK_SIZE_BYTES - 1
     data = xor(data, expanded_key[:BLOCK_SIZE_BYTES])
-    for i in range(1, rounds+1):
+    for i in range(1, rounds +1):
         data = sub_bytes(data)
         data = shift_rows(data)
         if i != rounds:
             data = mix_columns(data)
-        data = xor(data, expanded_key[i*BLOCK_SIZE_BYTES: (i+1)*BLOCK_SIZE_BYTES])
+        data = xor(data, expanded_key[i *BLOCK_SIZE_BYTES: (i +1) *BLOCK_SIZE_BYTES])
     return data
@@ -127,7 +127,7 @@ def aes_decrypt(data, expanded_key):
     rounds = len(expanded_key) // BLOCK_SIZE_BYTES - 1
     for i in range(rounds, 0, -1):
-        data = xor(data, expanded_key[i*BLOCK_SIZE_BYTES: (i+1)*BLOCK_SIZE_BYTES])
+        data = xor(data, expanded_key[i *BLOCK_SIZE_BYTES: (i +1) *BLOCK_SIZE_BYTES])
         if i != rounds:
             data = mix_columns_inv(data)
         data = shift_rows_inv(data)
@@ -155,14 +155,14 @@ def aes_decrypt_text(data, password, key_size_bytes):
     data = bytes_to_intlist(base64.b64decode(data))
     password = bytes_to_intlist(password.encode('utf-8'))
-    key = password[:key_size_bytes] + [0]*(key_size_bytes - len(password))
+    key = password[:key_size_bytes] + [0] *(key_size_bytes - len(password))
     key = aes_encrypt(key[:BLOCK_SIZE_BYTES], key_expansion(key)) * (key_size_bytes // BLOCK_SIZE_BYTES)
     nonce = data[:NONCE_LENGTH_BYTES]
     cipher = data[NONCE_LENGTH_BYTES:]
     class Counter:
-        __value = nonce + [0]*(BLOCK_SIZE_BYTES - NONCE_LENGTH_BYTES)
+        __value = nonce + [0] *(BLOCK_SIZE_BYTES - NONCE_LENGTH_BYTES)
         def next_value(self):
             temp = self.__value
@@ -293,7 +293,7 @@ def mix_column(data, matrix):
 def mix_columns(data, matrix=MIX_COLUMN_MATRIX):
     data_mixed = []
     for i in range(4):
-        column = data[i*4: (i+1)*4]
+        column = data[i *4: (i +1) *4]
         data_mixed += mix_column(column, matrix)
     return data_mixed
@@ -320,7 +320,7 @@ def shift_rows_inv(data):
 def inc(data):
     data = data[:]  # copy
-    for i in range(len(data)-1, -1, -1):
+    for i in range(len(data) -1, -1, -1):
         if data[i] == 255:
             data[i] = 0
         else:
diff --git a/youtube_dl/downloader/f4m.py b/youtube_dl/downloader/f4m.py
index b607f6485..ed8df16d2 100644
--- a/youtube_dl/downloader/f4m.py
+++ b/youtube_dl/downloader/f4m.py
@@ -55,7 +55,7 @@ class FlvReader(io.BytesIO):
         if size == 1:
             real_size = self.read_unsigned_long_long()
             header_end = 16
-        return real_size, box_type, self.read(real_size-header_end)
+        return real_size, box_type, self.read(real_size -header_end)
     def read_asrt(self):
         # version
@@ -180,7 +180,7 @@ def build_fragments_list(boot_info):
     n_frags = segment_run_entry[1]
     fragment_run_entry_table = boot_info['fragments'][0]['fragments']
     first_frag_number = fragment_run_entry_table[0]['first']
-    for (i, frag_number) in zip(range(1, n_frags+1), itertools.count(first_frag_number)):
+    for (i, frag_number) in zip(range(1, n_frags +1), itertools.count(first_frag_number)):
         res.append((1, frag_number))
     return res
diff --git a/youtube_dl/downloader/rtmp.py b/youtube_dl/downloader/rtmp.py
index b8e104c59..642f13b9a 100644
--- a/youtube_dl/downloader/rtmp.py
+++ b/youtube_dl/downloader/rtmp.py
@@ -46,13 +46,13 @@ class RtmpFD(FileDownloader):
                     continue
                 mobj = re.search(r'([0-9]+\.[0-9]{3}) kB / [0-9]+\.[0-9]{2} sec \(([0-9]{1,2}\.[0-9])%\)', line)
                 if mobj:
-                    downloaded_data_len = int(float(mobj.group(1))*1024)
+                    downloaded_data_len = int(float(mobj.group(1)) *1024)
                     percent = float(mobj.group(2))
                     if not resume_percent:
                         resume_percent = percent
                         resume_downloaded_data_len = downloaded_data_len
-                    eta = self.calc_eta(start, time.time(), 100-resume_percent, percent-resume_percent)
-                    speed = self.calc_speed(start, time.time(), downloaded_data_len-resume_downloaded_data_len)
+                    eta = self.calc_eta(start, time.time(), 100 -resume_percent, percent -resume_percent)
+                    speed = self.calc_speed(start, time.time(), downloaded_data_len -resume_downloaded_data_len)
                     data_len = None
                     if percent > 0:
                         data_len = int(downloaded_data_len * 100 / percent)
@@ -72,7 +72,7 @@ class RtmpFD(FileDownloader):
                     # no percent for live streams
                     mobj = re.search(r'([0-9]+\.[0-9]{3}) kB / [0-9]+\.[0-9]{2} sec', line)
                     if mobj:
-                        downloaded_data_len = int(float(mobj.group(1))*1024)
+                        downloaded_data_len = int(float(mobj.group(1)) *1024)
                         time_now = time.time()
                         speed = self.calc_speed(start, time_now, downloaded_data_len)
                         self.report_progress_live_stream(downloaded_data_len, speed, time_now - start)
@@ -88,7 +88,7 @@ class RtmpFD(FileDownloader):
                 if not cursor_in_new_line:
                     self.to_screen('')
                     cursor_in_new_line = True
-                self.to_screen('[rtmpdump] '+line)
+                self.to_screen('[rtmpdump] ' +line)
             proc.wait()
         if not cursor_in_new_line:
             self.to_screen('')
diff --git a/youtube_dl/extractor/__init__.py b/youtube_dl/extractor/__init__.py
index fcb75af34..43185345e 100644
--- a/youtube_dl/extractor/__init__.py
+++ b/youtube_dl/extractor/__init__.py
@@ -529,4 +529,4 @@ def gen_extractors():
 def get_info_extractor(ie_name):
     """Returns the info extractor class with the given ie_name"""
-    return globals()[ie_name+'IE']
+    return globals()[ie_name + 'IE']
diff --git a/youtube_dl/extractor/bbccouk.py b/youtube_dl/extractor/bbccouk.py
index 89476f032..fa15bf19c 100644
--- a/youtube_dl/extractor/bbccouk.py
+++ b/youtube_dl/extractor/bbccouk.py
@@ -195,7 +195,7 @@ class BBCCoUkIE(SubtitlesInfoExtractor):
             duration = int(item.get('duration'))
             media_selection = self._download_xml(
-                'http://open.live.bbc.co.uk/mediaselector/5/select/version/2.0/mediaset/pc/vpid/%s' % programme_id,
+                'http://open.live.bbc.co.uk/mediaselector/5/select/version/2.0/mediaset/pc/vpid/%s' % programme_id,
                 programme_id, 'Downloading media selection XML')
             for media in self._extract_medias(media_selection):
diff --git a/youtube_dl/extractor/cinemassacre.py b/youtube_dl/extractor/cinemassacre.py
index 31fe906b4..bdc84f1f5 100644
--- a/youtube_dl/extractor/cinemassacre.py
+++ b/youtube_dl/extractor/cinemassacre.py
@@ -77,7 +77,7 @@ class CinemassacreIE(InfoExtractor):
         if videolist_url:
             videolist = self._download_xml(videolist_url, video_id, 'Downloading videolist XML')
             formats = []
-            baseurl = vidurl[:vidurl.rfind('/')+1]
+            baseurl = vidurl[:vidurl.rfind('/') +1]
             for video in videolist.findall('.//video'):
                 src = video.get('src')
                 if not src:
diff --git a/youtube_dl/extractor/crunchyroll.py b/youtube_dl/extractor/crunchyroll.py
index c45858160..ab03c8602 100644
--- a/youtube_dl/extractor/crunchyroll.py
+++ b/youtube_dl/extractor/crunchyroll.py
@@ -226,10 +226,10 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
         formats = []
         for fmt in re.findall(r'\?p([0-9]{3,4})=1', webpage):
            stream_quality, stream_format = self._FORMAT_IDS[fmt]
-            video_format = fmt+'p'
+            video_format = fmt +'p'
            streamdata_req = compat_urllib_request.Request('http://www.crunchyroll.com/xml/')
            # urlencode doesn't work!
-            streamdata_req.data = 'req=RpcApiVideoEncode%5FGetStreamInfo&video%5Fencode%5Fquality='+stream_quality+'&media%5Fid='+stream_id+'&video%5Fformat='+stream_format
+            streamdata_req.data = 'req=RpcApiVideoEncode%5FGetStreamInfo&video%5Fencode%5Fquality=' +stream_quality +'&media%5Fid=' +stream_id +'&video%5Fformat=' +stream_format
            streamdata_req.add_header('Content-Type', 'application/x-www-form-urlencoded')
            streamdata_req.add_header('Content-Length', str(len(streamdata_req.data)))
            streamdata = self._download_xml(
@@ -248,8 +248,8 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
         subtitles = {}
         sub_format = self._downloader.params.get('subtitlesformat', 'srt')
         for sub_id, sub_name in re.findall(r'\?ssid=([0-9]+)" title="([^"]+)', webpage):
-            sub_page = self._download_webpage('http://www.crunchyroll.com/xml/?req=RpcApiSubtitle_GetXml&subtitle_script_id='+sub_id,\
-                                              video_id, note='Downloading subtitles for '+sub_name)
+            sub_page = self._download_webpage('http://www.crunchyroll.com/xml/?req=RpcApiSubtitle_GetXml&subtitle_script_id=' +sub_id,\
+                                              video_id, note='Downloading subtitles for ' +sub_name)
             id = self._search_regex(r'id=\'([0-9]+)', sub_page, 'subtitle_id', fatal=False)
             iv = self._search_regex(r'<iv>([^<]+)', sub_page, 'subtitle_iv', fatal=False)
             data = self._search_regex(r'<data>([^<]+)', sub_page, 'subtitle_data', fatal=False)
@@ -274,14 +274,14 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
             return
         return {
-            'id': video_id,
-            'title': video_title,
+            'id': video_id,
+            'title': video_title,
             'description': video_description,
-            'thumbnail': video_thumbnail,
-            'uploader': video_uploader,
+            'thumbnail': video_thumbnail,
+            'uploader': video_uploader,
             'upload_date': video_upload_date,
-            'subtitles': subtitles,
-            'formats': formats,
+            'subtitles': subtitles,
+            'formats': formats,
         }
diff --git a/youtube_dl/extractor/dotsub.py b/youtube_dl/extractor/dotsub.py
index 70e368021..b30d70e7b 100644
--- a/youtube_dl/extractor/dotsub.py
+++ b/youtube_dl/extractor/dotsub.py
@@ -27,7 +27,7 @@ class DotsubIE(InfoExtractor):
         video_id = mobj.group('id')
         info_url = "https://dotsub.com/api/media/%s/metadata" % video_id
         info = self._download_json(info_url, video_id)
-        date = time.gmtime(info['dateCreated']/1000)  # The timestamp is in miliseconds
+        date = time.gmtime(info['dateCreated'] /1000)  # The timestamp is in miliseconds
         return {
             'id': video_id,
diff --git a/youtube_dl/extractor/generic.py b/youtube_dl/extractor/generic.py
index 2dc801ad9..a40ff6b64 100644
--- a/youtube_dl/extractor/generic.py
+++ b/youtube_dl/extractor/generic.py
@@ -748,7 +748,7 @@ class GenericIE(InfoExtractor):
         # Look for embedded blip.tv player
         mobj = re.search(r'<meta\s[^>]*https?://api\.blip\.tv/\w+/redirect/\w+/(\d+)', webpage)
         if mobj:
-            return self.url_result('http://blip.tv/a/a-'+mobj.group(1), 'BlipTV')
+            return self.url_result('http://blip.tv/a/a-' +mobj.group(1), 'BlipTV')
         mobj = re.search(r'<(?:iframe|embed|object)\s[^>]*(https?://(?:\w+\.)?blip\.tv/(?:play/|api\.swf#)[a-zA-Z0-9_]+)', webpage)
         if mobj:
             return self.url_result(mobj.group(1), 'BlipTV')
diff --git a/youtube_dl/extractor/iprima.py b/youtube_dl/extractor/iprima.py
index d1defd363..90b4c966d 100644
--- a/youtube_dl/extractor/iprima.py
+++ b/youtube_dl/extractor/iprima.py
@@ -54,7 +54,7 @@ class IPrimaIE(InfoExtractor):
         player_url = (
             'http://embed.livebox.cz/iprimaplay/player-embed-v2.js?__tok%s__=%s' %
-            (floor(random()*1073741824), floor(random()*1073741824))
+            (floor(random() *1073741824), floor(random() *1073741824))
         )
         req = compat_urllib_request.Request(player_url)
diff --git a/youtube_dl/extractor/lifenews.py b/youtube_dl/extractor/lifenews.py
index 1f1e23dc3..e7ee3bba8 100644
--- a/youtube_dl/extractor/lifenews.py
+++ b/youtube_dl/extractor/lifenews.py
@@ -71,4 +71,4 @@ class LifeNewsIE(InfoExtractor):
         if len(videos) == 1:
             return make_entry(video_id, videos[0])
         else:
-            return [make_entry(video_id, media, video_number+1) for video_number, media in enumerate(videos)]
+            return [make_entry(video_id, media, video_number +1) for video_number, media in enumerate(videos)]
diff --git a/youtube_dl/extractor/liveleak.py b/youtube_dl/extractor/liveleak.py
index 8e50e8f79..b04be1e8c 100644
--- a/youtube_dl/extractor/liveleak.py
+++ b/youtube_dl/extractor/liveleak.py
@@ -19,8 +19,7 @@ class LiveLeakIE(InfoExtractor):
             'uploader': 'ljfriel2',
             'title': 'Most unlucky car accident'
         }
-    },
-    {
+    }, {
         'url': 'http://www.liveleak.com/view?i=f93_1390833151',
         'md5': 'd3f1367d14cc3c15bf24fbfbe04b9abf',
         'info_dict': {
@@ -30,8 +29,7 @@ class LiveLeakIE(InfoExtractor):
             'uploader': 'ARD_Stinkt',
             'title': 'German Television does first Edward Snowden Interview (ENGLISH)',
         }
-    },
-    {
+    }, {
         'url': 'http://www.liveleak.com/view?i=4f7_1392687779',
         'md5': '42c6d97d54f1db107958760788c5f48f',
         'info_dict': {
diff --git a/youtube_dl/extractor/metacafe.py b/youtube_dl/extractor/metacafe.py
index 1a896b536..f68add6c0 100644
--- a/youtube_dl/extractor/metacafe.py
+++ b/youtube_dl/extractor/metacafe.py
@@ -22,7 +22,7 @@ class MetacafeIE(InfoExtractor):
         # Youtube video
         {
             'add_ie': ['Youtube'],
-            'url': 'http://metacafe.com/watch/yt-_aUehQsCQtM/the_electric_company_short_i_pbs_kids_go/',
+            'url': 'http://metacafe.com/watch/yt-_aUehQsCQtM/the_electric_company_short_i_pbs_kids_go/',
             'info_dict': {
                 'id': '_aUehQsCQtM',
                 'ext': 'mp4',
diff --git a/youtube_dl/extractor/mtv.py b/youtube_dl/extractor/mtv.py
index b2f37f692..b6755ff01 100644
--- a/youtube_dl/extractor/mtv.py
+++ b/youtube_dl/extractor/mtv.py
@@ -245,7 +245,7 @@ class MTVIE(MTVServicesInfoExtractor):
         m_vevo = re.search(r'isVevoVideo = true;.*?vevoVideoId = "(.*?)";', webpage, re.DOTALL)
         if m_vevo:
-            vevo_id = m_vevo.group(1);
+            vevo_id = m_vevo.group(1)
             self.to_screen('Vevo video detected: %s' % vevo_id)
             return self.url_result('vevo:%s' % vevo_id, ie='Vevo')
diff --git a/youtube_dl/extractor/sohu.py b/youtube_dl/extractor/sohu.py
index bebcafb62..875d09faa 100644
--- a/youtube_dl/extractor/sohu.py
+++ b/youtube_dl/extractor/sohu.py
@@ -69,7 +69,7 @@ class SohuIE(InfoExtractor):
                 (allot, prot, clipsURL[i], su[i]))
             part_str = self._download_webpage(
                 part_url, video_id,
-                note=u'Downloading part %d of %d' % (i+1, part_count))
+                note=u'Downloading part %d of %d' % (i +1, part_count))
             part_info = part_str.split('|')
             video_url = '%s%s?key=%s' % (part_info[0], su[i], part_info[3])
diff --git a/youtube_dl/extractor/soundcloud.py b/youtube_dl/extractor/soundcloud.py
index c830d9da7..f92f7fa24 100644
--- a/youtube_dl/extractor/soundcloud.py
+++ b/youtube_dl/extractor/soundcloud.py
@@ -224,7 +224,7 @@ class SoundcloudIE(InfoExtractor):
             # extract uploader (which is in the url)
             uploader = mobj.group('uploader')
             # extract simple title (uploader + slug of song title)
-            slug_title = mobj.group('title')
+            slug_title = mobj.group('title')
             token = mobj.group('token')
             full_title = resolve_title = '%s/%s' % (uploader, slug_title)
             if token:
diff --git a/youtube_dl/extractor/swrmediathek.py b/youtube_dl/extractor/swrmediathek.py
index bf430d870..4132b6428 100644
--- a/youtube_dl/extractor/swrmediathek.py
+++ b/youtube_dl/extractor/swrmediathek.py
@@ -80,7 +80,7 @@ class SWRMediathekIE(InfoExtractor):
             if media_type == 'Video':
                 fmt.update({
-                    'format_note': ['144p', '288p', '544p', '720p'][quality-1],
+                    'format_note': ['144p', '288p', '544p', '720p'][quality -1],
                     'vcodec': codec,
                 })
             elif media_type == 'Audio':
diff --git a/youtube_dl/extractor/theplatform.py b/youtube_dl/extractor/theplatform.py
index b83d15b90..40b22677b 100644
--- a/youtube_dl/extractor/theplatform.py
+++ b/youtube_dl/extractor/theplatform.py
@@ -118,5 +118,5 @@ class ThePlatformIE(InfoExtractor):
             'formats': formats,
             'description': info['description'],
             'thumbnail': info['defaultThumbnailUrl'],
-            'duration': info['duration']//1000,
+            'duration': info['duration'] //1000,
         }
diff --git a/youtube_dl/extractor/thisav.py b/youtube_dl/extractor/thisav.py
index 350a5cdb5..7f323c938 100644
--- a/youtube_dl/extractor/thisav.py
+++ b/youtube_dl/extractor/thisav.py
@@ -38,10 +38,10 @@ class ThisAVIE(InfoExtractor):
         ext = determine_ext(video_url)
         return {
-            'id': video_id,
-            'url': video_url,
-            'uploader': uploader,
+            'id': video_id,
+            'url': video_url,
+            'uploader': uploader,
             'uploader_id': uploader_id,
-            'title': title,
-            'ext': ext,
+            'title': title,
+            'ext': ext,
         }
diff --git a/youtube_dl/extractor/tudou.py b/youtube_dl/extractor/tudou.py
index c8780790a..3007b136f 100644
--- a/youtube_dl/extractor/tudou.py
+++ b/youtube_dl/extractor/tudou.py
@@ -37,7 +37,7 @@ class TudouIE(InfoExtractor):
     }]
     def _url_for_id(self, id, quality = None):
-        info_url = "http://v2.tudou.com/f?id="+str(id)
+        info_url = "http://v2.tudou.com/f?id=" +str(id)
         if quality:
             info_url += '&hd' + quality
         webpage = self._download_webpage(info_url, id, "Opening the info webpage")
diff --git a/youtube_dl/extractor/videofyme.py b/youtube_dl/extractor/videofyme.py
index 2dd51d866..d69fe1e77 100644
--- a/youtube_dl/extractor/videofyme.py
+++ b/youtube_dl/extractor/videofyme.py
@@ -13,7 +13,7 @@ class VideofyMeIE(InfoExtractor):
     _TEST = {
         u'url': u'http://www.videofy.me/thisisvideofyme/1100701',
-        u'file': u'1100701.mp4',
+        u'file': u'1100701.mp4',
         u'md5': u'c77d700bdc16ae2e9f3c26019bd96143',
         u'info_dict': {
             u'title': u'This is VideofyMe',
diff --git a/youtube_dl/extractor/videopremium.py b/youtube_dl/extractor/videopremium.py
index 65463c733..9f2cdb6ef 100644
--- a/youtube_dl/extractor/videopremium.py
+++ b/youtube_dl/extractor/videopremium.py
@@ -35,11 +35,11 @@ class VideoPremiumIE(InfoExtractor):
             r'<h2(?:.*?)>\s*(.+?)\s*<', webpage, u'video title')
         return {
-            'id': video_id,
-            'url': "rtmp://e%d.md.iplay.md/play" % random.randint(1, 16),
-            'play_path': "mp4:%s.f4v" % video_id,
-            'page_url': "http://videopremium.tv/" + video_id,
-            'player_url': "http://videopremium.tv/uplayer/uppod.swf",
-            'ext': 'f4v',
-            'title': video_title,
+            'id': video_id,
+            'url': "rtmp://e%d.md.iplay.md/play" % random.randint(1, 16),
+            'play_path': "mp4:%s.f4v" % video_id,
+            'page_url': "http://videopremium.tv/" + video_id,
+            'player_url': "http://videopremium.tv/uplayer/uppod.swf",
+            'ext': 'f4v',
+            'title': video_title,
         }
diff --git a/youtube_dl/extractor/xtube.py b/youtube_dl/extractor/xtube.py
index c3bb9b2cf..26a51340f 100644
--- a/youtube_dl/extractor/xtube.py
+++ b/youtube_dl/extractor/xtube.py
@@ -97,7 +97,7 @@ class XTubeUserIE(InfoExtractor):
             url, username, note='Retrieving profile page')
         video_count = int(self._search_regex(
-            r'<strong>%s\'s Videos \(([0-9]+)\)</strong>'%username, profile_page,
+            r'<strong>%s\'s Videos \(([0-9]+)\)</strong>' %username, profile_page,
             'video count'))
         PAGE_SIZE = 25
diff --git a/youtube_dl/extractor/yahoo.py b/youtube_dl/extractor/yahoo.py
index 117f0856a..514c16127 100644
--- a/youtube_dl/extractor/yahoo.py
+++ b/youtube_dl/extractor/yahoo.py
@@ -229,7 +229,7 @@ class YahooSearchIE(SearchInfoExtractor):
         for pagenum in itertools.count(0):
             result_url = 'http://video.search.yahoo.com/search/?p=%s&fr=screen&o=js&gs=0&b=%d' % (compat_urllib_parse.quote_plus(query), pagenum * 30)
             info = self._download_json(result_url, query,
-                note='Downloading results page '+str(pagenum+1))
+                note='Downloading results page ' +str(pagenum +1))
             m = info['m']
             results = info['results']
diff --git a/youtube_dl/extractor/youku.py b/youtube_dl/extractor/youku.py
index e155b3a94..830ae6cd9 100644
--- a/youtube_dl/extractor/youku.py
+++ b/youtube_dl/extractor/youku.py
@@ -45,8 +45,8 @@ class YoukuIE(InfoExtractor):
         source = list("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ/\:._-1234567890")
         seed = float(seed)
         for i in range(len(source)):
-            seed = (seed * 211 + 30031) % 65536
-            index = math.floor(seed / 65536 * len(source))
+            seed = (seed * 211 + 30031) % 65536
+            index = math.floor(seed / 65536 * len(source))
             mixed.append(source[int(index)])
             source.remove(source[int(index)])
         # return ''.join(mixed)
diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py
index 98cac7c17..8711b06d4 100644
--- a/youtube_dl/extractor/youtube.py
+++ b/youtube_dl/extractor/youtube.py
@@ -77,25 +77,25 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
         # Log in
         login_form_strs = {
-            'continue': 'https://www.youtube.com/signin?action_handle_signin=true&feature=sign_in_button&hl=en_US&nomobiletemp=1',
-            'Email': username,
-            'GALX': galx,
-            'Passwd': password,
-
-            'PersistentCookie': 'yes',
-            '_utf8': '霱',
-            'bgresponse': 'js_disabled',
-            'checkConnection': '',
-            'checkedDomains': 'youtube',
-            'dnConn': '',
-            'pstMsg': '0',
-            'rmShown': '1',
-            'secTok': '',
-            'signIn': 'Sign in',
-            'timeStmp': '',
-            'service': 'youtube',
-            'uilel': '3',
-            'hl': 'en_US',
+            'continue': 'https://www.youtube.com/signin?action_handle_signin=true&feature=sign_in_button&hl=en_US&nomobiletemp=1',
+            'Email': username,
+            'GALX': galx,
+            'Passwd': password,
+
+            'PersistentCookie': 'yes',
+            '_utf8': '霱',
+            'bgresponse': 'js_disabled',
+            'checkConnection': '',
+            'checkedDomains': 'youtube',
+            'dnConn': '',
+            'pstMsg': '0',
+            'rmShown': '1',
+            'secTok': '',
+            'signIn': 'Sign in',
+            'timeStmp': '',
+            'service': 'youtube',
+            'uilel': '3',
+            'hl': 'en_US',
         }
         # Convert to UTF-8 *before* urlencode because Python 2.x's urlencode
@@ -181,8 +181,10 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
             'next_url': '/',
             'action_confirm': 'Confirm',
         }
-        req = compat_urllib_request.Request(self._AGE_URL,
-            compat_urllib_parse.urlencode(age_form).encode('ascii'))
+        req = compat_urllib_request.Request(
+            self._AGE_URL,
+            compat_urllib_parse.urlencode(age_form).encode('ascii')
+        )
         self._download_webpage(
             req, None,
@@ -492,7 +494,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
         def gen_sig_code(idxs):
             def _genslice(start, end, step):
                 starts = '' if start == 0 else str(start)
-                ends = (':%d' % (end+step)) if end + step >= 0 else ':'
+                ends = (':%d' % (end + step)) if end + step >= 0 else ':'
                 steps = '' if step == 1 else (':%d' % step)
                 return 's[%s%s%s]' % (starts, ends, steps)
@@ -530,7 +532,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
     def _parse_sig_js(self, jscode):
         funcname = self._search_regex(
             r'\.sig\|\|([a-zA-Z0-9]+)\(', jscode,
-            'Initial JS player signature function name')
+            'Initial JS player signature function name')
         jsi = JSInterpreter(jscode)
         initial_function = jsi.extract_function(funcname)
@@ -656,7 +658,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
         def _get_urls(_manifest):
             lines = _manifest.split('\n')
             urls = filter(lambda l: l and not l.startswith('#'),
-                lines)
+                lines)
             return urls
         manifest = self._download_webpage(manifest_url, video_id, 'Downloading formats manifest')
         formats_urls = _get_urls(manifest)
@@ -723,10 +725,10 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
             age_gate = False
             for el_type in ['&el=embedded', '&el=detailpage', '&el=vevo', '']:
                 video_info_url = (proto + '://www.youtube.com/get_video_info?&video_id=%s%s&ps=default&eurl=&gl=US&hl=en'
-                    % (video_id, el_type))
+                    % (video_id, el_type))
                 video_info_webpage = self._download_webpage(video_info_url, video_id,
-                    note=False,
-                    errnote='unable to download video info webpage')
+                    note=False,
+                    errnote='unable to download video info webpage')
                 video_info = compat_parse_qs(video_info_webpage)
                 if 'token' in video_info:
                     break
@@ -1017,23 +1019,23 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
         self._sort_formats(formats)
         return {
-            'id': video_id,
-            'uploader': video_uploader,
-            'uploader_id': video_uploader_id,
-            'upload_date': upload_date,
-            'title': video_title,
-            'thumbnail': video_thumbnail,
-            'description': video_description,
-            'categories': video_categories,
-            'subtitles': video_subtitles,
-            'duration': video_duration,
-            'age_limit': 18 if age_gate else 0,
-            'annotations': video_annotations,
+            'id': video_id,
+            'uploader': video_uploader,
+            'uploader_id': video_uploader_id,
+            'upload_date': upload_date,
+            'title': video_title,
+            'thumbnail': video_thumbnail,
+            'description': video_description,
+            'categories': video_categories,
+            'subtitles': video_subtitles,
+            'duration': video_duration,
+            'age_limit': 18 if age_gate else 0,
+            'annotations': video_annotations,
             'webpage_url': proto + '://www.youtube.com/watch?v=%s' % video_id,
-            'view_count': view_count,
+            'view_count': view_count,
             'like_count': like_count,
             'dislike_count': dislike_count,
-            'formats': formats,
+            'formats': formats,
         }
@@ -1167,7 +1169,7 @@ class YoutubePlaylistIE(YoutubeBaseInfoExtractor):
             return self._extract_mix(playlist_id)
         if playlist_id.startswith('TL'):
             raise ExtractorError('For downloading YouTube.com top lists, use '
-                'the "yttoplist" keyword, for example "youtube-dl \'yttoplist:music:Top Tracks\'"', expected=True)
+                'the "yttoplist" keyword, for example "youtube-dl \'yttoplist:music:Top Tracks\'"', expected=True)
         url = self._TEMPLATE_URL % playlist_id
         page = self._download_webpage(url, playlist_id)
@@ -1546,8 +1548,8 @@ class YoutubeFeedsInfoExtractor(YoutubeBaseInfoExtractor):
         paging = 0
         for i in itertools.count(1):
             info = self._download_json(self._FEED_TEMPLATE % paging,
-                '%s feed' % self._FEED_NAME,
-                'Downloading page %s' % i)
+                '%s feed' % self._FEED_NAME,
+                'Downloading page %s' % i)
             feed_html = info.get('feed_html') or info.get('content_html')
             load_more_widget_html = info.get('load_more_widget_html') or feed_html
             m_ids = re.finditer(r'"/watch\?v=(.*?)["&]', feed_html)
diff --git a/youtube_dl/postprocessor/ffmpeg.py b/youtube_dl/postprocessor/ffmpeg.py
index dfae735f3..d9f1cceb9 100644
--- a/youtube_dl/postprocessor/ffmpeg.py
+++ b/youtube_dl/postprocessor/ffmpeg.py
@@ -246,7 +246,7 @@ class FFmpegVideoConvertor(FFmpegPostProcessor):
         if information['ext'] == self._preferedformat:
             self._downloader.to_screen(u'[ffmpeg] Not converting video file %s - already is in target format %s' % (path, self._preferedformat))
             return True, information
-        self._downloader.to_screen(u'['+'ffmpeg'+'] Converting video from %s to %s, Destination: ' % (information['ext'], self._preferedformat) + outpath)
+        self._downloader.to_screen(u'[' +'ffmpeg' +'] Converting video from %s to %s, Destination: ' % (information['ext'], self._preferedformat) + outpath)
         self.run_ffmpeg(path, outpath, [])
         information['filepath'] = outpath
         information['format'] = self._preferedformat
@@ -466,7 +466,7 @@ class FFmpegEmbedSubtitlePP(FFmpegPostProcessor):
         opts = ['-map', '0:0', '-map', '0:1', '-c:v', 'copy', '-c:a', 'copy']
         for (i, lang) in enumerate(sub_langs):
-            opts.extend(['-map', '%d:0' % (i+1), '-c:s:%d' % i, 'mov_text'])
+            opts.extend(['-map', '%d:0' % (i +1), '-c:s:%d' % i, 'mov_text'])
             lang_code = self._conver_lang_code(lang)
             if lang_code is not None:
                 opts.extend(['-metadata:s:s:%d' % i, 'language=%s' % lang_code])
diff --git a/youtube_dl/update.py b/youtube_dl/update.py
index d32a86708..23457e07a 100644
--- a/youtube_dl/update.py
+++ b/youtube_dl/update.py
@@ -41,7 +41,7 @@ def rsa_verify(message, signature, key):
     signature = signature[2:]
     if not b('\x00') in signature:
         return False
-    signature = signature[signature.index(b('\x00'))+1:]
+    signature = signature[signature.index(b('\x00')) +1:]
     if not signature.startswith(b('\x30\x31\x30\x0D\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01\x05\x00\x04\x20')):
         return False
     signature = signature[19:]