author      Sergey M․ <dstftw@gmail.com>    2019-05-11 03:56:22 +0700
committer   Sergey M․ <dstftw@gmail.com>    2019-05-11 03:57:40 +0700
commit      3089bc748c0fe72a0361bce3f5e2fbab25175236 (patch)
tree        2dbe8468137470f25f851b8d06778e2dcab87d25 /youtube_dl/extractor
parent      d23e85515a8f58e276e8ac07bf1fa19f4f1aaec8 (diff)
download    youtube-dl-3089bc748c0fe72a0361bce3f5e2fbab25175236.tar.xz
Fix W504 and disable W503 (closes #20863)
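
W503 ("line break before binary operator") and W504 ("line break after binary operator") are mutually exclusive pycodestyle checks: any wrapped expression containing a binary operator violates one or the other. This commit settles on the W503 style, moving operators such as "and", "or" and "+" to the start of each continuation line, and W503 itself is then silenced in the linter configuration. A minimal sketch of the two styles (the identifiers here are illustrative only, not taken from this diff):

    # old style: operator at the end of the line (flagged as W504 once W503 is ignored)
    is_blocked = (response.status == 403 and
                  'Websense' in response.body)

    # new style: operator starts the continuation line (W503, now ignored)
    is_blocked = (response.status == 403
                  and 'Websense' in response.body)
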
Diffstat (limited to 'youtube_dl/extractor')
-rw-r--r--  youtube_dl/extractor/addanime.py          6
-rw-r--r--  youtube_dl/extractor/blinkx.py            4
-rw-r--r--  youtube_dl/extractor/common.py           40
-rw-r--r--  youtube_dl/extractor/dailymail.py         4
-rw-r--r--  youtube_dl/extractor/dctp.py              4
-rw-r--r--  youtube_dl/extractor/expressen.py         4
-rw-r--r--  youtube_dl/extractor/frontendmasters.py   4
-rw-r--r--  youtube_dl/extractor/generic.py          14
-rw-r--r--  youtube_dl/extractor/heise.py             4
-rw-r--r--  youtube_dl/extractor/hitbox.py            4
-rw-r--r--  youtube_dl/extractor/hitrecord.py         4
-rw-r--r--  youtube_dl/extractor/hketv.py            12
-rw-r--r--  youtube_dl/extractor/hrti.py              4
-rw-r--r--  youtube_dl/extractor/infoq.py             6
-rw-r--r--  youtube_dl/extractor/iqiyi.py             6
-rw-r--r--  youtube_dl/extractor/itv.py               8
-rw-r--r--  youtube_dl/extractor/kaltura.py           8
-rw-r--r--  youtube_dl/extractor/karrierevideos.py    4
-rw-r--r--  youtube_dl/extractor/motherless.py        4
-rw-r--r--  youtube_dl/extractor/ndtv.py              4
-rw-r--r--  youtube_dl/extractor/nextmedia.py         4
-rw-r--r--  youtube_dl/extractor/niconico.py         26
-rw-r--r--  youtube_dl/extractor/nrk.py               4
-rw-r--r--  youtube_dl/extractor/ooyala.py            4
-rw-r--r--  youtube_dl/extractor/openload.py          6
-rw-r--r--  youtube_dl/extractor/podomatic.py         4
-rw-r--r--  youtube_dl/extractor/ruutu.py             4
-rw-r--r--  youtube_dl/extractor/sbs.py               4
-rw-r--r--  youtube_dl/extractor/vevo.py              4
-rw-r--r--  youtube_dl/extractor/vk.py                4
-rw-r--r--  youtube_dl/extractor/yandexvideo.py       6
-rw-r--r--  youtube_dl/extractor/youku.py             4
-rw-r--r--  youtube_dl/extractor/youtube.py          18
-rw-r--r--  youtube_dl/extractor/zattoo.py            4
34 files changed, 122 insertions, 122 deletions
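
The "disable W503" half of the change is a linter-configuration edit rather than a source change, so it does not appear in this extractor-only diffstat. In a typical flake8 setup it amounts to adding W503 to the ignore list, roughly as below (the exact config file and ignore list used by this repository may differ):

    [flake8]
    ignore = W503
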
diff --git a/youtube_dl/extractor/addanime.py b/youtube_dl/extractor/addanime.py
index 9f8a71262..5e7c0724e 100644
--- a/youtube_dl/extractor/addanime.py
+++ b/youtube_dl/extractor/addanime.py
@@ -59,9 +59,9 @@ class AddAnimeIE(InfoExtractor):
parsed_url = compat_urllib_parse_urlparse(url)
av_val = av_res + len(parsed_url.netloc)
confirm_url = (
- parsed_url.scheme + '://' + parsed_url.netloc +
- action + '?' +
- compat_urllib_parse_urlencode({
+ parsed_url.scheme + '://' + parsed_url.netloc
+ + action + '?'
+ + compat_urllib_parse_urlencode({
'jschl_vc': vc, 'jschl_answer': compat_str(av_val)}))
self._download_webpage(
confirm_url, video_id,
diff --git a/youtube_dl/extractor/blinkx.py b/youtube_dl/extractor/blinkx.py
index 3b8eabe8f..db5e12b21 100644
--- a/youtube_dl/extractor/blinkx.py
+++ b/youtube_dl/extractor/blinkx.py
@@ -32,8 +32,8 @@ class BlinkxIE(InfoExtractor):
video_id = self._match_id(url)
display_id = video_id[:8]
- api_url = ('https://apib4.blinkx.com/api.php?action=play_video&' +
- 'video=%s' % video_id)
+ api_url = ('https://apib4.blinkx.com/api.php?action=play_video&'
+ + 'video=%s' % video_id)
data_json = self._download_webpage(api_url, display_id)
data = json.loads(data_json)['api']['results'][0]
duration = None
diff --git a/youtube_dl/extractor/common.py b/youtube_dl/extractor/common.py
index 59ad455c1..23b4f372a 100644
--- a/youtube_dl/extractor/common.py
+++ b/youtube_dl/extractor/common.py
@@ -542,11 +542,11 @@ class InfoExtractor(object):
raise ExtractorError('An extractor error has occurred.', cause=e)
def __maybe_fake_ip_and_retry(self, countries):
- if (not self._downloader.params.get('geo_bypass_country', None) and
- self._GEO_BYPASS and
- self._downloader.params.get('geo_bypass', True) and
- not self._x_forwarded_for_ip and
- countries):
+ if (not self._downloader.params.get('geo_bypass_country', None)
+ and self._GEO_BYPASS
+ and self._downloader.params.get('geo_bypass', True)
+ and not self._x_forwarded_for_ip
+ and countries):
country_code = random.choice(countries)
self._x_forwarded_for_ip = GeoUtils.random_ipv4(country_code)
if self._x_forwarded_for_ip:
@@ -682,8 +682,8 @@ class InfoExtractor(object):
def __check_blocked(self, content):
first_block = content[:512]
- if ('<title>Access to this site is blocked</title>' in content and
- 'Websense' in first_block):
+ if ('<title>Access to this site is blocked</title>' in content
+ and 'Websense' in first_block):
msg = 'Access to this webpage has been blocked by Websense filtering software in your network.'
blocked_iframe = self._html_search_regex(
r'<iframe src="([^"]+)"', content,
@@ -701,8 +701,8 @@ class InfoExtractor(object):
if block_msg:
msg += ' (Message: "%s")' % block_msg.replace('\n', ' ')
raise ExtractorError(msg, expected=True)
- if ('<title>TTK :: Доступ к ресурсу ограничен</title>' in content and
- 'blocklist.rkn.gov.ru' in content):
+ if ('<title>TTK :: Доступ к ресурсу ограничен</title>' in content
+ and 'blocklist.rkn.gov.ru' in content):
raise ExtractorError(
'Access to this webpage has been blocked by decision of the Russian government. '
'Visit http://blocklist.rkn.gov.ru/ for a block reason.',
@@ -1709,8 +1709,8 @@ class InfoExtractor(object):
continue
else:
tbr = float_or_none(
- last_stream_inf.get('AVERAGE-BANDWIDTH') or
- last_stream_inf.get('BANDWIDTH'), scale=1000)
+ last_stream_inf.get('AVERAGE-BANDWIDTH')
+ or last_stream_inf.get('BANDWIDTH'), scale=1000)
format_id = []
if m3u8_id:
format_id.append(m3u8_id)
@@ -2504,8 +2504,8 @@ class InfoExtractor(object):
if str_or_none(s_attr.get(lbl))
]
width = int_or_none(s_attr.get('width'))
- height = (int_or_none(s_attr.get('height')) or
- int_or_none(s_attr.get('res')))
+ height = (int_or_none(s_attr.get('height'))
+ or int_or_none(s_attr.get('res')))
if not width or not height:
for lbl in labels:
resolution = parse_resolution(lbl)
@@ -2847,8 +2847,8 @@ class InfoExtractor(object):
return not any_restricted
def extract_subtitles(self, *args, **kwargs):
- if (self._downloader.params.get('writesubtitles', False) or
- self._downloader.params.get('listsubtitles')):
+ if (self._downloader.params.get('writesubtitles', False)
+ or self._downloader.params.get('listsubtitles')):
return self._get_subtitles(*args, **kwargs)
return {}
@@ -2873,8 +2873,8 @@ class InfoExtractor(object):
return ret
def extract_automatic_captions(self, *args, **kwargs):
- if (self._downloader.params.get('writeautomaticsub', False) or
- self._downloader.params.get('listsubtitles')):
+ if (self._downloader.params.get('writeautomaticsub', False)
+ or self._downloader.params.get('listsubtitles')):
return self._get_automatic_captions(*args, **kwargs)
return {}
@@ -2882,9 +2882,9 @@ class InfoExtractor(object):
raise NotImplementedError('This method must be implemented by subclasses')
def mark_watched(self, *args, **kwargs):
- if (self._downloader.params.get('mark_watched', False) and
- (self._get_login_info()[0] is not None or
- self._downloader.params.get('cookiefile') is not None)):
+ if (self._downloader.params.get('mark_watched', False)
+ and (self._get_login_info()[0] is not None
+ or self._downloader.params.get('cookiefile') is not None)):
self._mark_watched(*args, **kwargs)
def _mark_watched(self, *args, **kwargs):
diff --git a/youtube_dl/extractor/dailymail.py b/youtube_dl/extractor/dailymail.py
index 4f75a2a30..67b88fd56 100644
--- a/youtube_dl/extractor/dailymail.py
+++ b/youtube_dl/extractor/dailymail.py
@@ -45,8 +45,8 @@ class DailyMailIE(InfoExtractor):
sources_url = (try_get(
video_data,
(lambda x: x['plugins']['sources']['url'],
- lambda x: x['sources']['url']), compat_str) or
- 'http://www.dailymail.co.uk/api/player/%s/video-sources.json' % video_id)
+ lambda x: x['sources']['url']), compat_str)
+ or 'http://www.dailymail.co.uk/api/player/%s/video-sources.json' % video_id)
video_sources = self._download_json(sources_url, video_id)
body = video_sources.get('body')
diff --git a/youtube_dl/extractor/dctp.py b/youtube_dl/extractor/dctp.py
index 769a219df..04ff214f7 100644
--- a/youtube_dl/extractor/dctp.py
+++ b/youtube_dl/extractor/dctp.py
@@ -70,8 +70,8 @@ class DctpTvIE(InfoExtractor):
endpoint = next(
server['endpoint']
for server in servers
- if url_or_none(server.get('endpoint')) and
- 'cloudfront' in server['endpoint'])
+ if url_or_none(server.get('endpoint'))
+ and 'cloudfront' in server['endpoint'])
else:
endpoint = 'rtmpe://s2pqqn4u96e4j8.cloudfront.net/cfx/st/'
diff --git a/youtube_dl/extractor/expressen.py b/youtube_dl/extractor/expressen.py
index 934571472..f79365038 100644
--- a/youtube_dl/extractor/expressen.py
+++ b/youtube_dl/extractor/expressen.py
@@ -82,8 +82,8 @@ class ExpressenIE(InfoExtractor):
title = info.get('titleRaw') or data['title']
description = info.get('descriptionRaw')
thumbnail = info.get('socialMediaImage') or data.get('image')
- duration = int_or_none(info.get('videoTotalSecondsDuration') or
- data.get('totalSecondsDuration'))
+ duration = int_or_none(info.get('videoTotalSecondsDuration')
+ or data.get('totalSecondsDuration'))
timestamp = unified_timestamp(info.get('publishDate'))
return {
diff --git a/youtube_dl/extractor/frontendmasters.py b/youtube_dl/extractor/frontendmasters.py
index cb57ba007..f1db33fb1 100644
--- a/youtube_dl/extractor/frontendmasters.py
+++ b/youtube_dl/extractor/frontendmasters.py
@@ -94,8 +94,8 @@ class FrontendMastersPageBaseIE(FrontendMastersBaseIE):
chapter_number = None
index = lesson.get('index')
element_index = lesson.get('elementIndex')
- if (isinstance(index, int) and isinstance(element_index, int) and
- index < element_index):
+ if (isinstance(index, int) and isinstance(element_index, int)
+ and index < element_index):
chapter_number = element_index - index
chapter = (chapters[chapter_number - 1]
if chapter_number - 1 < len(chapters) else None)
diff --git a/youtube_dl/extractor/generic.py b/youtube_dl/extractor/generic.py
index 495fa4975..3a13c62eb 100644
--- a/youtube_dl/extractor/generic.py
+++ b/youtube_dl/extractor/generic.py
@@ -2549,11 +2549,11 @@ class GenericIE(InfoExtractor):
return self.url_result(mobj.group('url'))
# Look for Ooyala videos
- mobj = (re.search(r'player\.ooyala\.com/[^"?]+[?#][^"]*?(?:embedCode|ec)=(?P<ec>[^"&]+)', webpage) or
- re.search(r'OO\.Player\.create\([\'"].*?[\'"],\s*[\'"](?P<ec>.{32})[\'"]', webpage) or
- re.search(r'OO\.Player\.create\.apply\(\s*OO\.Player\s*,\s*op\(\s*\[\s*[\'"][^\'"]*[\'"]\s*,\s*[\'"](?P<ec>.{32})[\'"]', webpage) or
- re.search(r'SBN\.VideoLinkset\.ooyala\([\'"](?P<ec>.{32})[\'"]\)', webpage) or
- re.search(r'data-ooyala-video-id\s*=\s*[\'"](?P<ec>.{32})[\'"]', webpage))
+ mobj = (re.search(r'player\.ooyala\.com/[^"?]+[?#][^"]*?(?:embedCode|ec)=(?P<ec>[^"&]+)', webpage)
+ or re.search(r'OO\.Player\.create\([\'"].*?[\'"],\s*[\'"](?P<ec>.{32})[\'"]', webpage)
+ or re.search(r'OO\.Player\.create\.apply\(\s*OO\.Player\s*,\s*op\(\s*\[\s*[\'"][^\'"]*[\'"]\s*,\s*[\'"](?P<ec>.{32})[\'"]', webpage)
+ or re.search(r'SBN\.VideoLinkset\.ooyala\([\'"](?P<ec>.{32})[\'"]\)', webpage)
+ or re.search(r'data-ooyala-video-id\s*=\s*[\'"](?P<ec>.{32})[\'"]', webpage))
if mobj is not None:
embed_token = self._search_regex(
r'embedToken[\'"]?\s*:\s*[\'"]([^\'"]+)',
@@ -3221,8 +3221,8 @@ class GenericIE(InfoExtractor):
else:
formats.append({
'url': src,
- 'ext': (mimetype2ext(src_type) or
- ext if ext in KNOWN_EXTENSIONS else 'mp4'),
+ 'ext': (mimetype2ext(src_type)
+ or ext if ext in KNOWN_EXTENSIONS else 'mp4'),
})
if formats:
self._sort_formats(formats)
diff --git a/youtube_dl/extractor/heise.py b/youtube_dl/extractor/heise.py
index 5c03780a3..d8a2f9d76 100644
--- a/youtube_dl/extractor/heise.py
+++ b/youtube_dl/extractor/heise.py
@@ -155,8 +155,8 @@ class HeiseIE(InfoExtractor):
'id': video_id,
'title': title,
'description': description,
- 'thumbnail': (xpath_text(doc, './/{http://rss.jwpcdn.com/}image') or
- self._og_search_thumbnail(webpage)),
+ 'thumbnail': (xpath_text(doc, './/{http://rss.jwpcdn.com/}image')
+ or self._og_search_thumbnail(webpage)),
'timestamp': parse_iso8601(
self._html_search_meta('date', webpage)),
'formats': formats,
diff --git a/youtube_dl/extractor/hitbox.py b/youtube_dl/extractor/hitbox.py
index 1d905dc81..3e5ff2685 100644
--- a/youtube_dl/extractor/hitbox.py
+++ b/youtube_dl/extractor/hitbox.py
@@ -58,8 +58,8 @@ class HitboxIE(InfoExtractor):
title = video_meta.get('media_status')
alt_title = video_meta.get('media_title')
description = clean_html(
- video_meta.get('media_description') or
- video_meta.get('media_description_md'))
+ video_meta.get('media_description')
+ or video_meta.get('media_description_md'))
duration = float_or_none(video_meta.get('media_duration'))
uploader = video_meta.get('media_user_name')
views = int_or_none(video_meta.get('media_views'))
diff --git a/youtube_dl/extractor/hitrecord.py b/youtube_dl/extractor/hitrecord.py
index 01a6946d0..fd5dc2935 100644
--- a/youtube_dl/extractor/hitrecord.py
+++ b/youtube_dl/extractor/hitrecord.py
@@ -47,8 +47,8 @@ class HitRecordIE(InfoExtractor):
tags = [
t['text']
for t in tags_list
- if isinstance(t, dict) and t.get('text') and
- isinstance(t['text'], compat_str)]
+ if isinstance(t, dict) and t.get('text')
+ and isinstance(t['text'], compat_str)]
return {
'id': video_id,
diff --git a/youtube_dl/extractor/hketv.py b/youtube_dl/extractor/hketv.py
index b57927fc1..1f3502b90 100644
--- a/youtube_dl/extractor/hketv.py
+++ b/youtube_dl/extractor/hketv.py
@@ -77,13 +77,13 @@ class HKETVIE(InfoExtractor):
title = (
self._html_search_meta(
- ('ed_title', 'search.ed_title'), webpage, default=None) or
- self._search_regex(
+ ('ed_title', 'search.ed_title'), webpage, default=None)
+ or self._search_regex(
r'data-favorite_title_(?:eng|chi)=(["\'])(?P<id>(?:(?!\1).)+)\1',
- webpage, 'title', default=None, group='url') or
- self._html_search_regex(
- r'<h1>([^<]+)</h1>', webpage, 'title', default=None) or
- self._og_search_title(webpage)
+ webpage, 'title', default=None, group='url')
+ or self._html_search_regex(
+ r'<h1>([^<]+)</h1>', webpage, 'title', default=None)
+ or self._og_search_title(webpage)
)
file_id = self._search_regex(
diff --git a/youtube_dl/extractor/hrti.py b/youtube_dl/extractor/hrti.py
index 9ba1aa703..23f7b1fc9 100644
--- a/youtube_dl/extractor/hrti.py
+++ b/youtube_dl/extractor/hrti.py
@@ -60,8 +60,8 @@ class HRTiBaseIE(InfoExtractor):
language=self._APP_LANGUAGE,
application_id=self._APP_PUBLICATION_ID)
- self._login_url = (modules['user']['resources']['login']['uri'] +
- '/format/json').format(session_id=self._session_id)
+ self._login_url = (modules['user']['resources']['login']['uri']
+ + '/format/json').format(session_id=self._session_id)
self._logout_url = modules['user']['resources']['logout']['uri']
diff --git a/youtube_dl/extractor/infoq.py b/youtube_dl/extractor/infoq.py
index 391c2f5d0..18249cf9b 100644
--- a/youtube_dl/extractor/infoq.py
+++ b/youtube_dl/extractor/infoq.py
@@ -122,9 +122,9 @@ class InfoQIE(BokeCCBaseIE):
formats = self._extract_bokecc_formats(webpage, video_id)
else:
formats = (
- self._extract_rtmp_video(webpage) +
- self._extract_http_video(webpage) +
- self._extract_http_audio(webpage, video_id))
+ self._extract_rtmp_video(webpage)
+ + self._extract_http_video(webpage)
+ + self._extract_http_audio(webpage, video_id))
self._sort_formats(formats)
diff --git a/youtube_dl/extractor/iqiyi.py b/youtube_dl/extractor/iqiyi.py
index 4b081bd46..cd11aa70f 100644
--- a/youtube_dl/extractor/iqiyi.py
+++ b/youtube_dl/extractor/iqiyi.py
@@ -383,9 +383,9 @@ class IqiyiIE(InfoExtractor):
self._sleep(5, video_id)
self._sort_formats(formats)
- title = (get_element_by_id('widget-videotitle', webpage) or
- clean_html(get_element_by_attribute('class', 'mod-play-tit', webpage)) or
- self._html_search_regex(r'<span[^>]+data-videochanged-title="word"[^>]*>([^<]+)</span>', webpage, 'title'))
+ title = (get_element_by_id('widget-videotitle', webpage)
+ or clean_html(get_element_by_attribute('class', 'mod-play-tit', webpage))
+ or self._html_search_regex(r'<span[^>]+data-videochanged-title="word"[^>]*>([^<]+)</span>', webpage, 'title'))
return {
'id': video_id,
diff --git a/youtube_dl/extractor/itv.py b/youtube_dl/extractor/itv.py
index de65b6bb4..ad2f4eca5 100644
--- a/youtube_dl/extractor/itv.py
+++ b/youtube_dl/extractor/itv.py
@@ -77,10 +77,10 @@ class ITVIE(InfoExtractor):
return etree.SubElement(element, _add_ns(name))
production_id = (
- params.get('data-video-autoplay-id') or
- '%s#001' % (
- params.get('data-video-episode-id') or
- video_id.replace('a', '/')))
+ params.get('data-video-autoplay-id')
+ or '%s#001' % (
+ params.get('data-video-episode-id')
+ or video_id.replace('a', '/')))
req_env = etree.Element(_add_ns('soapenv:Envelope'))
_add_sub_element(req_env, 'soapenv:Header')
diff --git a/youtube_dl/extractor/kaltura.py b/youtube_dl/extractor/kaltura.py
index 79162f665..639d73837 100644
--- a/youtube_dl/extractor/kaltura.py
+++ b/youtube_dl/extractor/kaltura.py
@@ -118,8 +118,8 @@ class KalturaIE(InfoExtractor):
(?P<q2>['"])_?(?P<partner_id>(?:(?!(?P=q2)).)+)(?P=q2),.*?
(?P<q3>['"])entry_?[Ii]d(?P=q3)\s*:\s*
(?P<q4>['"])(?P<id>(?:(?!(?P=q4)).)+)(?P=q4)(?:,|\s*\})
- """, webpage) or
- re.search(
+ """, webpage)
+ or re.search(
r'''(?xs)
(?P<q1>["'])
(?:https?:)?//cdnapi(?:sec)?\.kaltura\.com(?::\d+)?/(?:(?!(?P=q1)).)*\b(?:p|partner_id)/(?P<partner_id>\d+)(?:(?!(?P=q1)).)*
@@ -132,8 +132,8 @@ class KalturaIE(InfoExtractor):
\[\s*(?P<q2_1>["'])entry_?[Ii]d(?P=q2_1)\s*\]\s*=\s*
)
(?P<q3>["'])(?P<id>(?:(?!(?P=q3)).)+)(?P=q3)
- ''', webpage) or
- re.search(
+ ''', webpage)
+ or re.search(
r'''(?xs)
<(?:iframe[^>]+src|meta[^>]+\bcontent)=(?P<q1>["'])
(?:https?:)?//(?:(?:www|cdnapi(?:sec)?)\.)?kaltura\.com/(?:(?!(?P=q1)).)*\b(?:p|partner_id)/(?P<partner_id>\d+)
diff --git a/youtube_dl/extractor/karrierevideos.py b/youtube_dl/extractor/karrierevideos.py
index f236a2f78..7b291e0a0 100644
--- a/youtube_dl/extractor/karrierevideos.py
+++ b/youtube_dl/extractor/karrierevideos.py
@@ -47,8 +47,8 @@ class KarriereVideosIE(InfoExtractor):
webpage = self._download_webpage(url, video_id)
- title = (self._html_search_meta('title', webpage, default=None) or
- self._search_regex(r'<h1 class="title">([^<]+)</h1>', webpage, 'video title'))
+ title = (self._html_search_meta('title', webpage, default=None)
+ or self._search_regex(r'<h1 class="title">([^<]+)</h1>', webpage, 'video title'))
video_id = self._search_regex(
r'/config/video/(.+?)\.xml', webpage, 'video id')
diff --git a/youtube_dl/extractor/motherless.py b/youtube_dl/extractor/motherless.py
index d4bd273b6..43fd70f11 100644
--- a/youtube_dl/extractor/motherless.py
+++ b/youtube_dl/extractor/motherless.py
@@ -80,8 +80,8 @@ class MotherlessIE(InfoExtractor):
video_url = (self._html_search_regex(
(r'setup\(\{\s*["\']file["\']\s*:\s*(["\'])(?P<url>(?:(?!\1).)+)\1',
r'fileurl\s*=\s*(["\'])(?P<url>(?:(?!\1).)+)\1'),
- webpage, 'video URL', default=None, group='url') or
- 'http://cdn4.videos.motherlessmedia.com/videos/%s.mp4?fs=opencloud' % video_id)
+ webpage, 'video URL', default=None, group='url')
+ or 'http://cdn4.videos.motherlessmedia.com/videos/%s.mp4?fs=opencloud' % video_id)
age_limit = self._rta_search(webpage)
view_count = str_to_int(self._html_search_regex(
r'<strong>Views</strong>\s+([^<]+)<',
diff --git a/youtube_dl/extractor/ndtv.py b/youtube_dl/extractor/ndtv.py
index ddec89f2c..bc3eb9160 100644
--- a/youtube_dl/extractor/ndtv.py
+++ b/youtube_dl/extractor/ndtv.py
@@ -84,8 +84,8 @@ class NDTVIE(InfoExtractor):
# '__title' does not contain extra words such as sub-site name, "Video" etc.
title = compat_urllib_parse_unquote_plus(
- self._search_regex(r"__title\s*=\s*'([^']+)'", webpage, 'title', default=None) or
- self._og_search_title(webpage))
+ self._search_regex(r"__title\s*=\s*'([^']+)'", webpage, 'title', default=None)
+ or self._og_search_title(webpage))
filename = self._search_regex(
r"(?:__)?filename\s*[:=]\s*'([^']+)'", webpage, 'video filename')
diff --git a/youtube_dl/extractor/nextmedia.py b/youtube_dl/extractor/nextmedia.py
index 680f03aad..7bd1290bf 100644
--- a/youtube_dl/extractor/nextmedia.py
+++ b/youtube_dl/extractor/nextmedia.py
@@ -180,8 +180,8 @@ class AppleDailyIE(NextMediaIE):
_URL_PATTERN = r'\{url: \'(.+)\'\}'
def _fetch_title(self, page):
- return (self._html_search_regex(r'<h1 id="h1">([^<>]+)</h1>', page, 'news title', default=None) or
- self._html_search_meta('description', page, 'news title'))
+ return (self._html_search_regex(r'<h1 id="h1">([^<>]+)</h1>', page, 'news title', default=None)
+ or self._html_search_meta('description', page, 'news title'))
def _fetch_thumbnail(self, page):
return self._html_search_regex(r"setInitialImage\(\'([^']+)'\)", page, 'video thumbnail', fatal=False)
diff --git a/youtube_dl/extractor/niconico.py b/youtube_dl/extractor/niconico.py
index 76b412ff1..eb07ca776 100644
--- a/youtube_dl/extractor/niconico.py
+++ b/youtube_dl/extractor/niconico.py
@@ -369,14 +369,14 @@ class NiconicoIE(InfoExtractor):
video_detail = watch_api_data.get('videoDetail', {})
thumbnail = (
- get_video_info(['thumbnail_url', 'thumbnailURL']) or
- self._html_search_meta('image', webpage, 'thumbnail', default=None) or
- video_detail.get('thumbnail'))
+ get_video_info(['thumbnail_url', 'thumbnailURL'])
+ or self._html_search_meta('image', webpage, 'thumbnail', default=None)
+ or video_detail.get('thumbnail'))
description = get_video_info('description')
- timestamp = (parse_iso8601(get_video_info('first_retrieve')) or
- unified_timestamp(get_video_info('postedDateTime')))
+ timestamp = (parse_iso8601(get_video_info('first_retrieve'))
+ or unified_timestamp(get_video_info('postedDateTime')))
if not timestamp:
match = self._html_search_meta('datePublished', webpage, 'date published', default=None)
if match:
@@ -395,9 +395,9 @@ class NiconicoIE(InfoExtractor):
view_count = int_or_none(match.replace(',', ''))
view_count = view_count or video_detail.get('viewCount')
- comment_count = (int_or_none(get_video_info('comment_num')) or
- video_detail.get('commentCount') or
- try_get(api_data, lambda x: x['thread']['commentCount']))
+ comment_count = (int_or_none(get_video_info('comment_num'))
+ or video_detail.get('commentCount')
+ or try_get(api_data, lambda x: x['thread']['commentCount']))
if not comment_count:
match = self._html_search_regex(
r'>Comments: <strong[^>]*>([^<]+)</strong>',
@@ -406,11 +406,11 @@ class NiconicoIE(InfoExtractor):
comment_count = int_or_none(match.replace(',', ''))
duration = (parse_duration(
- get_video_info('length') or
- self._html_search_meta(
- 'video:duration', webpage, 'video duration', default=None)) or
- video_detail.get('length') or
- get_video_info('duration'))
+ get_video_info('length')
+ or self._html_search_meta(
+ 'video:duration', webpage, 'video duration', default=None))
+ or video_detail.get('length')
+ or get_video_info('duration'))
webpage_url = get_video_info('watch_url') or url
diff --git a/youtube_dl/extractor/nrk.py b/youtube_dl/extractor/nrk.py
index 072f920a9..5f43e692f 100644
--- a/youtube_dl/extractor/nrk.py
+++ b/youtube_dl/extractor/nrk.py
@@ -45,8 +45,8 @@ class NRKBaseIE(InfoExtractor):
entries = []
conviva = data.get('convivaStatistics') or {}
- live = (data.get('mediaElementType') == 'Live' or
- data.get('isLive') is True or conviva.get('isLive'))
+ live = (data.get('mediaElementType') == 'Live'
+ or data.get('isLive') is True or conviva.get('isLive'))
def make_title(t):
return self._live_title(t) if live else t
diff --git a/youtube_dl/extractor/ooyala.py b/youtube_dl/extractor/ooyala.py
index e42d67df9..995b24d1b 100644
--- a/youtube_dl/extractor/ooyala.py
+++ b/youtube_dl/extractor/ooyala.py
@@ -31,8 +31,8 @@ class OoyalaBaseIE(InfoExtractor):
title = metadata['title']
auth_data = self._download_json(
- self._AUTHORIZATION_URL_TEMPLATE % (pcode, embed_code) +
- compat_urllib_parse_urlencode({
+ self._AUTHORIZATION_URL_TEMPLATE % (pcode, embed_code)
+ + compat_urllib_parse_urlencode({
'domain': domain,
'supportedFormats': supportedformats or 'mp4,rtmp,m3u8,hds,dash,smooth',
'embedToken': embed_token,
diff --git a/youtube_dl/extractor/openload.py b/youtube_dl/extractor/openload.py
index f77296f42..a8e906858 100644
--- a/youtube_dl/extractor/openload.py
+++ b/youtube_dl/extractor/openload.py
@@ -43,9 +43,9 @@ def cookie_to_dict(cookie):
if cookie.discard is not None:
cookie_dict['discard'] = cookie.discard
try:
- if (cookie.has_nonstandard_attr('httpOnly') or
- cookie.has_nonstandard_attr('httponly') or
- cookie.has_nonstandard_attr('HttpOnly')):
+ if (cookie.has_nonstandard_attr('httpOnly')
+ or cookie.has_nonstandard_attr('httponly')
+ or cookie.has_nonstandard_attr('HttpOnly')):
cookie_dict['httponly'] = True
except TypeError:
pass
diff --git a/youtube_dl/extractor/podomatic.py b/youtube_dl/extractor/podomatic.py
index 25fcebf9f..e782e3f1f 100644
--- a/youtube_dl/extractor/podomatic.py
+++ b/youtube_dl/extractor/podomatic.py
@@ -50,8 +50,8 @@ class PodomaticIE(InfoExtractor):
video_id = mobj.group('id')
channel = mobj.group('channel') or mobj.group('channel_2')
- json_url = (('%s://%s.podomatic.com/entry/embed_params/%s' +
- '?permalink=true&rtmp=0') %
+ json_url = (('%s://%s.podomatic.com/entry/embed_params/%s'
+ + '?permalink=true&rtmp=0') %
(mobj.group('proto'), channel, video_id))
data_json = self._download_webpage(
json_url, video_id, 'Downloading video info')
diff --git a/youtube_dl/extractor/ruutu.py b/youtube_dl/extractor/ruutu.py
index f05401b36..f984040aa 100644
--- a/youtube_dl/extractor/ruutu.py
+++ b/youtube_dl/extractor/ruutu.py
@@ -91,8 +91,8 @@ class RuutuIE(InfoExtractor):
extract_formats(child)
elif child.tag.endswith('File'):
video_url = child.text
- if (not video_url or video_url in processed_urls or
- any(p in video_url for p in ('NOT_USED', 'NOT-USED'))):
+ if (not video_url or video_url in processed_urls
+ or any(p in video_url for p in ('NOT_USED', 'NOT-USED'))):
continue
processed_urls.append(video_url)
ext = determine_ext(video_url)
diff --git a/youtube_dl/extractor/sbs.py b/youtube_dl/extractor/sbs.py
index 845712a76..0e623ff7b 100644
--- a/youtube_dl/extractor/sbs.py
+++ b/youtube_dl/extractor/sbs.py
@@ -55,8 +55,8 @@ class SBSIE(InfoExtractor):
raise ExtractorError('%s said: %s' % (self.IE_NAME, error_message), expected=True)
urls = player_params['releaseUrls']
- theplatform_url = (urls.get('progressive') or urls.get('html') or
- urls.get('standard') or player_params['relatedItemsURL'])
+ theplatform_url = (urls.get('progressive') or urls.get('html')
+ or urls.get('standard') or player_params['relatedItemsURL'])
return {
'_type': 'url_transparent',
diff --git a/youtube_dl/extractor/vevo.py b/youtube_dl/extractor/vevo.py
index 4aa72cbd1..232e05816 100644
--- a/youtube_dl/extractor/vevo.py
+++ b/youtube_dl/extractor/vevo.py
@@ -275,8 +275,8 @@ class VevoIE(VevoBaseIE):
genres = video_info.get('genres')
genre = (
- genres[0] if genres and isinstance(genres, list) and
- isinstance(genres[0], compat_str) else None)
+ genres[0] if genres and isinstance(genres, list)
+ and isinstance(genres[0], compat_str) else None)
is_explicit = video_info.get('isExplicit')
if is_explicit is True:
diff --git a/youtube_dl/extractor/vk.py b/youtube_dl/extractor/vk.py
index 1072550f1..b7ce2fb97 100644
--- a/youtube_dl/extractor/vk.py
+++ b/youtube_dl/extractor/vk.py
@@ -443,8 +443,8 @@ class VKIE(VKBaseIE):
format_url = url_or_none(format_url)
if not format_url or not format_url.startswith(('http', '//', 'rtmp')):
continue
- if (format_id.startswith(('url', 'cache')) or
- format_id in ('extra_data', 'live_mp4', 'postlive_mp4')):
+ if (format_id.startswith(('url', 'cache'))
+ or format_id in ('extra_data', 'live_mp4', 'postlive_mp4')):
height = int_or_none(self._search_regex(
r'^(?:url|cache)(\d+)', format_id, 'height', default=None))
formats.append({
diff --git a/youtube_dl/extractor/yandexvideo.py b/youtube_dl/extractor/yandexvideo.py
index 940c24af3..1aea95383 100644
--- a/youtube_dl/extractor/yandexvideo.py
+++ b/youtube_dl/extractor/yandexvideo.py
@@ -70,9 +70,9 @@ class YandexVideoIE(InfoExtractor):
description = content.get('description')
thumbnail = content.get('thumbnail')
- timestamp = (int_or_none(content.get('release_date')) or
- int_or_none(content.get('release_date_ut')) or
- int_or_none(content.get('start_time')))
+ timestamp = (int_or_none(content.get('release_date'))
+ or int_or_none(content.get('release_date_ut'))
+ or int_or_none(content.get('start_time')))
duration = int_or_none(content.get('duration'))
series = content.get('program_title')
age_limit = int_or_none(content.get('restriction_age'))
diff --git a/youtube_dl/extractor/youku.py b/youtube_dl/extractor/youku.py
index 2f5a7b023..61d1ab209 100644
--- a/youtube_dl/extractor/youku.py
+++ b/youtube_dl/extractor/youku.py
@@ -258,8 +258,8 @@ class YoukuShowIE(InfoExtractor):
transform_source=lambda s: js_to_json(strip_jsonp(s))).get('html')
if playlist_data is None:
return [None, None]
- drama_list = (get_element_by_class('p-drama-grid', playlist_data) or
- get_element_by_class('p-drama-half-row', playlist_data))
+ drama_list = (get_element_by_class('p-drama-grid', playlist_data)
+ or get_element_by_class('p-drama-half-row', playlist_data))
if drama_list is None:
raise ExtractorError('No episodes found')
video_urls = re.findall(r'<a[^>]+href="([^"]+)"', drama_list)
diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py
index 8619f3838..06005f8d2 100644
--- a/youtube_dl/extractor/youtube.py
+++ b/youtube_dl/extractor/youtube.py
@@ -2052,8 +2052,8 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
url_or_none(try_get(
player_response,
lambda x: x['streamingData']['hlsManifestUrl'],
- compat_str)) or
- url_or_none(try_get(
+ compat_str))
+ or url_or_none(try_get(
video_info, lambda x: x['hlsvp'][0], compat_str)))
if manifest_url:
formats = []
@@ -2102,10 +2102,10 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
self._downloader.report_warning('unable to extract uploader nickname')
channel_id = (
- str_or_none(video_details.get('channelId')) or
- self._html_search_meta(
- 'channelId', video_webpage, 'channel id', default=None) or
- self._search_regex(
+ str_or_none(video_details.get('channelId'))
+ or self._html_search_meta(
+ 'channelId', video_webpage, 'channel id', default=None)
+ or self._search_regex(
r'data-channel-external-id=(["\'])(?P<id>(?:(?!\1).)+)\1',
video_webpage, 'channel id', default=None, group='id'))
channel_url = 'http://www.youtube.com/channel/%s' % channel_id if channel_id else None
@@ -2564,9 +2564,9 @@ class YoutubePlaylistIE(YoutubePlaylistBaseInfoExtractor):
search_title = lambda class_name: get_element_by_attribute('class', class_name, webpage)
title_span = (
- search_title('playlist-title') or
- search_title('title long-title') or
- search_title('title'))
+ search_title('playlist-title')
+ or search_title('title long-title')
+ or search_title('title'))
title = clean_html(title_span)
return self.playlist_result(url_results, playlist_id, title)
diff --git a/youtube_dl/extractor/zattoo.py b/youtube_dl/extractor/zattoo.py
index ee514666b..6bac3026e 100644
--- a/youtube_dl/extractor/zattoo.py
+++ b/youtube_dl/extractor/zattoo.py
@@ -86,8 +86,8 @@ class ZattooPlatformBaseIE(InfoExtractor):
return next(
chan['cid'] for chan in channel_list
if chan.get('cid') and (
- chan.get('display_alias') == channel_name or
- chan.get('cid') == channel_name))
+ chan.get('display_alias') == channel_name
+ or chan.get('cid') == channel_name))
except StopIteration:
raise ExtractorError('Could not extract channel id')