diff options (diffstat)

 -rw-r--r--  youtube_dl/FileDownloader.py |  7
 -rwxr-xr-x  youtube_dl/InfoExtractors.py | 10
 -rw-r--r--  youtube_dl/__init__.py       |  4

3 files changed, 9 insertions, 12 deletions
diff --git a/youtube_dl/FileDownloader.py b/youtube_dl/FileDownloader.py
index a861086c3..d9a4ecd3a 100644
--- a/youtube_dl/FileDownloader.py
+++ b/youtube_dl/FileDownloader.py
@@ -433,11 +433,8 @@ class FileDownloader(object):
             try:
                 srtfn = filename.rsplit('.', 1)[0] + u'.srt'
                 self.report_writesubtitles(srtfn)
-                srtfile = open(encodeFilename(srtfn), 'wb')
-                try:
-                    srtfile.write(info_dict['subtitles'].encode('utf-8'))
-                finally:
-                    srtfile.close()
+                with io.open(encodeFilename(srtfn), 'w', encoding='utf-8') as srtfile:
+                    srtfile.write(info_dict['subtitles'])
             except (OSError, IOError):
                 self.trouble(u'ERROR: Cannot write subtitles file ' + descfn)
                 return
diff --git a/youtube_dl/InfoExtractors.py b/youtube_dl/InfoExtractors.py
index aa4a6500b..6201ccad7 100755
--- a/youtube_dl/InfoExtractors.py
+++ b/youtube_dl/InfoExtractors.py
@@ -235,7 +235,7 @@ class YoutubeIE(InfoExtractor):
         elif 'en' in srt_lang_list:
             srt_lang = 'en'
         else:
-            srt_lang = srt_lang_list.keys()[0]
+            srt_lang = list(srt_lang_list.keys())[0]
         if not srt_lang in srt_lang_list:
             return (u'WARNING: no closed captions found in the specified language', None)
         request = compat_urllib_request.Request('http://www.youtube.com/api/timedtext?lang=%s&name=%s&v=%s' % (srt_lang, srt_lang_list[srt_lang], video_id))
@@ -445,7 +445,7 @@ class YoutubeIE(InfoExtractor):
         elif 'url_encoded_fmt_stream_map' in video_info and len(video_info['url_encoded_fmt_stream_map']) >= 1:
             url_data_strs = video_info['url_encoded_fmt_stream_map'][0].split(',')
             url_data = [compat_parse_qs(uds) for uds in url_data_strs]
-            url_data = filter(lambda ud: 'itag' in ud and 'url' in ud, url_data)
+            url_data = [ud for ud in url_data if 'itag' in ud and 'url' in ud]
             url_map = dict((ud['itag'][0], ud['url'][0] + '&signature=' + ud['sig'][0]) for ud in url_data)

             format_limit = self._downloader.params.get('format_limit', None)
@@ -2115,7 +2115,7 @@ class FacebookIE(InfoExtractor):
         video_description = video_info.get('description', 'No description available.')

         url_map = video_info['video_urls']
-        if len(url_map.keys()) > 0:
+        if len(list(url_map.keys())) > 0:
             # Decide which formats to download
             req_format = self._downloader.params.get('format', None)
             format_limit = self._downloader.params.get('format_limit', None)
@@ -2975,7 +2975,7 @@ class MixcloudIE(InfoExtractor):
                 if file_url is not None:
                     break # got it!
         else:
-            if req_format not in formats.keys():
+            if req_format not in list(formats.keys()):
                 self._downloader.trouble(u'ERROR: format is not available')
                 return
@@ -3274,7 +3274,7 @@ class YoukuIE(InfoExtractor):
             seed = config['data'][0]['seed']

             format = self._downloader.params.get('format', None)
-            supported_format = config['data'][0]['streamfileids'].keys()
+            supported_format = list(config['data'][0]['streamfileids'].keys())

             if format is None or format == 'best':
                 if 'hd2' in supported_format:
diff --git a/youtube_dl/__init__.py b/youtube_dl/__init__.py
index efa8b813f..9b25ab3a2 100644
--- a/youtube_dl/__init__.py
+++ b/youtube_dl/__init__.py
@@ -453,8 +453,8 @@ def _real_main():
     if opts.list_extractors:
         for ie in extractors:
             print(ie.IE_NAME + (' (CURRENTLY BROKEN)' if not ie._WORKING else ''))
-            matchedUrls = filter(lambda url: ie.suitable(url), all_urls)
-            all_urls = filter(lambda url: url not in matchedUrls, all_urls)
+            matchedUrls = [url for url in all_urls if ie.suitable(url)]
+            all_urls = [url for url in all_urls if url not in matchedUrls]
             for mu in matchedUrls:
                 print(u'  ' + mu)
         sys.exit(0)
