diff options
55 files changed, 847 insertions, 295 deletions
| @@ -123,6 +123,7 @@ which means you can modify it, redistribute it or use it however you like.      --get-description          simulate, quiet but print video description      --get-filename             simulate, quiet but print output filename      --get-format               simulate, quiet but print output format +    -j, --dump-json            simulate, quiet but print JSON information      --newline                  output progress bar as new lines      --no-progress              do not print progress bar      --console-title            display progress in console titlebar @@ -48,7 +48,7 @@ else:          'data_files': [  # Installing system-wide would require sudo...              ('etc/bash_completion.d', ['youtube-dl.bash-completion']),              ('share/doc/youtube_dl', ['README.txt']), -            ('share/man/man1/', ['youtube-dl.1']) +            ('share/man/man1', ['youtube-dl.1'])          ]      }      if setuptools_available: diff --git a/test/test_age_restriction.py b/test/test_age_restriction.py index d500c6edc..506572e9e 100644 --- a/test/test_age_restriction.py +++ b/test/test_age_restriction.py @@ -24,7 +24,7 @@ def _download_restricted(url, filename, age):      }      ydl = YoutubeDL(params)      ydl.add_default_info_extractors() -    json_filename = filename + '.info.json' +    json_filename = os.path.splitext(filename)[0] + '.info.json'      try_rm(json_filename)      ydl.download([url])      res = os.path.exists(json_filename) diff --git a/test/test_download.py b/test/test_download.py index 16f200809..fe7f7b8cb 100644 --- a/test/test_download.py +++ b/test/test_download.py @@ -103,7 +103,7 @@ def generator(test_case):                  tc_filename = get_tc_filename(tc)                  try_rm(tc_filename)                  try_rm(tc_filename + '.part') -                try_rm(tc_filename + '.info.json') +                try_rm(os.path.splitext(tc_filename)[0] + '.info.json')          try_rm_tcs_files()          try:              try_num = 
1 @@ -130,11 +130,12 @@ def generator(test_case):                  if not test_case.get('params', {}).get('skip_download', False):                      self.assertTrue(os.path.exists(tc_filename), msg='Missing file ' + tc_filename)                      self.assertTrue(tc_filename in finished_hook_called) -                self.assertTrue(os.path.exists(tc_filename + '.info.json')) +                info_json_fn = os.path.splitext(tc_filename)[0] + '.info.json' +                self.assertTrue(os.path.exists(info_json_fn))                  if 'md5' in tc:                      md5_for_file = _file_md5(tc_filename)                      self.assertEqual(md5_for_file, tc['md5']) -                with io.open(tc_filename + '.info.json', encoding='utf-8') as infof: +                with io.open(info_json_fn, encoding='utf-8') as infof:                      info_dict = json.load(infof)                  for (info_field, expected) in tc.get('info_dict', {}).items():                      if isinstance(expected, compat_str) and expected.startswith('md5:'): diff --git a/test/test_playlists.py b/test/test_playlists.py index 706b6bdca..d83b3bf51 100644 --- a/test/test_playlists.py +++ b/test/test_playlists.py @@ -22,6 +22,7 @@ from youtube_dl.extractor import (      LivestreamIE,      NHLVideocenterIE,      BambuserChannelIE, +    BandcampAlbumIE  ) @@ -103,5 +104,13 @@ class TestPlaylists(unittest.TestCase):          self.assertEqual(result['title'], u'pixelversity')          self.assertTrue(len(result['entries']) >= 66) +    def test_bandcamp_album(self): +        dl = FakeYDL() +        ie = BandcampAlbumIE(dl) +        result = ie.extract('http://mpallante.bandcamp.com/album/nightmare-night-ep') +        self.assertIsPlaylist(result) +        self.assertEqual(result['title'], u'Nightmare Night EP') +        self.assertTrue(len(result['entries']) >= 4) +  if __name__ == '__main__':      unittest.main() diff --git a/test/test_utils.py b/test/test_utils.py index 
f3fbff042..e9e590e74 100644 --- a/test/test_utils.py +++ b/test/test_utils.py @@ -24,6 +24,8 @@ from youtube_dl.utils import (      xpath_with_ns,      smuggle_url,      unsmuggle_url, +    shell_quote, +    encodeFilename,  )  if sys.version_info < (3, 0): @@ -170,6 +172,10 @@ class TestUtil(unittest.TestCase):          self.assertEqual(res_url, url)          self.assertEqual(res_data, None) +    def test_shell_quote(self): +        args = ['ffmpeg', '-i', encodeFilename(u'ñ€ß\'.mp4')] +        self.assertEqual(shell_quote(args), u"""ffmpeg -i 'ñ€ß'"'"'.mp4'""") +  if __name__ == '__main__':      unittest.main() diff --git a/test/test_write_info_json.py b/test/test_write_info_json.py index a5b6f6972..30c4859fd 100644 --- a/test/test_write_info_json.py +++ b/test/test_write_info_json.py @@ -31,7 +31,7 @@ params = get_params({  TEST_ID = 'BaW_jenozKc' -INFO_JSON_FILE = TEST_ID + '.mp4.info.json' +INFO_JSON_FILE = TEST_ID + '.info.json'  DESCRIPTION_FILE = TEST_ID + '.mp4.description'  EXPECTED_DESCRIPTION = u'''test chars:  "'/\ä↭𝕐 diff --git a/youtube_dl/FileDownloader.py b/youtube_dl/FileDownloader.py index 088f59586..e5a542ed5 100644 --- a/youtube_dl/FileDownloader.py +++ b/youtube_dl/FileDownloader.py @@ -5,9 +5,6 @@ import subprocess  import sys  import time -if os.name == 'nt': -    import ctypes -  from .utils import (      compat_urllib_error,      compat_urllib_request, @@ -151,16 +148,8 @@ class FileDownloader(object):      def to_stderr(self, message):          self.ydl.to_screen(message) -    def to_cons_title(self, message): -        """Set console/terminal window title to message.""" -        if not self.params.get('consoletitle', False): -            return -        if os.name == 'nt' and ctypes.windll.kernel32.GetConsoleWindow(): -            # c_wchar_p() might not be necessary if `message` is -            # already of type unicode() -            ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message)) -        elif 'TERM' in os.environ: - 
           self.to_screen('\033]0;%s\007' % message, skip_eol=True) +    def to_console_title(self, message): +        self.ydl.to_console_title(message)      def trouble(self, *args, **kargs):          self.ydl.trouble(*args, **kargs) @@ -249,7 +238,7 @@ class FileDownloader(object):          else:              self.to_screen(u'\r%s[download] %s of %s at %s ETA %s' %                  (clear_line, percent_str, data_len_str, speed_str, eta_str), skip_eol=True) -        self.to_cons_title(u'youtube-dl - %s of %s at %s ETA %s' % +        self.to_console_title(u'youtube-dl - %s of %s at %s ETA %s' %                  (percent_str.strip(), data_len_str.strip(), speed_str.strip(), eta_str.strip()))      def report_resuming_byte(self, resume_len): diff --git a/youtube_dl/YoutubeDL.py b/youtube_dl/YoutubeDL.py index 5253c39e1..2700051cf 100644 --- a/youtube_dl/YoutubeDL.py +++ b/youtube_dl/YoutubeDL.py @@ -5,6 +5,7 @@ from __future__ import absolute_import  import errno  import io +import json  import os  import re  import shutil @@ -13,7 +14,34 @@ import sys  import time  import traceback -from .utils import * +if os.name == 'nt': +    import ctypes + +from .utils import ( +    compat_http_client, +    compat_print, +    compat_str, +    compat_urllib_error, +    compat_urllib_request, +    ContentTooShortError, +    date_from_str, +    DateRange, +    determine_ext, +    DownloadError, +    encodeFilename, +    ExtractorError, +    locked_file, +    MaxDownloadsReached, +    PostProcessingError, +    preferredencoding, +    SameFileError, +    sanitize_filename, +    subtitles_filename, +    takewhile_inclusive, +    UnavailableVideoError, +    write_json_file, +    write_string, +)  from .extractor import get_info_extractor, gen_extractors  from .FileDownloader import FileDownloader @@ -57,6 +85,7 @@ class YoutubeDL(object):      forcethumbnail:    Force printing thumbnail URL.      forcedescription:  Force printing description.      
forcefilename:     Force printing final filename. +    forcejson:         Force printing info_dict as JSON.      simulate:          Do not download the video files.      format:            Video format code.      format_limit:      Highest quality format to try. @@ -176,6 +205,37 @@ class YoutubeDL(object):              output = output.encode(preferredencoding())          sys.stderr.write(output) +    def to_console_title(self, message): +        if not self.params.get('consoletitle', False): +            return +        if os.name == 'nt' and ctypes.windll.kernel32.GetConsoleWindow(): +            # c_wchar_p() might not be necessary if `message` is +            # already of type unicode() +            ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message)) +        elif 'TERM' in os.environ: +            write_string(u'\033]0;%s\007' % message, self._screen_file) + +    def save_console_title(self): +        if not self.params.get('consoletitle', False): +            return +        if 'TERM' in os.environ: +            # Save the title on stack +            write_string(u'\033[22;0t', self._screen_file) + +    def restore_console_title(self): +        if not self.params.get('consoletitle', False): +            return +        if 'TERM' in os.environ: +            # Restore the title from stack +            write_string(u'\033[23;0t', self._screen_file) + +    def __enter__(self): +        self.save_console_title() +        return self + +    def __exit__(self, *args): +        self.restore_console_title() +      def fixed_template(self):          """Checks if the output template is fixed."""          return (re.search(u'(?u)%\\(.+?\\)s', self.params['outtmpl']) is None) @@ -254,7 +314,7 @@ class YoutubeDL(object):          """Report file has already been fully downloaded."""          try:              self.to_screen(u'[download] %s has already been downloaded' % file_name) -        except (UnicodeEncodeError) as err: +        except 
UnicodeEncodeError:              self.to_screen(u'[download] The file has already been downloaded')      def increment_downloads(self): @@ -385,7 +445,7 @@ class YoutubeDL(object):          result_type = ie_result.get('_type', 'video') # If not given we suppose it's a video, support the default old system          if result_type == 'video':              self.add_extra_info(ie_result, extra_info) -            return self.process_video_result(ie_result) +            return self.process_video_result(ie_result, download=download)          elif result_type == 'url':              # We have to add extra_info to the results because it may be              # contained in a playlist @@ -579,7 +639,7 @@ class YoutubeDL(object):          # Forced printings          if self.params.get('forcetitle', False): -            compat_print(info_dict['title']) +            compat_print(info_dict['fulltitle'])          if self.params.get('forceid', False):              compat_print(info_dict['id'])          if self.params.get('forceurl', False): @@ -593,6 +653,8 @@ class YoutubeDL(object):              compat_print(filename)          if self.params.get('forceformat', False):              compat_print(info_dict['format']) +        if self.params.get('forcejson', False): +            compat_print(json.dumps(info_dict))          # Do nothing else if in simulate mode          if self.params.get('simulate', False): @@ -640,7 +702,7 @@ class YoutubeDL(object):              # subtitles download errors are already managed as troubles in relevant IE              # that way it will silently go on when used with unsupporting IE              subtitles = info_dict['subtitles'] -            sub_format = self.params.get('subtitlesformat') +            sub_format = self.params.get('subtitlesformat', 'srt')              for sub_lang in subtitles.keys():                  sub = subtitles[sub_lang]                  if sub is None: @@ -655,7 +717,7 @@ class YoutubeDL(object):                      return       
   if self.params.get('writeinfojson', False): -            infofn = filename + u'.info.json' +            infofn = os.path.splitext(filename)[0] + u'.info.json'              self.report_writeinfojson(infofn)              try:                  json_info_dict = dict((k, v) for k, v in info_dict.items() if not k in ['urlhandle']) @@ -781,20 +843,42 @@ class YoutubeDL(object):          return res      def list_formats(self, info_dict): +        def format_note(fdict): +            if fdict.get('format_note') is not None: +                return fdict['format_note'] +            res = u'' +            if fdict.get('vcodec') is not None: +                res += u'%-5s' % fdict['vcodec'] +            elif fdict.get('vbr') is not None: +                res += u'video' +            if fdict.get('vbr') is not None: +                res += u'@%4dk' % fdict['vbr'] +            if fdict.get('acodec') is not None: +                if res: +                    res += u', ' +                res += u'%-5s' % fdict['acodec'] +            elif fdict.get('abr') is not None: +                if res: +                    res += u', ' +                res += 'audio' +            if fdict.get('abr') is not None: +                res += u'@%3dk' % fdict['abr'] +            return res +          def line(format):              return (u'%-20s%-10s%-12s%s' % (                  format['format_id'],                  format['ext'],                  self.format_resolution(format), -                format.get('format_note', ''), +                format_note(format),                  )              )          formats = info_dict.get('formats', [info_dict])          formats_s = list(map(line, formats))          if len(formats) > 1: -            formats_s[0] += (' ' if formats[0].get('format_note') else '') + '(worst)' -            formats_s[-1] += (' ' if formats[-1].get('format_note') else '') + '(best)' +            formats_s[0] += (' ' if format_note(formats[0]) else '') + '(worst)' +            
formats_s[-1] += (' ' if format_note(formats[-1]) else '') + '(best)'          header_line = line({              'format_id': u'format code', 'ext': u'extension', diff --git a/youtube_dl/__init__.py b/youtube_dl/__init__.py index 4dee487ab..64ebf4d48 100644 --- a/youtube_dl/__init__.py +++ b/youtube_dl/__init__.py @@ -34,6 +34,7 @@ __authors__  = (      'Andras Elso',      'Jelle van der Waa',      'Marcin Cieślak', +    'Anton Larionov',  )  __license__ = 'Public Domain' @@ -306,6 +307,9 @@ def parseOpts(overrideArguments=None):      verbosity.add_option('--get-format',              action='store_true', dest='getformat',              help='simulate, quiet but print output format', default=False) +    verbosity.add_option('-j', '--dump-json', +            action='store_true', dest='dumpjson', +            help='simulate, quiet but print JSON information', default=False)      verbosity.add_option('--newline',              action='store_true', dest='progress_with_newline', help='output progress bar as new lines', default=False)      verbosity.add_option('--no-progress', @@ -603,13 +607,12 @@ def _real_main(argv=None):                       u' file! 
Use "%%(ext)s" instead of %r' %                       determine_ext(outtmpl, u'')) -    # YoutubeDL -    ydl = YoutubeDL({ +    ydl_opts = {          'usenetrc': opts.usenetrc,          'username': opts.username,          'password': opts.password,          'videopassword': opts.videopassword, -        'quiet': (opts.quiet or opts.geturl or opts.gettitle or opts.getid or opts.getthumbnail or opts.getdescription or opts.getfilename or opts.getformat), +        'quiet': (opts.quiet or opts.geturl or opts.gettitle or opts.getid or opts.getthumbnail or opts.getdescription or opts.getfilename or opts.getformat or opts.dumpjson),          'forceurl': opts.geturl,          'forcetitle': opts.gettitle,          'forceid': opts.getid, @@ -617,8 +620,9 @@ def _real_main(argv=None):          'forcedescription': opts.getdescription,          'forcefilename': opts.getfilename,          'forceformat': opts.getformat, +        'forcejson': opts.dumpjson,          'simulate': opts.simulate, -        'skip_download': (opts.skip_download or opts.simulate or opts.geturl or opts.gettitle or opts.getid or opts.getthumbnail or opts.getdescription or opts.getfilename or opts.getformat), +        'skip_download': (opts.skip_download or opts.simulate or opts.geturl or opts.gettitle or opts.getid or opts.getthumbnail or opts.getdescription or opts.getfilename or opts.getformat or opts.dumpjson),          'format': opts.format,          'format_limit': opts.format_limit,          'listformats': opts.listformats, @@ -667,61 +671,63 @@ def _real_main(argv=None):          'youtube_print_sig_code': opts.youtube_print_sig_code,          'age_limit': opts.age_limit,          'download_archive': opts.download_archive, -        }) +    } -    if opts.verbose: -        write_string(u'[debug] youtube-dl version ' + __version__ + u'\n') -        try: -            sp = subprocess.Popen( -                ['git', 'rev-parse', '--short', 'HEAD'], -                stdout=subprocess.PIPE, 
stderr=subprocess.PIPE, -                cwd=os.path.dirname(os.path.abspath(__file__))) -            out, err = sp.communicate() -            out = out.decode().strip() -            if re.match('[0-9a-f]+', out): -                write_string(u'[debug] Git HEAD: ' + out + u'\n') -        except: +    with YoutubeDL(ydl_opts) as ydl: +        if opts.verbose: +            write_string(u'[debug] youtube-dl version ' + __version__ + u'\n')              try: -                sys.exc_clear() +                sp = subprocess.Popen( +                    ['git', 'rev-parse', '--short', 'HEAD'], +                    stdout=subprocess.PIPE, stderr=subprocess.PIPE, +                    cwd=os.path.dirname(os.path.abspath(__file__))) +                out, err = sp.communicate() +                out = out.decode().strip() +                if re.match('[0-9a-f]+', out): +                    write_string(u'[debug] Git HEAD: ' + out + u'\n')              except: -                pass -        write_string(u'[debug] Python version %s - %s' %(platform.python_version(), platform_name()) + u'\n') - -        proxy_map = {} -        for handler in opener.handlers: -            if hasattr(handler, 'proxies'): -                proxy_map.update(handler.proxies) -        write_string(u'[debug] Proxy map: ' + compat_str(proxy_map) + u'\n') - -    ydl.add_default_info_extractors() - -    # PostProcessors -    # Add the metadata pp first, the other pps will copy it -    if opts.addmetadata: -        ydl.add_post_processor(FFmpegMetadataPP()) -    if opts.extractaudio: -        ydl.add_post_processor(FFmpegExtractAudioPP(preferredcodec=opts.audioformat, preferredquality=opts.audioquality, nopostoverwrites=opts.nopostoverwrites)) -    if opts.recodevideo: -        ydl.add_post_processor(FFmpegVideoConvertor(preferedformat=opts.recodevideo)) -    if opts.embedsubtitles: -        ydl.add_post_processor(FFmpegEmbedSubtitlePP(subtitlesformat=opts.subtitlesformat)) - -    # Update version -    if 
opts.update_self: -        update_self(ydl.to_screen, opts.verbose) - -    # Maybe do nothing -    if len(all_urls) < 1: -        if not opts.update_self: -            parser.error(u'you must provide at least one URL') -        else: -            sys.exit() +                try: +                    sys.exc_clear() +                except: +                    pass +            write_string(u'[debug] Python version %s - %s' % +                         (platform.python_version(), platform_name()) + u'\n') + +            proxy_map = {} +            for handler in opener.handlers: +                if hasattr(handler, 'proxies'): +                    proxy_map.update(handler.proxies) +            write_string(u'[debug] Proxy map: ' + compat_str(proxy_map) + u'\n') + +        ydl.add_default_info_extractors() + +        # PostProcessors +        # Add the metadata pp first, the other pps will copy it +        if opts.addmetadata: +            ydl.add_post_processor(FFmpegMetadataPP()) +        if opts.extractaudio: +            ydl.add_post_processor(FFmpegExtractAudioPP(preferredcodec=opts.audioformat, preferredquality=opts.audioquality, nopostoverwrites=opts.nopostoverwrites)) +        if opts.recodevideo: +            ydl.add_post_processor(FFmpegVideoConvertor(preferedformat=opts.recodevideo)) +        if opts.embedsubtitles: +            ydl.add_post_processor(FFmpegEmbedSubtitlePP(subtitlesformat=opts.subtitlesformat)) + +        # Update version +        if opts.update_self: +            update_self(ydl.to_screen, opts.verbose) + +        # Maybe do nothing +        if len(all_urls) < 1: +            if not opts.update_self: +                parser.error(u'you must provide at least one URL') +            else: +                sys.exit() -    try: -        retcode = ydl.download(all_urls) -    except MaxDownloadsReached: -        ydl.to_screen(u'--max-download limit reached, aborting.') -        retcode = 101 +        try: +            retcode = 
ydl.download(all_urls) +        except MaxDownloadsReached: +            ydl.to_screen(u'--max-download limit reached, aborting.') +            retcode = 101      # Dump cookie jar if requested      if opts.cookiefile is not None: diff --git a/youtube_dl/extractor/__init__.py b/youtube_dl/extractor/__init__.py index 0594a3666..02f9e2546 100644 --- a/youtube_dl/extractor/__init__.py +++ b/youtube_dl/extractor/__init__.py @@ -1,5 +1,6 @@  from .appletrailers import AppleTrailersIE  from .addanime import AddAnimeIE +from .anitube import AnitubeIE  from .archiveorg import ArchiveOrgIE  from .ard import ARDIE  from .arte import ( @@ -10,7 +11,7 @@ from .arte import (  )  from .auengine import AUEngineIE  from .bambuser import BambuserIE, BambuserChannelIE -from .bandcamp import BandcampIE +from .bandcamp import BandcampIE, BandcampAlbumIE  from .bliptv import BlipTVIE, BlipTVUserIE  from .bloomberg import BloombergIE  from .breakcom import BreakIE @@ -25,6 +26,7 @@ from .comedycentral import ComedyCentralIE  from .condenast import CondeNastIE  from .criterion import CriterionIE  from .cspan import CSpanIE +from .d8 import D8IE  from .dailymotion import (      DailymotionIE,      DailymotionPlaylistIE, @@ -80,7 +82,7 @@ from .keezmovies import KeezMoviesIE  from .kickstarter import KickStarterIE  from .keek import KeekIE  from .liveleak import LiveLeakIE -from .livestream import LivestreamIE +from .livestream import LivestreamIE, LivestreamOriginalIE  from .metacafe import MetacafeIE  from .metacritic import MetacriticIE  from .mit import TechTVMITIE, MITIE @@ -116,19 +118,24 @@ from .slashdot import SlashdotIE  from .slideshare import SlideshareIE  from .sohu import SohuIE  from .soundcloud import SoundcloudIE, SoundcloudSetIE, SoundcloudUserIE -from .southparkstudios import SouthParkStudiosIE +from .southparkstudios import ( +    SouthParkStudiosIE, +    SouthparkDeIE, +)  from .space import SpaceIE  from .spankwire import SpankwireIE  from .spiegel import SpiegelIE  
from .stanfordoc import StanfordOpenClassroomIE  from .statigram import StatigramIE  from .steam import SteamIE +from .streamcloud import StreamcloudIE  from .sztvhu import SztvHuIE  from .teamcoco import TeamcocoIE  from .techtalks import TechTalksIE  from .ted import TEDIE  from .tf1 import TF1IE  from .thisav import ThisAVIE +from .toutv import TouTvIE  from .traileraddict import TrailerAddictIE  from .trilulilu import TriluliluIE  from .tube8 import Tube8IE diff --git a/youtube_dl/extractor/anitube.py b/youtube_dl/extractor/anitube.py new file mode 100644 index 000000000..691d5a844 --- /dev/null +++ b/youtube_dl/extractor/anitube.py @@ -0,0 +1,55 @@ +import re +import xml.etree.ElementTree + +from .common import InfoExtractor + + +class AnitubeIE(InfoExtractor): +    IE_NAME = u'anitube.se' +    _VALID_URL = r'https?://(?:www\.)?anitube\.se/video/(?P<id>\d+)' + +    _TEST = { +        u'url': u'http://www.anitube.se/video/36621', +        u'md5': u'59d0eeae28ea0bc8c05e7af429998d43', +        u'file': u'36621.mp4', +        u'info_dict': { +            u'id': u'36621', +            u'ext': u'mp4', +            u'title': u'Recorder to Randoseru 01', +        }, +        u'skip': u'Blocked in the US', +    } + +    def _real_extract(self, url): +        mobj = re.match(self._VALID_URL, url) +        video_id = mobj.group('id') + +        webpage = self._download_webpage(url, video_id) +        key = self._html_search_regex(r'http://www\.anitube\.se/embed/([A-Za-z0-9_-]*)', +                                      webpage, u'key') + +        webpage_config = self._download_webpage('http://www.anitube.se/nuevo/econfig.php?key=%s' % key, +                                                key) +        config_xml = xml.etree.ElementTree.fromstring(webpage_config.encode('utf-8')) + +        video_title = config_xml.find('title').text + +        formats = [] +        video_url = config_xml.find('file') +        if video_url is not None: +            formats.append({ +       
         'format_id': 'sd', +                'url': video_url.text, +            }) +        video_url = config_xml.find('filehd') +        if video_url is not None: +            formats.append({ +                'format_id': 'hd', +                'url': video_url.text, +            }) + +        return { +            'id': video_id, +            'title': video_title, +            'formats': formats +        } diff --git a/youtube_dl/extractor/arte.py b/youtube_dl/extractor/arte.py index b35a679e3..44d0b5d70 100644 --- a/youtube_dl/extractor/arte.py +++ b/youtube_dl/extractor/arte.py @@ -69,7 +69,7 @@ class ArteTvIE(InfoExtractor):              lang = mobj.group('lang')              return self._extract_liveweb(url, name, lang) -        if re.search(self._LIVE_URL, video_id) is not None: +        if re.search(self._LIVE_URL, url) is not None:              raise ExtractorError(u'Arte live streams are not yet supported, sorry')              # self.extractLiveStream(url)              # return @@ -115,7 +115,7 @@ class ArteTvIE(InfoExtractor):          event_doc = config_doc.find('event')          url_node = event_doc.find('video').find('urlHd')          if url_node is None: -            url_node = video_doc.find('urlSd') +            url_node = event_doc.find('urlSd')          return {'id': video_id,                  'title': event_doc.find('name%s' % lang.capitalize()).text, diff --git a/youtube_dl/extractor/auengine.py b/youtube_dl/extractor/auengine.py index 0febbff4f..95c038003 100644 --- a/youtube_dl/extractor/auengine.py +++ b/youtube_dl/extractor/auengine.py @@ -1,10 +1,10 @@ -import os.path  import re  from .common import InfoExtractor  from ..utils import (      compat_urllib_parse, -    compat_urllib_parse_urlparse, +    determine_ext, +    ExtractorError,  )  class AUEngineIE(InfoExtractor): @@ -25,22 +25,25 @@ class AUEngineIE(InfoExtractor):          title = self._html_search_regex(r'<title>(?P<title>.+?)</title>',                  webpage, u'title')     
     title = title.strip() -        links = re.findall(r'[^A-Za-z0-9]?(?:file|url):\s*["\'](http[^\'"&]*)', webpage) -        links = [compat_urllib_parse.unquote(l) for l in links] +        links = re.findall(r'\s(?:file|url):\s*["\']([^\'"]+)["\']', webpage) +        links = map(compat_urllib_parse.unquote, links) + +        thumbnail = None +        video_url = None          for link in links: -            root, pathext = os.path.splitext(compat_urllib_parse_urlparse(link).path) -            if pathext == '.png': +            if link.endswith('.png'):                  thumbnail = link -            elif pathext == '.mp4': -                url = link -                ext = pathext +            elif '/videos/' in link: +                video_url = link +        if not video_url: +            raise ExtractorError(u'Could not find video URL') +        ext = u'.' + determine_ext(video_url)          if ext == title[-len(ext):]:              title = title[:-len(ext)] -        ext = ext[1:] -        return [{ + +        return {              'id':        video_id, -            'url':       url, -            'ext':       ext, +            'url':       video_url,              'title':     title,              'thumbnail': thumbnail, -        }] +        } diff --git a/youtube_dl/extractor/bambuser.py b/youtube_dl/extractor/bambuser.py index f3b36f473..967568c4a 100644 --- a/youtube_dl/extractor/bambuser.py +++ b/youtube_dl/extractor/bambuser.py @@ -15,7 +15,8 @@ class BambuserIE(InfoExtractor):      _TEST = {          u'url': u'http://bambuser.com/v/4050584', -        u'md5': u'fba8f7693e48fd4e8641b3fd5539a641', +        # MD5 seems to be flaky, see https://travis-ci.org/rg3/youtube-dl/jobs/14051016#L388 +        #u'md5': u'fba8f7693e48fd4e8641b3fd5539a641',          u'info_dict': {              u'id': u'4050584',              u'ext': u'flv', diff --git a/youtube_dl/extractor/bandcamp.py b/youtube_dl/extractor/bandcamp.py index 129a20f44..359d4174b 100644 --- 
a/youtube_dl/extractor/bandcamp.py +++ b/youtube_dl/extractor/bandcamp.py @@ -3,13 +3,16 @@ import re  from .common import InfoExtractor  from ..utils import ( +    compat_str, +    compat_urlparse,      ExtractorError,  )  class BandcampIE(InfoExtractor): +    IE_NAME = u'Bandcamp'      _VALID_URL = r'http://.*?\.bandcamp\.com/track/(?P<title>.*)' -    _TEST = { +    _TESTS = [{          u'url': u'http://youtube-dl.bandcamp.com/track/youtube-dl-test-song',          u'file': u'1812978515.mp3',          u'md5': u'cdeb30cdae1921719a3cbcab696ef53c', @@ -17,7 +20,29 @@ class BandcampIE(InfoExtractor):              u"title": u"youtube-dl test song \"'/\\\u00e4\u21ad"          },          u'skip': u'There is a limit of 200 free downloads / month for the test song' -    } +    }, { +        u'url': u'http://blazo.bandcamp.com/album/jazz-format-mixtape-vol-1', +        u'playlist': [ +            { +                u'file': u'1353101989.mp3', +                u'md5': u'39bc1eded3476e927c724321ddf116cf', +                u'info_dict': { +                    u'title': u'Intro', +                } +            }, +            { +                u'file': u'38097443.mp3', +                u'md5': u'1a2c32e2691474643e912cc6cd4bffaa', +                u'info_dict': { +                    u'title': u'Kero One - Keep It Alive (Blazo remix)', +                } +            }, +        ], +        u'params': { +            u'playlistend': 2 +        }, +        u'skip': u'Bancamp imposes download limits. 
See test_playlists:test_bandcamp_album for the playlist test' +    }]      def _real_extract(self, url):          mobj = re.match(self._VALID_URL, url) @@ -26,6 +51,26 @@ class BandcampIE(InfoExtractor):          # We get the link to the free download page          m_download = re.search(r'freeDownloadPage: "(.*?)"', webpage)          if m_download is None: +            m_trackinfo = re.search(r'trackinfo: (.+),\s*?\n', webpage) +        if m_trackinfo: +            json_code = m_trackinfo.group(1) +            data = json.loads(json_code) + +            entries = [] +            for d in data: +                formats = [{ +                    'format_id': 'format_id', +                    'url': format_url, +                    'ext': format_id.partition('-')[0] +                } for format_id, format_url in sorted(d['file'].items())] +                entries.append({ +                    'id': compat_str(d['id']), +                    'title': d['title'], +                    'formats': formats, +                }) + +            return self.playlist_result(entries, title, title) +        else:              raise ExtractorError(u'No free songs found')          download_link = m_download.group(1) @@ -61,3 +106,25 @@ class BandcampIE(InfoExtractor):                        }          return [track_info] + + +class BandcampAlbumIE(InfoExtractor): +    IE_NAME = u'Bandcamp:album' +    _VALID_URL = r'http://.*?\.bandcamp\.com/album/(?P<title>.*)' + +    def _real_extract(self, url): +        mobj = re.match(self._VALID_URL, url) +        title = mobj.group('title') +        webpage = self._download_webpage(url, title) +        tracks_paths = re.findall(r'<a href="(.*?)" itemprop="url">', webpage) +        if not tracks_paths: +            raise ExtractorError(u'The page doesn\'t contain any track') +        entries = [ +            self.url_result(compat_urlparse.urljoin(url, t_path), ie=BandcampIE.ie_key()) +            for t_path in tracks_paths] +        title = 
self._search_regex(r'album_title : "(.*?)"', webpage, u'title') +        return { +            '_type': 'playlist', +            'title': title, +            'entries': entries, +        } diff --git a/youtube_dl/extractor/canalplus.py b/youtube_dl/extractor/canalplus.py index 1db9b24cf..bfa2a8b40 100644 --- a/youtube_dl/extractor/canalplus.py +++ b/youtube_dl/extractor/canalplus.py @@ -5,6 +5,7 @@ import xml.etree.ElementTree  from .common import InfoExtractor  from ..utils import unified_strdate +  class CanalplusIE(InfoExtractor):      _VALID_URL = r'https?://(www\.canalplus\.fr/.*?/(?P<path>.*)|player\.canalplus\.fr/#/(?P<id>\d+))'      _VIDEO_INFO_TEMPLATE = 'http://service.canal-plus.com/video/rest/getVideosLiees/cplus/%s' @@ -25,7 +26,7 @@ class CanalplusIE(InfoExtractor):      def _real_extract(self, url):          mobj = re.match(self._VALID_URL, url) -        video_id = mobj.group('id') +        video_id = mobj.groupdict().get('id')          if video_id is None:              webpage = self._download_webpage(url, mobj.group('path'))              video_id = self._search_regex(r'videoId = "(\d+)";', webpage, u'video id') diff --git a/youtube_dl/extractor/collegehumor.py b/youtube_dl/extractor/collegehumor.py index 8d4c93d6d..0c29acfb1 100644 --- a/youtube_dl/extractor/collegehumor.py +++ b/youtube_dl/extractor/collegehumor.py @@ -71,10 +71,8 @@ class CollegeHumorIE(InfoExtractor):              adoc = xml.etree.ElementTree.fromstring(manifestXml)              try: -                media_node = adoc.findall('./{http://ns.adobe.com/f4m/1.0}media')[0] -                node_id = media_node.attrib['url']                  video_id = adoc.findall('./{http://ns.adobe.com/f4m/1.0}id')[0].text -            except IndexError as err: +            except IndexError:                  raise ExtractorError(u'Invalid manifest file')              url_pr = compat_urllib_parse_urlparse(info['thumbnail'])              info['url'] = url_pr.scheme + '://' + url_pr.netloc + 
video_id[:-2].replace('.csmil','').replace(',','') diff --git a/youtube_dl/extractor/common.py b/youtube_dl/extractor/common.py index 9c20d30b4..eb3435c77 100644 --- a/youtube_dl/extractor/common.py +++ b/youtube_dl/extractor/common.py @@ -71,6 +71,10 @@ class InfoExtractor(object):                                  ("3D" or "DASH video")                      * width     Width of the video, if known                      * height    Height of the video, if known +                    * abr       Average audio bitrate in KBit/s +                    * acodec    Name of the audio codec in use +                    * vbr       Average video bitrate in KBit/s +                    * vcodec    Name of the video codec in use      webpage_url:    The url to the video webpage, if given to youtube-dl it                      should allow to get the same result again. (It will be set                      by YoutubeDL if it's missing) @@ -315,13 +319,19 @@ class InfoExtractor(object):      # Helper functions for extracting OpenGraph info      @staticmethod -    def _og_regex(prop): -        return r'<meta.+?property=[\'"]og:%s[\'"].+?content=(?:"(.+?)"|\'(.+?)\')' % re.escape(prop) +    def _og_regexes(prop): +        content_re = r'content=(?:"([^>]+?)"|\'(.+?)\')' +        property_re = r'property=[\'"]og:%s[\'"]' % re.escape(prop) +        template = r'<meta[^>]+?%s[^>]+?%s' +        return [ +            template % (property_re, content_re), +            template % (content_re, property_re), +        ]      def _og_search_property(self, prop, html, name=None, **kargs):          if name is None:              name = 'OpenGraph %s' % prop -        escaped = self._search_regex(self._og_regex(prop), html, name, flags=re.DOTALL, **kargs) +        escaped = self._search_regex(self._og_regexes(prop), html, name, flags=re.DOTALL, **kargs)          if escaped is None:              return None          return unescapeHTML(escaped) @@ -336,10 +346,21 @@ class InfoExtractor(object):          
return self._og_search_property('title', html, **kargs)      def _og_search_video_url(self, html, name='video url', secure=True, **kargs): -        regexes = [self._og_regex('video')] -        if secure: regexes.insert(0, self._og_regex('video:secure_url')) +        regexes = self._og_regexes('video') +        if secure: regexes = self._og_regexes('video:secure_url') + regexes          return self._html_search_regex(regexes, html, name, **kargs) +    def _html_search_meta(self, name, html, display_name=None): +        if display_name is None: +            display_name = name +        return self._html_search_regex( +            r'''(?ix)<meta(?=[^>]+(?:name|property)=["\']%s["\']) +                    [^>]+content=["\']([^"\']+)["\']''' % re.escape(name), +            html, display_name, fatal=False) + +    def _dc_search_uploader(self, html): +        return self._html_search_meta('dc.creator', html, 'uploader') +      def _rta_search(self, html):          # See http://www.rtalabel.org/index.php?content=howtofaq#single          if re.search(r'(?ix)<meta\s+name="rating"\s+' @@ -348,6 +369,23 @@ class InfoExtractor(object):              return 18          return 0 +    def _media_rating_search(self, html): +        # See http://www.tjg-designs.com/WP/metadata-code-examples-adding-metadata-to-your-web-pages/ +        rating = self._html_search_meta('rating', html) + +        if not rating: +            return None + +        RATING_TABLE = { +            'safe for kids': 0, +            'general': 8, +            '14 years': 14, +            'mature': 17, +            'restricted': 19, +        } +        return RATING_TABLE.get(rating.lower(), None) + +  class SearchInfoExtractor(InfoExtractor):      """ diff --git a/youtube_dl/extractor/d8.py b/youtube_dl/extractor/d8.py new file mode 100644 index 000000000..a56842b16 --- /dev/null +++ b/youtube_dl/extractor/d8.py @@ -0,0 +1,22 @@ +# encoding: utf-8 +from .canalplus import CanalplusIE + + +class D8IE(CanalplusIE): 
+    _VALID_URL = r'https?://www\.d8\.tv/.*?/(?P<path>.*)' +    _VIDEO_INFO_TEMPLATE = 'http://service.canal-plus.com/video/rest/getVideosLiees/d8/%s' +    IE_NAME = u'd8.tv' + +    _TEST = { +        u'url': u'http://www.d8.tv/d8-docs-mags/pid6589-d8-campagne-intime.html', +        u'file': u'966289.flv', +        u'info_dict': { +            u'title': u'Campagne intime - Documentaire exceptionnel', +            u'description': u'md5:d2643b799fb190846ae09c61e59a859f', +            u'upload_date': u'20131108', +        }, +        u'params': { +            # rtmp +            u'skip_download': True, +        }, +    } diff --git a/youtube_dl/extractor/dailymotion.py b/youtube_dl/extractor/dailymotion.py index e87690f9d..71f5e03ee 100644 --- a/youtube_dl/extractor/dailymotion.py +++ b/youtube_dl/extractor/dailymotion.py @@ -186,7 +186,7 @@ class DailymotionPlaylistIE(DailymotionBaseInfoExtractor):              webpage = self._download_webpage(request,                                               id, u'Downloading page %s' % pagenum) -            playlist_el = get_element_by_attribute(u'class', u'video_list', webpage) +            playlist_el = get_element_by_attribute(u'class', u'row video_list', webpage)              video_ids.extend(re.findall(r'data-id="(.+?)"', playlist_el))              if re.search(self._MORE_PAGES_INDICATOR, webpage, re.DOTALL) is None: diff --git a/youtube_dl/extractor/eighttracks.py b/youtube_dl/extractor/eighttracks.py index 2cfbcd363..f21ef8853 100644 --- a/youtube_dl/extractor/eighttracks.py +++ b/youtube_dl/extractor/eighttracks.py @@ -1,4 +1,3 @@ -import itertools  import json  import random  import re diff --git a/youtube_dl/extractor/escapist.py b/youtube_dl/extractor/escapist.py index 3aa2da52c..b1242f6bc 100644 --- a/youtube_dl/extractor/escapist.py +++ b/youtube_dl/extractor/escapist.py @@ -11,11 +11,11 @@ from ..utils import (  class EscapistIE(InfoExtractor): -    _VALID_URL = 
r'^(https?://)?(www\.)?escapistmagazine\.com/videos/view/(?P<showname>[^/]+)/(?P<episode>[^/?]+)[/?]?.*$' +    _VALID_URL = r'^https?://?(www\.)?escapistmagazine\.com/videos/view/(?P<showname>[^/]+)/(?P<episode>[^/?]+)[/?]?.*$'      _TEST = {          u'url': u'http://www.escapistmagazine.com/videos/view/the-escapist-presents/6618-Breaking-Down-Baldurs-Gate',          u'file': u'6618-Breaking-Down-Baldurs-Gate.mp4', -        u'md5': u'c6793dbda81388f4264c1ba18684a74d', +        u'md5': u'ab3a706c681efca53f0a35f1415cf0d1',          u'info_dict': {              u"description": u"Baldur's Gate: Original, Modded or Enhanced Edition? I'll break down what you can expect from the new Baldur's Gate: Enhanced Edition.",               u"uploader": u"the-escapist-presents",  @@ -25,50 +25,60 @@ class EscapistIE(InfoExtractor):      def _real_extract(self, url):          mobj = re.match(self._VALID_URL, url) -        if mobj is None: -            raise ExtractorError(u'Invalid URL: %s' % url)          showName = mobj.group('showname')          videoId = mobj.group('episode')          self.report_extraction(videoId)          webpage = self._download_webpage(url, videoId) -        videoDesc = self._html_search_regex('<meta name="description" content="([^"]*)"', +        videoDesc = self._html_search_regex( +            r'<meta name="description" content="([^"]*)"',              webpage, u'description', fatal=False) -        playerUrl = self._og_search_video_url(webpage, name='player url') +        playerUrl = self._og_search_video_url(webpage, name=u'player URL') -        title = self._html_search_regex('<meta name="title" content="([^"]*)"', -            webpage, u'player url').split(' : ')[-1] +        title = self._html_search_regex( +            r'<meta name="title" content="([^"]*)"', +            webpage, u'title').split(' : ')[-1] -        configUrl = self._search_regex('config=(.*)$', playerUrl, u'config url') +        configUrl = self._search_regex('config=(.*)$', 
playerUrl, u'config URL')          configUrl = compat_urllib_parse.unquote(configUrl) -        configJSON = self._download_webpage(configUrl, videoId, -                                            u'Downloading configuration', -                                            u'unable to download configuration') - -        # Technically, it's JavaScript, not JSON -        configJSON = configJSON.replace("'", '"') - +        formats = [] + +        def _add_format(name, cfgurl): +            configJSON = self._download_webpage( +                cfgurl, videoId, +                u'Downloading ' + name + ' configuration', +                u'Unable to download ' + name + ' configuration') + +            # Technically, it's JavaScript, not JSON +            configJSON = configJSON.replace("'", '"') + +            try: +                config = json.loads(configJSON) +            except (ValueError,) as err: +                raise ExtractorError(u'Invalid JSON in configuration file: ' + compat_str(err)) +            playlist = config['playlist'] +            formats.append({ +                'url': playlist[1]['url'], +                'format_id': name, +            }) + +        _add_format(u'normal', configUrl) +        hq_url = (configUrl + +                  ('&hq=1' if '?' 
in configUrl else '?hq=1'))          try: -            config = json.loads(configJSON) -        except (ValueError,) as err: -            raise ExtractorError(u'Invalid JSON in configuration file: ' + compat_str(err)) +            _add_format(u'hq', hq_url) +        except ExtractorError: +            pass  # That's fine, we'll just use normal quality -        playlist = config['playlist'] -        videoUrl = playlist[1]['url'] - -        info = { +        return {              'id': videoId, -            'url': videoUrl, +            'formats': formats,              'uploader': showName, -            'upload_date': None,              'title': title, -            'ext': 'mp4',              'thumbnail': self._og_search_thumbnail(webpage),              'description': videoDesc,              'player_url': playerUrl,          } - -        return [info] diff --git a/youtube_dl/extractor/facebook.py b/youtube_dl/extractor/facebook.py index f8bdfc2d3..3b210710e 100644 --- a/youtube_dl/extractor/facebook.py +++ b/youtube_dl/extractor/facebook.py @@ -1,5 +1,4 @@  import json -import netrc  import re  import socket diff --git a/youtube_dl/extractor/fktv.py b/youtube_dl/extractor/fktv.py index 9c89362ef..dba1a8dc2 100644 --- a/youtube_dl/extractor/fktv.py +++ b/youtube_dl/extractor/fktv.py @@ -39,7 +39,6 @@ class FKTVIE(InfoExtractor):          for i, _ in enumerate(files, 1):              video_id = '%04d%d' % (episode, i)              video_url = 'http://dl%d.fernsehkritik.tv/fernsehkritik%d%s.flv' % (server, episode, '' if i == 1 else '-%d' % i) -            video_title = 'Fernsehkritik %d.%d' % (episode, i)              videos.append({                  'id': video_id,                  'url': video_url, diff --git a/youtube_dl/extractor/gamekings.py b/youtube_dl/extractor/gamekings.py index 4b4259447..c91669b0e 100644 --- a/youtube_dl/extractor/gamekings.py +++ b/youtube_dl/extractor/gamekings.py @@ -1,9 +1,6 @@  import re  from .common import InfoExtractor 
-from ..utils import ( -    determine_ext, -)  class GamekingsIE(InfoExtractor): @@ -11,7 +8,8 @@ class GamekingsIE(InfoExtractor):      _TEST = {          u"url": u"http://www.gamekings.tv/videos/phoenix-wright-ace-attorney-dual-destinies-review/",          u'file': u'20130811.mp4', -        u'md5': u'17f6088f7d0149ff2b46f2714bdb1954', +        # MD5 is flaky, seems to change regularly +        #u'md5': u'2f32b1f7b80fdc5cb616efb4f387f8a3',          u'info_dict': {              u"title": u"Phoenix Wright: Ace Attorney \u2013 Dual Destinies Review",              u"description": u"Melle en Steven hebben voor de review een week in de rechtbank doorbracht met Phoenix Wright: Ace Attorney - Dual Destinies.", diff --git a/youtube_dl/extractor/gamespot.py b/youtube_dl/extractor/gamespot.py index 098768361..9645b00c3 100644 --- a/youtube_dl/extractor/gamespot.py +++ b/youtube_dl/extractor/gamespot.py @@ -24,7 +24,7 @@ class GameSpotIE(InfoExtractor):      def _real_extract(self, url):          mobj = re.match(self._VALID_URL, url) -        page_id = video_id = mobj.group('page_id') +        page_id = mobj.group('page_id')          webpage = self._download_webpage(url, page_id)          data_video_json = self._search_regex(r'data-video=\'(.*?)\'', webpage, u'data video')          data_video = json.loads(unescapeHTML(data_video_json)) diff --git a/youtube_dl/extractor/generic.py b/youtube_dl/extractor/generic.py index c7552fddb..0b5f2b2bb 100644 --- a/youtube_dl/extractor/generic.py +++ b/youtube_dl/extractor/generic.py @@ -162,6 +162,16 @@ class GenericIE(InfoExtractor):              raise ExtractorError(u'Failed to download URL: %s' % url)          self.report_extraction(video_id) + +        # it's tempting to parse this further, but you would +        # have to take into account all the variations like +        #   Video Title - Site Name +        #   Site Name | Video Title +        #   Video Title - Tagline | Site Name +        # and so on and so forth; it's just not 
practical +        video_title = self._html_search_regex(r'<title>(.*)</title>', +            webpage, u'video title', default=u'video', flags=re.DOTALL) +          # Look for BrightCove:          bc_url = BrightcoveIE._extract_brightcove_url(webpage)          if bc_url is not None: @@ -177,17 +187,20 @@ class GenericIE(InfoExtractor):              return self.url_result(surl, 'Vimeo')          # Look for embedded YouTube player -        mobj = re.search( -            r'<iframe[^>]+?src=(["\'])(?P<url>https?://(?:www\.)?youtube.com/embed/.+?)\1', webpage) -        if mobj: -            surl = unescapeHTML(mobj.group(u'url')) -            return self.url_result(surl, 'Youtube') +        matches = re.findall( +            r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?youtube.com/embed/.+?)\1', webpage) +        if matches: +            urlrs = [self.url_result(unescapeHTML(tuppl[1]), 'Youtube') +                     for tuppl in matches] +            return self.playlist_result( +                urlrs, playlist_id=video_id, playlist_title=video_title)          # Look for Bandcamp pages with custom domain          mobj = re.search(r'<meta property="og:url"[^>]*?content="(.*?bandcamp\.com.*?)"', webpage)          if mobj is not None:              burl = unescapeHTML(mobj.group(1)) -            return self.url_result(burl, 'Bandcamp') +            # Don't set the extractor because it can be a track url or an album +            return self.url_result(burl)          # Start with something easy: JW Player in SWFObject          mobj = re.search(r'flashvars: [\'"](?:.*&)?file=(http[^\'"&]*)', webpage) @@ -226,15 +239,6 @@ class GenericIE(InfoExtractor):          video_extension = os.path.splitext(video_id)[1][1:]          video_id = os.path.splitext(video_id)[0] -        # it's tempting to parse this further, but you would -        # have to take into account all the variations like -        #   Video Title - Site Name -        #   Site Name | Video Title -       
 #   Video Title - Tagline | Site Name -        # and so on and so forth; it's just not practical -        video_title = self._html_search_regex(r'<title>(.*)</title>', -            webpage, u'video title', default=u'video', flags=re.DOTALL) -          # video uploader is domain name          video_uploader = self._search_regex(r'(?:https?://)?([^/]*)/.*',              url, u'video uploader') diff --git a/youtube_dl/extractor/jeuxvideo.py b/youtube_dl/extractor/jeuxvideo.py index 6bb54b932..0020c47cf 100644 --- a/youtube_dl/extractor/jeuxvideo.py +++ b/youtube_dl/extractor/jeuxvideo.py @@ -22,7 +22,7 @@ class JeuxVideoIE(InfoExtractor):      def _real_extract(self, url):          mobj = re.match(self._VALID_URL, url) -        title = re.match(self._VALID_URL, url).group(1) +        title = mobj.group(1)          webpage = self._download_webpage(url, title)          xml_link = self._html_search_regex(              r'<param name="flashvars" value="config=(.*?)" />', diff --git a/youtube_dl/extractor/livestream.py b/youtube_dl/extractor/livestream.py index 4531fd6ab..5f548437c 100644 --- a/youtube_dl/extractor/livestream.py +++ b/youtube_dl/extractor/livestream.py @@ -1,16 +1,17 @@  import re  import json +import xml.etree.ElementTree  from .common import InfoExtractor  from ..utils import (      compat_urllib_parse_urlparse,      compat_urlparse, -    get_meta_content, -    ExtractorError, +    xpath_with_ns,  )  class LivestreamIE(InfoExtractor): +    IE_NAME = u'livestream'      _VALID_URL = r'http://new.livestream.com/.*?/(?P<event_name>.*?)(/videos/(?P<id>\d+))?/?$'      _TEST = {          u'url': u'http://new.livestream.com/CoheedandCambria/WebsterHall/videos/4719370', @@ -54,3 +55,44 @@ class LivestreamIE(InfoExtractor):              info = json.loads(self._download_webpage(api_url, video_id,                                                       u'Downloading video info'))              return self._extract_video_info(info) + + +# The original version of 
Livestream uses a different system +class LivestreamOriginalIE(InfoExtractor): +    IE_NAME = u'livestream:original' +    _VALID_URL = r'https?://www\.livestream\.com/(?P<user>[^/]+)/video\?.*?clipId=(?P<id>.*?)(&|$)' +    _TEST = { +        u'url': u'http://www.livestream.com/dealbook/video?clipId=pla_8aa4a3f1-ba15-46a4-893b-902210e138fb', +        u'info_dict': { +            u'id': u'pla_8aa4a3f1-ba15-46a4-893b-902210e138fb', +            u'ext': u'flv', +            u'title': u'Spark 1 (BitCoin) with Cameron Winklevoss & Tyler Winklevoss of Winklevoss Capital', +        }, +        u'params': { +            # rtmp +            u'skip_download': True, +        }, +    } + +    def _real_extract(self, url): +        mobj = re.match(self._VALID_URL, url) +        video_id = mobj.group('id') +        user = mobj.group('user') +        api_url = 'http://x{0}x.api.channel.livestream.com/2.0/clipdetails?extendedInfo=true&id={1}'.format(user, video_id) + +        api_response = self._download_webpage(api_url, video_id) +        info = xml.etree.ElementTree.fromstring(api_response.encode('utf-8')) +        item = info.find('channel').find('item') +        ns = {'media': 'http://search.yahoo.com/mrss'} +        thumbnail_url = item.find(xpath_with_ns('media:thumbnail', ns)).attrib['url'] +        # Remove the extension and number from the path (like 1.jpg) +        path = self._search_regex(r'(user-files/.+)_.*?\.jpg$', thumbnail_url, u'path') + +        return { +            'id': video_id, +            'title': item.find('title').text, +            'url': 'rtmp://extondemand.livestream.com/ondemand', +            'play_path': 'mp4:trans/dv15/mogulus-{0}.mp4'.format(path), +            'ext': 'flv', +            'thumbnail': thumbnail_url, +        } diff --git a/youtube_dl/extractor/mtv.py b/youtube_dl/extractor/mtv.py index 24a79ae13..04afd6c4c 100644 --- a/youtube_dl/extractor/mtv.py +++ b/youtube_dl/extractor/mtv.py @@ -48,7 +48,7 @@ class MTVIE(InfoExtractor):      
def _transform_rtmp_url(rtmp_video_url):          m = re.match(r'^rtmpe?://.*?/(?P<finalid>gsp\..+?/.*)$', rtmp_video_url)          if not m: -            raise ExtractorError(u'Cannot transform RTMP url') +            return rtmp_video_url          base = 'http://mtvnmobile.vo.llnwd.net/kip0/_pxn=1+_pxI0=Ripod-h264+_pxL0=undefined+_pxM0=+_pxK=18639+_pxE=mp4/44620/mtvnorigin/'          return base + m.group('finalid') @@ -59,7 +59,6 @@ class MTVIE(InfoExtractor):          if '/error_country_block.swf' in metadataXml:              raise ExtractorError(u'This video is not available from your country.', expected=True)          mdoc = xml.etree.ElementTree.fromstring(metadataXml.encode('utf-8')) -        renditions = mdoc.findall('.//rendition')          formats = []          for rendition in mdoc.findall('.//rendition'): diff --git a/youtube_dl/extractor/nhl.py b/youtube_dl/extractor/nhl.py index 224f56ac8..458fe4063 100644 --- a/youtube_dl/extractor/nhl.py +++ b/youtube_dl/extractor/nhl.py @@ -72,7 +72,7 @@ class NHLIE(NHLBaseInfoExtractor):  class NHLVideocenterIE(NHLBaseInfoExtractor):      IE_NAME = u'nhl.com:videocenter' -    IE_DESC = u'Download the first 12 videos from a videocenter category' +    IE_DESC = u'NHL videocenter category'      _VALID_URL = r'https?://video\.(?P<team>[^.]*)\.nhl\.com/videocenter/(console\?.*?catid=(?P<catid>[^&]+))?'      
@classmethod diff --git a/youtube_dl/extractor/pornhub.py b/youtube_dl/extractor/pornhub.py index 75cf4bb9f..8b3471919 100644 --- a/youtube_dl/extractor/pornhub.py +++ b/youtube_dl/extractor/pornhub.py @@ -6,7 +6,6 @@ from ..utils import (      compat_urllib_parse_urlparse,      compat_urllib_request,      compat_urllib_parse, -    unescapeHTML,  )  from ..aes import (      aes_decrypt_text diff --git a/youtube_dl/extractor/redtube.py b/youtube_dl/extractor/redtube.py index 994778e16..3bbda128e 100644 --- a/youtube_dl/extractor/redtube.py +++ b/youtube_dl/extractor/redtube.py @@ -8,7 +8,9 @@ class RedTubeIE(InfoExtractor):      _TEST = {          u'url': u'http://www.redtube.com/66418',          u'file': u'66418.mp4', -        u'md5': u'7b8c22b5e7098a3e1c09709df1126d2d', +        # md5 varies from time to time, as in +        # https://travis-ci.org/rg3/youtube-dl/jobs/14052463#L295 +        #u'md5': u'7b8c22b5e7098a3e1c09709df1126d2d',          u'info_dict': {              u"title": u"Sucked on a toilet",              u"age_limit": 18, diff --git a/youtube_dl/extractor/rtlnow.py b/youtube_dl/extractor/rtlnow.py index 9ac7c3be8..2f238de35 100644 --- a/youtube_dl/extractor/rtlnow.py +++ b/youtube_dl/extractor/rtlnow.py @@ -63,18 +63,6 @@ class RTLnowIE(InfoExtractor):          },      },      { -        u'url': u'http://www.rtlnitronow.de/recht-ordnung/stadtpolizei-frankfurt-gerichtsvollzieher-leipzig.php?film_id=129679&player=1&season=1', -        u'file': u'129679.flv', -        u'info_dict': { -            u'upload_date': u'20131016',  -            u'title': u'Recht & Ordnung - Stadtpolizei Frankfurt/ Gerichtsvollzieher...', -            u'description': u'Stadtpolizei Frankfurt/ Gerichtsvollzieher Leipzig', -        }, -        u'params': { -            u'skip_download': True, -        }, -    }, -    {          u'url': u'http://www.n-tvnow.de/top-gear/episode-1-2013-01-01-00-00-00.php?film_id=124903&player=1&season=10',          u'file': u'124903.flv',          
u'info_dict': { diff --git a/youtube_dl/extractor/soundcloud.py b/youtube_dl/extractor/soundcloud.py index 4717fbb77..67b2dff9c 100644 --- a/youtube_dl/extractor/soundcloud.py +++ b/youtube_dl/extractor/soundcloud.py @@ -59,6 +59,7 @@ class SoundcloudIE(InfoExtractor):      ]      _CLIENT_ID = 'b45b1aa10f1ac2941910a7f0d10f8e28' +    _IPHONE_CLIENT_ID = '376f225bf427445fc4bfb6b99b72e0bf'      @classmethod      def suitable(cls, url): @@ -83,28 +84,37 @@ class SoundcloudIE(InfoExtractor):              thumbnail = thumbnail.replace('-large', '-t500x500')          result = {              'id':       track_id, -            'url':      info['stream_url'] + '?client_id=' + self._CLIENT_ID,              'uploader': info['user']['username'],              'upload_date': unified_strdate(info['created_at']),              'title':    info['title'], -            'ext':      u'mp3', +            'ext':      info.get('original_format', u'mp3'),              'description': info['description'],              'thumbnail': thumbnail,          }          if info.get('downloadable', False): +            # We can build a direct link to the song              result['url'] = 'https://api.soundcloud.com/tracks/{0}/download?client_id={1}'.format(track_id, self._CLIENT_ID) -        if not info.get('streamable', False): -            # We have to get the rtmp url +        else: +            # We have to retrieve the url              stream_json = self._download_webpage( -                'http://api.soundcloud.com/i1/tracks/{0}/streams?client_id={1}'.format(track_id, self._CLIENT_ID), +                'http://api.soundcloud.com/i1/tracks/{0}/streams?client_id={1}'.format(track_id, self._IPHONE_CLIENT_ID),                  track_id, u'Downloading track url') -            rtmp_url = json.loads(stream_json)['rtmp_mp3_128_url'] -            # The url doesn't have an rtmp app, we have to extract the playpath -            url, path = rtmp_url.split('mp3:', 1) -            result.update({ -              
  'url': url, -                'play_path': 'mp3:' + path, -            }) +            # There should be only one entry in the dictionary +            key, stream_url = list(json.loads(stream_json).items())[0] +            if key.startswith(u'http'): +                result['url'] = stream_url +            elif key.startswith(u'rtmp'): +                # The url doesn't have an rtmp app, we have to extract the playpath +                url, path = stream_url.split('mp3:', 1) +                result.update({ +                    'url': url, +                    'play_path': 'mp3:' + path, +                }) +            else: +                # We fallback to the stream_url in the original info, this +                # cannot be always used, sometimes it can give an HTTP 404 error +                result['url'] = info['stream_url'] + '?client_id=' + self._CLIENT_ID +          return result      def _real_extract(self, url): @@ -158,7 +168,6 @@ class SoundcloudSetIE(SoundcloudIE):          resolv_url = self._resolv_url(url)          info_json = self._download_webpage(resolv_url, full_title) -        videos = []          info = json.loads(info_json)          if 'errors' in info:              for err in info['errors']: diff --git a/youtube_dl/extractor/southparkstudios.py b/youtube_dl/extractor/southparkstudios.py index b1e96b679..a711531e6 100644 --- a/youtube_dl/extractor/southparkstudios.py +++ b/youtube_dl/extractor/southparkstudios.py @@ -5,21 +5,19 @@ import re  from .mtv import MTVIE, _media_xml_tag  class SouthParkStudiosIE(MTVIE):      IE_NAME = u'southparkstudios.com' -    _VALID_URL = r'https?://www\.southparkstudios\.com/(clips|full-episodes)/(?P<id>.+?)(\?|#|$)' +    _VALID_URL = r'(https?://)?(www\.)?(?P<url>southparkstudios\.com/(clips|full-episodes)/(?P<id>.+?)(\?|#|$))'      _FEED_URL = 'http://www.southparkstudios.com/feeds/video-player/mrss'      _TEST = { +    # Overwrite MTVIE properties we don't want +    _TESTS = [{          u'url': 
u'http://www.southparkstudios.com/clips/104437/bat-daded#tab=featured',          u'file': u'a7bff6c2-ed00-11e0-aca6-0026b9414f30.mp4',          u'info_dict': {              u'title': u'Bat Daded',              u'description': u'Randy disqualifies South Park by getting into a fight with Bat Dad.',          }, -    } - -    # Overwrite MTVIE properties we don't want -    _TESTS = [] +    }]      def _get_thumbnail_url(self, uri, itemdoc):          search_path = '%s/%s' % (_media_xml_tag('group'), _media_xml_tag('thumbnail')) @@ -31,8 +29,23 @@ class SouthParkStudiosIE(MTVIE):      def _real_extract(self, url):          mobj = re.match(self._VALID_URL, url) +        url = u'http://www.' + mobj.group(u'url')          video_id = mobj.group('id')          webpage = self._download_webpage(url, video_id)          mgid = self._search_regex(r'swfobject.embedSWF\(".*?(mgid:.*?)"',                                    webpage, u'mgid')          return self._get_videos_info(mgid) + +class SouthparkDeIE(SouthParkStudiosIE): +    IE_NAME = u'southpark.de' +    _VALID_URL = r'(https?://)?(www\.)?(?P<url>southpark\.de/(clips|alle-episoden)/(?P<id>.+?)(\?|#|$))' +    _FEED_URL = 'http://www.southpark.de/feeds/video-player/mrss/' + +    _TESTS = [{ +        u'url': u'http://www.southpark.de/clips/uygssh/the-government-wont-respect-my-privacy#tab=featured', +        u'file': u'85487c96-b3b9-4e39-9127-ad88583d9bf2.mp4', +        u'info_dict': { +            u'title': u'The Government Won\'t Respect My Privacy', +            u'description': u'Cartman explains the benefits of "Shitter" to Stan, Kyle and Craig.', +        }, +    }] diff --git a/youtube_dl/extractor/spankwire.py b/youtube_dl/extractor/spankwire.py index 97f9c268a..9e2ad0d99 100644 --- a/youtube_dl/extractor/spankwire.py +++ b/youtube_dl/extractor/spankwire.py @@ -6,7 +6,6 @@ from ..utils import (      compat_urllib_parse_urlparse,      compat_urllib_request,      compat_urllib_parse, -    unescapeHTML,  )  from ..aes import 
(      aes_decrypt_text @@ -36,11 +35,12 @@ class SpankwireIE(InfoExtractor):          webpage = self._download_webpage(req, video_id)          video_title = self._html_search_regex(r'<h1>([^<]+)', webpage, u'title') -        video_uploader = self._html_search_regex(r'by:\s*<a [^>]*>(.+?)</a>', webpage, u'uploader', fatal=False) -        thumbnail = self._html_search_regex(r'flashvars\.image_url = "([^"]+)', webpage, u'thumbnail', fatal=False) -        description = self._html_search_regex(r'>\s*Description:</div>\s*<[^>]*>([^<]+)', webpage, u'description', fatal=False) -        if len(description) == 0: -            description = None +        video_uploader = self._html_search_regex( +            r'by:\s*<a [^>]*>(.+?)</a>', webpage, u'uploader', fatal=False) +        thumbnail = self._html_search_regex( +            r'flashvars\.image_url = "([^"]+)', webpage, u'thumbnail', fatal=False) +        description = self._html_search_regex( +            r'<div\s+id="descriptionContent">([^<]+)<', webpage, u'description', fatal=False)          video_urls = list(map(compat_urllib_parse.unquote , re.findall(r'flashvars\.quality_[0-9]{3}p = "([^"]+)', webpage)))          if webpage.find('flashvars\.encrypted = "true"') != -1: diff --git a/youtube_dl/extractor/spiegel.py b/youtube_dl/extractor/spiegel.py index 13c86401c..19ce585cf 100644 --- a/youtube_dl/extractor/spiegel.py +++ b/youtube_dl/extractor/spiegel.py @@ -6,14 +6,22 @@ from .common import InfoExtractor  class SpiegelIE(InfoExtractor):      _VALID_URL = r'https?://(?:www\.)?spiegel\.de/video/[^/]*-(?P<videoID>[0-9]+)(?:\.html)?(?:#.*)?$' -    _TEST = { +    _TESTS = [{          u'url': u'http://www.spiegel.de/video/vulkan-tungurahua-in-ecuador-ist-wieder-aktiv-video-1259285.html',          u'file': u'1259285.mp4',          u'md5': u'2c2754212136f35fb4b19767d242f66e',          u'info_dict': {              u"title": u"Vulkanausbruch in Ecuador: Der \"Feuerschlund\" ist wieder aktiv"          } -    } +    }, +    { 
+        u'url': u'http://www.spiegel.de/video/schach-wm-videoanalyse-des-fuenften-spiels-video-1309159.html', +        u'file': u'1309159.mp4', +        u'md5': u'f2cdf638d7aa47654e251e1aee360af1', +        u'info_dict': { +            u'title': u'Schach-WM in der Videoanalyse: Carlsen nutzt die Fehlgriffe des Titelverteidigers' +        } +    }]      def _real_extract(self, url):          m = re.match(self._VALID_URL, url) @@ -21,25 +29,38 @@ class SpiegelIE(InfoExtractor):          webpage = self._download_webpage(url, video_id) -        video_title = self._html_search_regex(r'<div class="module-title">(.*?)</div>', -            webpage, u'title') +        video_title = self._html_search_regex( +            r'<div class="module-title">(.*?)</div>', webpage, u'title')          xml_url = u'http://video2.spiegel.de/flash/' + video_id + u'.xml' -        xml_code = self._download_webpage(xml_url, video_id, -                    note=u'Downloading XML', errnote=u'Failed to download XML') +        xml_code = self._download_webpage( +            xml_url, video_id, +            note=u'Downloading XML', errnote=u'Failed to download XML')          idoc = xml.etree.ElementTree.fromstring(xml_code) -        last_type = idoc[-1] -        filename = last_type.findall('./filename')[0].text -        duration = float(last_type.findall('./duration')[0].text) -        video_url = 'http://video2.spiegel.de/flash/' + filename -        video_ext = filename.rpartition('.')[2] +        formats = [ +            { +                'format_id': n.tag.rpartition('type')[2], +                'url': u'http://video2.spiegel.de/flash/' + n.find('./filename').text, +                'width': int(n.find('./width').text), +                'height': int(n.find('./height').text), +                'abr': int(n.find('./audiobitrate').text), +                'vbr': int(n.find('./videobitrate').text), +                'vcodec': n.find('./codec').text, +                'acodec': 'MP4A', +            } +   
         for n in list(idoc) +            # Blacklist type 6, it's extremely LQ and not available on the same server +            if n.tag.startswith('type') and n.tag != 'type6' +        ] +        formats.sort(key=lambda f: f['vbr']) +        duration = float(idoc[0].findall('./duration')[0].text) +          info = {              'id': video_id, -            'url': video_url, -            'ext': video_ext,              'title': video_title,              'duration': duration, +            'formats': formats,          } -        return [info] +        return info diff --git a/youtube_dl/extractor/streamcloud.py b/youtube_dl/extractor/streamcloud.py new file mode 100644 index 000000000..d476693ec --- /dev/null +++ b/youtube_dl/extractor/streamcloud.py @@ -0,0 +1,65 @@ +# coding: utf-8 +import re +import time + +from .common import InfoExtractor +from ..utils import ( +    compat_urllib_parse, +    compat_urllib_request, +) + + +class StreamcloudIE(InfoExtractor): +    IE_NAME = u'streamcloud.eu' +    _VALID_URL = r'https?://streamcloud\.eu/(?P<id>[a-zA-Z0-9_-]+)/(?P<fname>[^#?]*)\.html' + +    _TEST = { +        u'url': u'http://streamcloud.eu/skp9j99s4bpz/youtube-dl_test_video_____________-BaW_jenozKc.mp4.html', +        u'file': u'skp9j99s4bpz.mp4', +        u'md5': u'6bea4c7fa5daaacc2a946b7146286686', +        u'info_dict': { +            u'title': u'youtube-dl test video  \'/\\ ä ↭', +            u'duration': 9, +        }, +    } + +    def _real_extract(self, url): +        mobj = re.match(self._VALID_URL, url) +        video_id = mobj.group('id') + +        orig_webpage = self._download_webpage(url, video_id) + +        fields = re.findall(r'''(?x)<input\s+ +            type="(?:hidden|submit)"\s+ +            name="([^"]+)"\s+ +            (?:id="[^"]+"\s+)? 
+            value="([^"]*)" +            ''', orig_webpage) +        post = compat_urllib_parse.urlencode(fields) + +        self.to_screen('%s: Waiting for timeout' % video_id) +        time.sleep(12) +        headers = { +            b'Content-Type': b'application/x-www-form-urlencoded', +        } +        req = compat_urllib_request.Request(url, post, headers) + +        webpage = self._download_webpage( +            req, video_id, note=u'Downloading video page ...') +        title = self._html_search_regex( +            r'<h1[^>]*>([^<]+)<', webpage, u'title') +        video_url = self._search_regex( +            r'file:\s*"([^"]+)"', webpage, u'video URL') +        duration_str = self._search_regex( +            r'duration:\s*"?([0-9]+)"?', webpage, u'duration', fatal=False) +        duration = None if duration_str is None else int(duration_str) +        thumbnail = self._search_regex( +            r'image:\s*"([^"]+)"', webpage, u'thumbnail URL', fatal=False) + +        return { +            'id': video_id, +            'title': title, +            'url': video_url, +            'duration': duration, +            'thumbnail': thumbnail, +        } diff --git a/youtube_dl/extractor/sztvhu.py b/youtube_dl/extractor/sztvhu.py index 81fa35c4b..c9359fafb 100644 --- a/youtube_dl/extractor/sztvhu.py +++ b/youtube_dl/extractor/sztvhu.py @@ -15,7 +15,8 @@ class SztvHuIE(InfoExtractor):          u'info_dict': {              u"title": u"Cserkészek népszerűsítették a környezettudatos életmódot a Savaria téren",              u"description": u'A zöld nap játékos ismeretterjesztő programjait a Magyar Cserkész Szövetség szervezte, akik az ország nyolc városában adják át tudásukat az érdeklődőknek. 
A PET...', -        } +        }, +        u'skip': u'Service temporarily disabled as of 2013-11-20'      }      def _real_extract(self, url): diff --git a/youtube_dl/extractor/teamcoco.py b/youtube_dl/extractor/teamcoco.py index bc48620f0..165d9f88b 100644 --- a/youtube_dl/extractor/teamcoco.py +++ b/youtube_dl/extractor/teamcoco.py @@ -60,7 +60,7 @@ class TeamcocoIE(InfoExtractor):                  return -1          formats.sort(key=sort_key)          if not formats: -            raise RegexNotFoundError(u'Unable to extract video URL') +            raise ExtractorError(u'Unable to extract video URL')          return {              'id':          video_id, diff --git a/youtube_dl/extractor/ted.py b/youtube_dl/extractor/ted.py index 76cfdfb90..4bca62ba0 100644 --- a/youtube_dl/extractor/ted.py +++ b/youtube_dl/extractor/ted.py @@ -4,7 +4,6 @@ import re  from .subtitles import SubtitlesInfoExtractor  from ..utils import ( -    compat_str,      RegexNotFoundError,  ) @@ -43,26 +42,25 @@ class TEDIE(SubtitlesInfoExtractor):              self.to_screen(u'Getting info of playlist %s: "%s"' % (playlist_id,name))              return [self._playlist_videos_info(url,name,playlist_id)] -    def _playlist_videos_info(self,url,name,playlist_id=0): + +    def _playlist_videos_info(self, url, name, playlist_id):          '''Returns the videos of the playlist''' -        video_RE=r''' -                     <li\ id="talk_(\d+)"([.\s]*?)data-id="(?P<video_id>\d+)" -                     ([.\s]*?)data-playlist_item_id="(\d+)" -                     ([.\s]*?)data-mediaslug="(?P<mediaSlug>.+?)" -                     ''' -        video_name_RE=r'<p\ class="talk-title"><a href="(?P<talk_url>/talks/(.+).html)">(?P<fullname>.+?)</a></p>' -        webpage=self._download_webpage(url, playlist_id, 'Downloading playlist webpage') -        m_videos=re.finditer(video_RE,webpage,re.VERBOSE) -        m_names=re.finditer(video_name_RE,webpage) + +        webpage = self._download_webpage( +          
  url, playlist_id, u'Downloading playlist webpage') +        matches = re.finditer( +            r'<p\s+class="talk-title[^"]*"><a\s+href="(?P<talk_url>/talks/[^"]+\.html)">[^<]*</a></p>', +            webpage)          playlist_title = self._html_search_regex(r'div class="headline">\s*?<h1>\s*?<span>(.*?)</span>',                                                   webpage, 'playlist title') -        playlist_entries = [] -        for m_video, m_name in zip(m_videos,m_names): -            talk_url='http://www.ted.com%s' % m_name.group('talk_url') -            playlist_entries.append(self.url_result(talk_url, 'TED')) -        return self.playlist_result(playlist_entries, playlist_id = playlist_id, playlist_title = playlist_title) +        playlist_entries = [ +            self.url_result(u'http://www.ted.com' + m.group('talk_url'), 'TED') +            for m in matches +        ] +        return self.playlist_result( +            playlist_entries, playlist_id=playlist_id, playlist_title=playlist_title)      def _talk_info(self, url, video_id=0):          """Return the video for the talk in the url""" @@ -85,7 +83,7 @@ class TEDIE(SubtitlesInfoExtractor):              'ext': 'mp4',              'url': stream['file'],              'format': stream['id'] -            } for stream in info['htmlStreams']] +        } for stream in info['htmlStreams']]          video_id = info['id'] @@ -95,7 +93,7 @@ class TEDIE(SubtitlesInfoExtractor):              self._list_available_subtitles(video_id, webpage)              return -        info = { +        return {              'id': video_id,              'title': title,              'thumbnail': thumbnail, @@ -104,11 +102,6 @@ class TEDIE(SubtitlesInfoExtractor):              'formats': formats,          } -        # TODO: Remove when #980 has been merged -        info.update(info['formats'][-1]) - -        return info -      def _get_available_subtitles(self, video_id, webpage):          try:              options = 
self._search_regex(r'(?:<select name="subtitles_language_select" id="subtitles_language_select">)(.*?)(?:</select>)', webpage, 'subtitles_language_select', flags=re.DOTALL) @@ -119,6 +112,6 @@ class TEDIE(SubtitlesInfoExtractor):                      url = 'http://www.ted.com/talks/subtitles/id/%s/lang/%s/format/srt' % (video_id, l)                      sub_lang_list[l] = url                  return sub_lang_list -        except RegexNotFoundError as err: +        except RegexNotFoundError:              self._downloader.report_warning(u'video doesn\'t have subtitles')          return {} diff --git a/youtube_dl/extractor/toutv.py b/youtube_dl/extractor/toutv.py new file mode 100644 index 000000000..2f728d3dc --- /dev/null +++ b/youtube_dl/extractor/toutv.py @@ -0,0 +1,74 @@ +# coding: utf-8 +import re +import xml.etree.ElementTree + +from .common import InfoExtractor +from ..utils import ( +    ExtractorError, +    unified_strdate, +) + + +class TouTvIE(InfoExtractor): +    IE_NAME = u'tou.tv' +    _VALID_URL = r'https?://www\.tou\.tv/(?P<id>[a-zA-Z0-9_-]+(?:/(?P<episode>S[0-9]+E[0-9]+)))' + +    _TEST = { +        u'url': u'http://www.tou.tv/30-vies/S04E41', +        u'file': u'30-vies_S04E41.mp4', +        u'info_dict': { +            u'title': u'30 vies Saison 4 / Épisode 41', +            u'description': u'md5:da363002db82ccbe4dafeb9cab039b09', +            u'age_limit': 8, +            u'uploader': u'Groupe des Nouveaux Médias', +            u'duration': 1296, +            u'upload_date': u'20131118', +            u'thumbnail': u'http://static.tou.tv/medias/images/2013-11-18_19_00_00_30VIES_0341_01_L.jpeg', +        }, +        u'params': { +            u'skip_download': True,  # Requires rtmpdump +        }, +        u'skip': 'Only available in Canada' +    } + +    def _real_extract(self, url): +        mobj = re.match(self._VALID_URL, url) +        video_id = mobj.group('id') +        webpage = self._download_webpage(url, video_id) + +        mediaId = 
self._search_regex( +            r'"idMedia":\s*"([^"]+)"', webpage, u'media ID') + +        streams_url = u'http://release.theplatform.com/content.select?pid=' + mediaId +        streams_webpage = self._download_webpage( +            streams_url, video_id, note=u'Downloading stream list') + +        streams_doc = xml.etree.ElementTree.fromstring( +            streams_webpage.encode('utf-8')) +        video_url = next(n.text +                         for n in streams_doc.findall('.//choice/url') +                         if u'//ad.doubleclick' not in n.text) +        if video_url.endswith('/Unavailable.flv'): +            raise ExtractorError( +                u'Access to this video is blocked from outside of Canada', +                expected=True) + +        duration_str = self._html_search_meta( +            'video:duration', webpage, u'duration') +        duration = int(duration_str) if duration_str else None +        upload_date_str = self._html_search_meta( +            'video:release_date', webpage, u'upload date') +        upload_date = unified_strdate(upload_date_str) if upload_date_str else None + +        return { +            'id': video_id, +            'title': self._og_search_title(webpage), +            'url': video_url, +            'description': self._og_search_description(webpage), +            'uploader': self._dc_search_uploader(webpage), +            'thumbnail': self._og_search_thumbnail(webpage), +            'age_limit': self._media_rating_search(webpage), +            'duration': duration, +            'upload_date': upload_date, +            'ext': 'mp4', +        } diff --git a/youtube_dl/extractor/tube8.py b/youtube_dl/extractor/tube8.py index d4b7603c7..4d9d41db3 100644 --- a/youtube_dl/extractor/tube8.py +++ b/youtube_dl/extractor/tube8.py @@ -5,8 +5,6 @@ from .common import InfoExtractor  from ..utils import (      compat_urllib_parse_urlparse,      compat_urllib_request, -    compat_urllib_parse, -    unescapeHTML,  )  from ..aes 
import (      aes_decrypt_text diff --git a/youtube_dl/extractor/tvp.py b/youtube_dl/extractor/tvp.py index 32e0f5037..bfed9dd04 100644 --- a/youtube_dl/extractor/tvp.py +++ b/youtube_dl/extractor/tvp.py @@ -13,9 +13,10 @@ class TvpIE(InfoExtractor):          u'md5': u'148408967a6a468953c0a75cbdaf0d7a',          u'file': u'12878238.wmv',          u'info_dict': { -            u'title': u'31.10.2013', -            u'description': u'31.10.2013', +            u'title': u'31.10.2013 - Odcinek 2', +            u'description': u'31.10.2013 - Odcinek 2',          }, +        u'skip': u'Download has to use same server IP as extraction. Therefore, a good (load-balancing) DNS resolver will make the download fail.'      }      def _real_extract(self, url): diff --git a/youtube_dl/extractor/vevo.py b/youtube_dl/extractor/vevo.py index 3f6020f74..4378b1780 100644 --- a/youtube_dl/extractor/vevo.py +++ b/youtube_dl/extractor/vevo.py @@ -78,12 +78,13 @@ class VevoIE(InfoExtractor):                  continue              format_url = self._SMIL_BASE_URL + m.group('path') -            format_note = ('%(vcodec)s@%(vbr)4sk, %(acodec)s@%(abr)3sk' % -                           m.groupdict())              formats.append({                  'url': format_url,                  'format_id': u'SMIL_' + m.group('cbr'), -                'format_note': format_note, +                'vcodec': m.group('vcodec'), +                'acodec': m.group('acodec'), +                'vbr': int(m.group('vbr')), +                'abr': int(m.group('abr')),                  'ext': m.group('ext'),                  'width': int(m.group('width')),                  'height': int(m.group('height')), diff --git a/youtube_dl/extractor/videopremium.py b/youtube_dl/extractor/videopremium.py index 65f39b982..4800415bd 100644 --- a/youtube_dl/extractor/videopremium.py +++ b/youtube_dl/extractor/videopremium.py @@ -24,12 +24,16 @@ class VideoPremiumIE(InfoExtractor):          webpage_url = 'http://videopremium.tv/' + 
video_id          webpage = self._download_webpage(webpage_url, video_id) -        self.report_extraction(video_id) +        if re.match(r"^<html><head><script[^>]*>window.location\s*=", webpage): +            # Download again, we need a cookie +            webpage = self._download_webpage( +                webpage_url, video_id, +                note=u'Downloading webpage again (with cookie)') -        video_title = self._html_search_regex(r'<h2(?:.*?)>\s*(.+?)\s*<', -            webpage, u'video title') +        video_title = self._html_search_regex( +            r'<h2(?:.*?)>\s*(.+?)\s*<', webpage, u'video title') -        return [{ +        return {              'id':          video_id,              'url':         "rtmp://e%d.md.iplay.md/play" % random.randint(1, 16),              'play_path':   "mp4:%s.f4v" % video_id, @@ -37,4 +41,4 @@ class VideoPremiumIE(InfoExtractor):              'player_url':  "http://videopremium.tv/uplayer/uppod.swf",              'ext':         'f4v',              'title':       video_title, -        }] +        }
\ No newline at end of file diff --git a/youtube_dl/extractor/vimeo.py b/youtube_dl/extractor/vimeo.py index d465bf20b..7d82c2cfa 100644 --- a/youtube_dl/extractor/vimeo.py +++ b/youtube_dl/extractor/vimeo.py @@ -151,7 +151,7 @@ class VimeoIE(InfoExtractor):                  config = json.loads(config_json)              except RegexNotFoundError:                  # For pro videos or player.vimeo.com urls -                config = self._search_regex([r' = {config:({.+?}),assets:', r'c=({.+?);'], +                config = self._search_regex([r' = {config:({.+?}),assets:', r'(?:c|b)=({.+?});'],                      webpage, u'info section', flags=re.DOTALL)                  config = json.loads(config)          except Exception as e: diff --git a/youtube_dl/extractor/xtube.py b/youtube_dl/extractor/xtube.py index 03ad88bed..e3458d2bd 100644 --- a/youtube_dl/extractor/xtube.py +++ b/youtube_dl/extractor/xtube.py @@ -5,7 +5,6 @@ from .common import InfoExtractor  from ..utils import (      compat_urllib_parse_urlparse,      compat_urllib_request, -    compat_urllib_parse,  )  class XTubeIE(InfoExtractor): diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py index c48c0e24f..9b09793eb 100644 --- a/youtube_dl/extractor/youtube.py +++ b/youtube_dl/extractor/youtube.py @@ -139,10 +139,10 @@ class YoutubeBaseInfoExtractor(InfoExtractor):  class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):      IE_DESC = u'YouTube.com' -    _VALID_URL = r"""^ +    _VALID_URL = r"""(?x)^                       ( -                         (?:https?://)?                                       # http(s):// (optional) -                         (?:(?:(?:(?:\w+\.)?youtube(?:-nocookie)?\.com/| +                         (?:https?://|//)?                                    
# http(s):// or protocol-independent URL (optional) +                         (?:(?:(?:(?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie)?\.com/|                              tube\.majestyc\.net/|                              youtube\.googleapis\.com/)                        # the various hostnames, with wildcard subdomains                           (?:.*?\#/)?                                          # handle anchor (#/) redirect urls @@ -363,6 +363,18 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):                  u"uploader_id": u"justintimberlakeVEVO"              }          }, +        { +            u"url":  u"//www.YouTube.com/watch?v=yZIXLfi8CZQ", +            u"file":  u"yZIXLfi8CZQ.mp4", +            u"note": u"Embed-only video (#1746)", +            u"info_dict": { +                u"upload_date": u"20120608", +                u"title": u"Principal Sexually Assaults A Teacher - Episode 117 - 8th June 2012", +                u"description": u"md5:09b78bd971f1e3e289601dfba15ca4f7", +                u"uploader": u"SET India", +                u"uploader_id": u"setindia" +            } +        },      ] @@ -370,7 +382,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):      def suitable(cls, url):          """Receives a URL and returns True if suitable for this IE."""          if YoutubePlaylistIE.suitable(url): return False -        return re.match(cls._VALID_URL, url, re.VERBOSE) is not None +        return re.match(cls._VALID_URL, url) is not None      def __init__(self, *args, **kwargs):          super(YoutubeIE, self).__init__(*args, **kwargs) @@ -1019,6 +1031,8 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):          """Turn the encrypted s field into a working signature"""          if player_url is not None: +            if player_url.startswith(u'//'): +                player_url = u'https:' + player_url              try:                  player_id = (player_url, len(s))               
   if player_id not in self._player_cache: @@ -1098,7 +1112,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):              params = compat_urllib_parse.urlencode({                  'lang': lang,                  'v': video_id, -                'fmt': self._downloader.params.get('subtitlesformat'), +                'fmt': self._downloader.params.get('subtitlesformat', 'srt'),                  'name': l[0].encode('utf-8'),              })              url = u'http://www.youtube.com/api/timedtext?' + params @@ -1111,7 +1125,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):      def _get_available_automatic_caption(self, video_id, webpage):          """We need the webpage for getting the captions url, pass it as an             argument to speed up the process.""" -        sub_format = self._downloader.params.get('subtitlesformat') +        sub_format = self._downloader.params.get('subtitlesformat', 'srt')          self.to_screen(u'%s: Looking for automatic captions' % video_id)          mobj = re.search(r';ytplayer.config = ({.*?});', webpage)          err_msg = u'Couldn\'t find automatic captions for %s' % video_id @@ -1270,7 +1284,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):              # We simulate the access to the video from www.youtube.com/v/{video_id}              # this can be viewed without login into Youtube              data = compat_urllib_parse.urlencode({'video_id': video_id, -                                                  'el': 'embedded', +                                                  'el': 'player_embedded',                                                    'gl': 'US',                                                    'hl': 'en',                                                    'eurl': 'https://youtube.googleapis.com/v/' + video_id, @@ -1299,6 +1313,11 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):              else:                  raise 
ExtractorError(u'"token" parameter not in video info for unknown reason') +        if 'view_count' in video_info: +            view_count = int(video_info['view_count'][0]) +        else: +            view_count = None +          # Check for "rental" videos          if 'ypc_video_rental_bar_text' in video_info and 'author' not in video_info:              raise ExtractorError(u'"rental" videos not supported') @@ -1487,6 +1506,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):                  'age_limit':    18 if age_gate else 0,                  'annotations':  video_annotations,                  'webpage_url': 'https://www.youtube.com/watch?v=%s' % video_id, +                'view_count': view_count,              })          return results @@ -1578,20 +1598,31 @@ class YoutubeChannelIE(InfoExtractor):          # Download channel page          channel_id = mobj.group(1)          video_ids = [] +        url = 'https://www.youtube.com/channel/%s/videos' % channel_id +        channel_page = self._download_webpage(url, channel_id) +        if re.search(r'channel-header-autogenerated-label', channel_page) is not None: +            autogenerated = True +        else: +            autogenerated = False -        # Download all channel pages using the json-based channel_ajax query -        for pagenum in itertools.count(1): -            url = self._MORE_PAGES_URL % (pagenum, channel_id) -            page = self._download_webpage(url, channel_id, -                                          u'Downloading page #%s' % pagenum) - -            page = json.loads(page) - -            ids_in_page = self.extract_videos_from_page(page['content_html']) -            video_ids.extend(ids_in_page) - -            if self._MORE_PAGES_INDICATOR not in page['load_more_widget_html']: -                break +        if autogenerated: +            # The videos are contained in a single page +            # the ajax pages can't be used, they are empty +            video_ids = 
self.extract_videos_from_page(channel_page) +        else: +            # Download all channel pages using the json-based channel_ajax query +            for pagenum in itertools.count(1): +                url = self._MORE_PAGES_URL % (pagenum, channel_id) +                page = self._download_webpage(url, channel_id, +                                              u'Downloading page #%s' % pagenum) +     +                page = json.loads(page) +     +                ids_in_page = self.extract_videos_from_page(page['content_html']) +                video_ids.extend(ids_in_page) +     +                if self._MORE_PAGES_INDICATOR not in page['load_more_widget_html']: +                    break          self._downloader.to_screen(u'[youtube] Channel %s: Found %i videos' % (channel_id, len(video_ids))) diff --git a/youtube_dl/extractor/zdf.py b/youtube_dl/extractor/zdf.py index faed7ff7f..c6a9d06f2 100644 --- a/youtube_dl/extractor/zdf.py +++ b/youtube_dl/extractor/zdf.py @@ -53,7 +53,7 @@ class ZDFIE(InfoExtractor):              video_id,              u'Get stream URL') -        MMS_STREAM = r'href="(?P<video_url>mms://[^"]*)"' +        #MMS_STREAM = r'href="(?P<video_url>mms://[^"]*)"'          RTSP_STREAM = r'(?P<video_url>rtsp://[^"]*.mp4)'          mobj = re.search(self._MEDIA_STREAM, media_link) diff --git a/youtube_dl/update.py b/youtube_dl/update.py index 0689a4891..e5f441707 100644 --- a/youtube_dl/update.py +++ b/youtube_dl/update.py @@ -2,11 +2,15 @@ import io  import json  import traceback  import hashlib +import os  import subprocess  import sys  from zipimport import zipimporter -from .utils import * +from .utils import ( +    compat_str, +    compat_urllib_request, +)  from .version import __version__  def rsa_verify(message, signature, key): @@ -105,7 +109,7 @@ def update_self(to_screen, verbose):              urlh = compat_urllib_request.urlopen(version['exe'][0])              newcontent = urlh.read()              urlh.close() -        except 
(IOError, OSError) as err: +        except (IOError, OSError):              if verbose: to_screen(compat_str(traceback.format_exc()))              to_screen(u'ERROR: unable to download latest version')              return @@ -118,7 +122,7 @@ def update_self(to_screen, verbose):          try:              with open(exe + '.new', 'wb') as outf:                  outf.write(newcontent) -        except (IOError, OSError) as err: +        except (IOError, OSError):              if verbose: to_screen(compat_str(traceback.format_exc()))              to_screen(u'ERROR: unable to write the new version')              return @@ -137,7 +141,7 @@ start /b "" cmd /c del "%%~f0"&exit /b"              subprocess.Popen([bat])  # Continues to run in the background              return  # Do not show premature success messages -        except (IOError, OSError) as err: +        except (IOError, OSError):              if verbose: to_screen(compat_str(traceback.format_exc()))              to_screen(u'ERROR: unable to overwrite current version')              return @@ -148,7 +152,7 @@ start /b "" cmd /c del "%%~f0"&exit /b"              urlh = compat_urllib_request.urlopen(version['bin'][0])              newcontent = urlh.read()              urlh.close() -        except (IOError, OSError) as err: +        except (IOError, OSError):              if verbose: to_screen(compat_str(traceback.format_exc()))              to_screen(u'ERROR: unable to download latest version')              return @@ -161,7 +165,7 @@ start /b "" cmd /c del "%%~f0"&exit /b"          try:              with open(filename, 'wb') as outf:                  outf.write(newcontent) -        except (IOError, OSError) as err: +        except (IOError, OSError):              if verbose: to_screen(compat_str(traceback.format_exc()))              to_screen(u'ERROR: unable to overwrite current version')              return diff --git a/youtube_dl/utils.py b/youtube_dl/utils.py index 1d9785341..0720fe9eb 100644 --- 
a/youtube_dl/utils.py +++ b/youtube_dl/utils.py @@ -734,6 +734,8 @@ def unified_strdate(date_str):          '%Y/%m/%d %H:%M:%S',          '%d.%m.%Y %H:%M',          '%Y-%m-%dT%H:%M:%SZ', +        '%Y-%m-%dT%H:%M:%S.%fZ', +        '%Y-%m-%dT%H:%M:%S.%f0Z',          '%Y-%m-%dT%H:%M:%S',      ]      for expression in format_expressions: @@ -949,7 +951,16 @@ class locked_file(object):  def shell_quote(args): -    return ' '.join(map(pipes.quote, args)) +    quoted_args = [] +    encoding = sys.getfilesystemencoding() +    if encoding is None: +        encoding = 'utf-8' +    for a in args: +        if isinstance(a, bytes): +            # We may get a filename encoded with 'encodeFilename' +            a = a.decode(encoding) +        quoted_args.append(pipes.quote(a)) +    return u' '.join(quoted_args)  def takewhile_inclusive(pred, seq): diff --git a/youtube_dl/version.py b/youtube_dl/version.py index 26b91105f..c1f581cd6 100644 --- a/youtube_dl/version.py +++ b/youtube_dl/version.py @@ -1,2 +1,2 @@ -__version__ = '2013.11.13' +__version__ = '2013.11.22' | 
