Diffstat (limited to 'youtube_dl/extractor')
43 files changed, 1179 insertions, 311 deletions
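Many of the hunks below (AddAnime, Arte, Cinemassacre, Dailymotion, Internet Video Archive) move extractors from returning a single url/ext pair to the richer 'formats' list documented in the common.py docstring change, and drop the temporary info.update(formats[-1]) workaround that was waiting on #980. The following is only a rough sketch of that convention, modeled loosely on the Dailymotion hunk; the helper name build_formats, the _FORMATS table shown here, and the sample URL are illustrative assumptions, not part of the patch.

import re

# Quality keys modeled on the Dailymotion hunk; in the real extractor they
# index into the player's "info" JSON. Everything else here is illustrative.
_FORMATS = [
    ('stream_h264_ld_url', 'ld'),
    ('stream_h264_url', 'standard'),
    ('stream_h264_hq_url', 'hq'),
    ('stream_h264_hd_url', 'hd'),
]

def build_formats(info):
    """Collect every available quality into a youtube-dl style formats list."""
    formats = []
    for key, format_id in _FORMATS:
        video_url = info.get(key)
        if video_url is None:
            continue
        # Dimensions are often encoded in the URL, e.g. ".../H264-848x480/..."
        m_size = re.search(r'H264-(\d+)x(\d+)', video_url)
        width, height = (m_size.group(1), m_size.group(2)) if m_size else (None, None)
        formats.append({
            'url': video_url,
            'ext': 'mp4',
            'format_id': format_id,
            'width': width,
            'height': height,
        })
    if not formats:
        raise ValueError('Unable to extract any video URL')
    return formats  # ordered worst to best, as the sorting in these hunks expects

# Example with a hypothetical URL:
# build_formats({'stream_h264_url': 'http://example.com/H264-512x384/video.mp4'})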
diff --git a/youtube_dl/extractor/__init__.py b/youtube_dl/extractor/__init__.py
index 748f12e5a..bcf1cce7f 100644
--- a/youtube_dl/extractor/__init__.py
+++ b/youtube_dl/extractor/__init__.py
@@ -72,6 +72,7 @@ from .jeuxvideo import JeuxVideoIE
 from .jukebox import JukeboxIE
 from .justintv import JustinTVIE
 from .kankan import KankanIE
+from .keezmovies import KeezMoviesIE
 from .kickstarter import KickStarterIE
 from .keek import KeekIE
 from .liveleak import LiveLeakIE
@@ -82,6 +83,7 @@ from .mit import TechTVMITIE, MITIE
 from .mixcloud import MixcloudIE
 from .mtv import MTVIE
 from .muzu import MuzuTVIE
+from .myspace import MySpaceIE
 from .myspass import MySpassIE
 from .myvideo import MyVideoIE
 from .naver import NaverIE
@@ -94,6 +96,7 @@ from .ooyala import OoyalaIE
 from .orf import ORFIE
 from .pbs import PBSIE
 from .photobucket import PhotobucketIE
+from .pornhub import PornHubIE
 from .pornotube import PornotubeIE
 from .rbmaradio import RBMARadioIE
 from .redtube import RedTubeIE
@@ -102,22 +105,27 @@ from .ro220 import Ro220IE
 from .rottentomatoes import RottenTomatoesIE
 from .roxwel import RoxwelIE
 from .rtlnow import RTLnowIE
+from .rutube import RutubeIE
 from .sina import SinaIE
 from .slashdot import SlashdotIE
 from .slideshare import SlideshareIE
 from .sohu import SohuIE
 from .soundcloud import SoundcloudIE, SoundcloudSetIE, SoundcloudUserIE
 from .southparkstudios import SouthParkStudiosIE
+from .spankwire import SpankwireIE
 from .spiegel import SpiegelIE
 from .stanfordoc import StanfordOpenClassroomIE
 from .statigram import StatigramIE
 from .steam import SteamIE
+from .sztvhu import SztvHuIE
 from .teamcoco import TeamcocoIE
+from .techtalks import TechTalksIE
 from .ted import TEDIE
 from .tf1 import TF1IE
 from .thisav import ThisAVIE
 from .traileraddict import TrailerAddictIE
 from .trilulilu import TriluliluIE
+from .tube8 import Tube8IE
 from .tudou import TudouIE
 from .tumblr import TumblrIE
 from .tutv import TutvIE
@@ -134,7 +142,9 @@ from .videofyme import VideofyMeIE
 from .videopremium import VideoPremiumIE
 from .vimeo import VimeoIE, VimeoChannelIE
 from .vine import VineIE
+from .vk import VKIE
 from .wat import WatIE
+from .websurg import WeBSurgIE
 from .weibo import WeiboIE
 from .wimp import WimpIE
 from .worldstarhiphop import WorldStarHipHopIE
diff --git a/youtube_dl/extractor/addanime.py b/youtube_dl/extractor/addanime.py
index 82a785a19..b99d4b966 100644
--- a/youtube_dl/extractor/addanime.py
+++ b/youtube_dl/extractor/addanime.py
@@ -17,8 +17,8 @@ class AddAnimeIE(InfoExtractor):
     IE_NAME = u'AddAnime'
     _TEST = {
         u'url': u'http://www.add-anime.net/watch_video.php?v=24MR3YO5SAS9',
-        u'file': u'24MR3YO5SAS9.flv',
-        u'md5': u'1036a0e0cd307b95bd8a8c3a5c8cfaf1',
+        u'file': u'24MR3YO5SAS9.mp4',
+        u'md5': u'72954ea10bc979ab5e2eb288b21425a0',
         u'info_dict': {
             u"description": u"One Piece 606",
             u"title": u"One Piece 606"
@@ -31,7 +31,8 @@ class AddAnimeIE(InfoExtractor):
             video_id = mobj.group('video_id')
             webpage = self._download_webpage(url, video_id)
         except ExtractorError as ee:
-            if not isinstance(ee.cause, compat_HTTPError):
+            if not isinstance(ee.cause, compat_HTTPError) or \
+               ee.cause.code != 503:
                 raise
 
             redir_webpage = ee.cause.read().decode('utf-8')
@@ -60,16 +61,26 @@ class AddAnimeIE(InfoExtractor):
                 note=u'Confirming after redirect')
            webpage = self._download_webpage(url, video_id) -        video_url = self._search_regex(r"var normal_video_file = '(.*?)';", -                                       webpage, u'video file URL') +        formats = [] +        for format_id in ('normal', 'hq'): +            rex = r"var %s_video_file = '(.*?)';" % re.escape(format_id) +            video_url = self._search_regex(rex, webpage, u'video file URLx', +                                           fatal=False) +            if not video_url: +                continue +            formats.append({ +                'format_id': format_id, +                'url': video_url, +            }) +        if not formats: +            raise ExtractorError(u'Cannot find any video format!')          video_title = self._og_search_title(webpage)          video_description = self._og_search_description(webpage)          return {              '_type': 'video',              'id':  video_id, -            'url': video_url, -            'ext': 'flv', +            'formats': formats,              'title': video_title,              'description': video_description          } diff --git a/youtube_dl/extractor/arte.py b/youtube_dl/extractor/arte.py index 5ee8a67b1..e10c74c11 100644 --- a/youtube_dl/extractor/arte.py +++ b/youtube_dl/extractor/arte.py @@ -158,7 +158,9 @@ class ArteTVPlus7IE(InfoExtractor):              'thumbnail': player_info.get('programImage') or player_info.get('VTU', {}).get('IUR'),          } -        formats = player_info['VSR'].values() +        all_formats = player_info['VSR'].values() +        # Some formats use the m3u8 protocol +        all_formats = list(filter(lambda f: f.get('videoFormat') != 'M3U8', all_formats))          def _match_lang(f):              if f.get('versionCode') is None:                  return True @@ -170,16 +172,36 @@ class ArteTVPlus7IE(InfoExtractor):              regexes = [r'VO?%s' % l, r'VO?.-ST%s' % l]              return any(re.match(r, f['versionCode']) for r in regexes)          # Some formats may not be in the same language as the url -        formats = filter(_match_lang, formats) -        # Some formats use the m3u8 protocol -        formats = filter(lambda f: f.get('videoFormat') != 'M3U8', formats) +        formats = filter(_match_lang, all_formats) +        formats = list(formats) # in python3 filter returns an iterator +        if not formats: +            # Some videos are only available in the 'Originalversion' +            # they aren't tagged as being in French or German +            if all(f['versionCode'] == 'VO' for f in all_formats): +                formats = all_formats +            else: +                raise ExtractorError(u'The formats list is empty')          # We order the formats by quality -        formats = sorted(formats, key=lambda f: int(f.get('height',-1))) +        if re.match(r'[A-Z]Q', formats[0]['quality']) is not None: +            sort_key = lambda f: ['HQ', 'MQ', 'EQ', 'SQ'].index(f['quality']) +        else: +            sort_key = lambda f: int(f.get('height',-1)) +        formats = sorted(formats, key=sort_key)          # Prefer videos without subtitles in the same language          formats = sorted(formats, key=lambda f: re.match(r'VO(F|A)-STM\1', f.get('versionCode', '')) is None)          # Pick the best quality          def _format(format_info): +            quality = format_info['quality'] +            m_quality = re.match(r'\w*? 
- (\d*)p', quality) +            if m_quality is not None: +                quality = m_quality.group(1) +            if format_info.get('versionCode') is not None: +                format_id = u'%s-%s' % (quality, format_info['versionCode']) +            else: +                format_id = quality              info = { +                'format_id': format_id, +                'format_note': format_info.get('versionLibelle'),                  'width': format_info.get('width'),                  'height': format_info.get('height'),              } @@ -192,8 +214,6 @@ class ArteTVPlus7IE(InfoExtractor):                  info['ext'] = determine_ext(info['url'])              return info          info_dict['formats'] = [_format(f) for f in formats] -        # TODO: Remove when #980 has been merged  -        info_dict.update(info_dict['formats'][-1])          return info_dict @@ -207,7 +227,7 @@ class ArteTVCreativeIE(ArteTVPlus7IE):          u'url': u'http://creative.arte.tv/de/magazin/agentur-amateur-corporate-design',          u'file': u'050489-002.mp4',          u'info_dict': { -            u'title': u'Agentur Amateur #2 - Corporate Design', +            u'title': u'Agentur Amateur / Agence Amateur #2 : Corporate Design',          },      } diff --git a/youtube_dl/extractor/brightcove.py b/youtube_dl/extractor/brightcove.py index 745212f2f..1392f382a 100644 --- a/youtube_dl/extractor/brightcove.py +++ b/youtube_dl/extractor/brightcove.py @@ -53,6 +53,8 @@ class BrightcoveIE(InfoExtractor):          # Fix up some stupid HTML, see https://github.com/rg3/youtube-dl/issues/1553          object_str = re.sub(r'(<param name="[^"]+" value="[^"]+")>',                              lambda m: m.group(1) + '/>', object_str) +        # Fix up some stupid XML, see https://github.com/rg3/youtube-dl/issues/1608 +        object_str = object_str.replace(u'<--', u'<!--')          object_doc = xml.etree.ElementTree.fromstring(object_str)          assert u'BrightcoveExperience' in object_doc.attrib['class'] @@ -96,7 +98,10 @@ class BrightcoveIE(InfoExtractor):          playlist_info = self._download_webpage(self._PLAYLIST_URL_TEMPLATE % player_key,                                                 player_key, u'Downloading playlist information') -        playlist_info = json.loads(playlist_info)['videoList'] +        json_data = json.loads(playlist_info) +        if 'videoList' not in json_data: +            raise ExtractorError(u'Empty playlist') +        playlist_info = json_data['videoList']          videos = [self._extract_video_info(video_info) for video_info in playlist_info['mediaCollectionDTO']['videoDTOs']]          return self.playlist_result(videos, playlist_id=playlist_info['id'], diff --git a/youtube_dl/extractor/cinemassacre.py b/youtube_dl/extractor/cinemassacre.py index 6925b96c2..2fe1033f0 100644 --- a/youtube_dl/extractor/cinemassacre.py +++ b/youtube_dl/extractor/cinemassacre.py @@ -55,30 +55,30 @@ class CinemassacreIE(InfoExtractor):              video_description = None          playerdata = self._download_webpage(playerdata_url, video_id) -        base_url = self._html_search_regex(r'\'streamer\': \'(?P<base_url>rtmp://.*?)/(?:vod|Cinemassacre)\'', -            playerdata, u'base_url') -        base_url += '/Cinemassacre/' -        # Important: The file names in playerdata are not used by the player and even wrong for some videos -        sd_file = 'Cinemassacre-%s_high.mp4' % video_id -        hd_file = 'Cinemassacre-%s.mp4' % video_id -        video_thumbnail = 
'http://image.screenwavemedia.com/Cinemassacre/Cinemassacre-%s_thumb_640x360.jpg' % video_id +        url = self._html_search_regex(r'\'streamer\': \'(?P<url>[^\']+)\'', playerdata, u'url') + +        sd_file = self._html_search_regex(r'\'file\': \'(?P<sd_file>[^\']+)\'', playerdata, u'sd_file') +        hd_file = self._html_search_regex(r'\'?file\'?: "(?P<hd_file>[^"]+)"', playerdata, u'hd_file') +        video_thumbnail = self._html_search_regex(r'\'image\': \'(?P<thumbnail>[^\']+)\'', playerdata, u'thumbnail', fatal=False)          formats = [              { -                'url': base_url + sd_file, +                'url': url, +                'play_path': 'mp4:' + sd_file,                  'ext': 'flv',                  'format': 'sd',                  'format_id': 'sd',              },              { -                'url': base_url + hd_file, +                'url': url, +                'play_path': 'mp4:' + hd_file,                  'ext': 'flv',                  'format': 'hd',                  'format_id': 'hd',              },          ] -        info = { +        return {              'id': video_id,              'title': video_title,              'formats': formats, @@ -86,6 +86,3 @@ class CinemassacreIE(InfoExtractor):              'upload_date': video_date,              'thumbnail': video_thumbnail,          } -        # TODO: Remove when #980 has been merged -        info.update(formats[-1]) -        return info diff --git a/youtube_dl/extractor/common.py b/youtube_dl/extractor/common.py index 2a5a85dc6..cef4dce85 100644 --- a/youtube_dl/extractor/common.py +++ b/youtube_dl/extractor/common.py @@ -14,6 +14,8 @@ from ..utils import (      clean_html,      compiled_regex_type,      ExtractorError, +    RegexNotFoundError, +    sanitize_filename,      unescapeHTML,  ) @@ -61,9 +63,12 @@ class InfoExtractor(object):                      * ext       Will be calculated from url if missing                      * format    A human-readable description of the format                                  ("mp4 container with h264/opus"). -                                Calculated from width and height if missing. +                                Calculated from the format_id, width, height. +                                and format_note fields if missing.                      
* format_id A short description of the format                                  ("mp4_h264_opus" or "19") +                    * format_note Additional info about the format +                                ("3D" or "DASH video")                      * width     Width of the video, if known                      * height    Height of the video, if known @@ -178,6 +183,17 @@ class InfoExtractor(object):              self.to_screen(u'Dumping request to ' + url)              dump = base64.b64encode(webpage_bytes).decode('ascii')              self._downloader.to_screen(dump) +        if self._downloader.params.get('write_pages', False): +            try: +                url = url_or_request.get_full_url() +            except AttributeError: +                url = url_or_request +            raw_filename = ('%s_%s.dump' % (video_id, url)) +            filename = sanitize_filename(raw_filename, restricted=True) +            self.to_screen(u'Saving request to ' + filename) +            with open(filename, 'wb') as outf: +                outf.write(webpage_bytes) +          content = webpage_bytes.decode(encoding, 'replace')          return (content, urlh) @@ -228,7 +244,7 @@ class InfoExtractor(object):          Perform a regex search on the given string, using a single or a list of          patterns returning the first matching group.          In case of failure return a default value or raise a WARNING or a -        ExtractorError, depending on fatal, specifying the field name. +        RegexNotFoundError, depending on fatal, specifying the field name.          """          if isinstance(pattern, (str, compat_str, compiled_regex_type)):              mobj = re.search(pattern, string, flags) @@ -248,7 +264,7 @@ class InfoExtractor(object):          elif default is not None:              return default          elif fatal: -            raise ExtractorError(u'Unable to extract %s' % _name) +            raise RegexNotFoundError(u'Unable to extract %s' % _name)          else:              self._downloader.report_warning(u'unable to extract %s; '                  u'please report this issue on http://yt-dl.org/bug' % _name) @@ -314,10 +330,10 @@ class InfoExtractor(object):      def _og_search_title(self, html, **kargs):          return self._og_search_property('title', html, **kargs) -    def _og_search_video_url(self, html, name='video url', **kargs): -        return self._html_search_regex([self._og_regex('video:secure_url'), -                                        self._og_regex('video')], -                                       html, name, **kargs) +    def _og_search_video_url(self, html, name='video url', secure=True, **kargs): +        regexes = [self._og_regex('video')] +        if secure: regexes.insert(0, self._og_regex('video:secure_url')) +        return self._html_search_regex(regexes, html, name, **kargs)      def _rta_search(self, html):          # See http://www.rtalabel.org/index.php?content=howtofaq#single @@ -365,7 +381,7 @@ class SearchInfoExtractor(InfoExtractor):      def _get_n_results(self, query, n):          """Get a specified number of results for a query""" -        raise NotImplementedError("This method must be implemented by sublclasses") +        raise NotImplementedError("This method must be implemented by subclasses")      @property      def SEARCH_KEY(self): diff --git a/youtube_dl/extractor/dailymotion.py b/youtube_dl/extractor/dailymotion.py index 3aef82bcf..e87690f9d 100644 --- a/youtube_dl/extractor/dailymotion.py +++ b/youtube_dl/extractor/dailymotion.py @@ -21,6 
+21,7 @@ class DailymotionBaseInfoExtractor(InfoExtractor):          """Build a request with the family filter disabled"""          request = compat_urllib_request.Request(url)          request.add_header('Cookie', 'family_filter=off') +        request.add_header('Cookie', 'ff=off')          return request  class DailymotionIE(DailymotionBaseInfoExtractor, SubtitlesInfoExtractor): @@ -28,6 +29,15 @@ class DailymotionIE(DailymotionBaseInfoExtractor, SubtitlesInfoExtractor):      _VALID_URL = r'(?i)(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/(?:embed/)?video/([^/]+)'      IE_NAME = u'dailymotion' + +    _FORMATS = [ +        (u'stream_h264_ld_url', u'ld'), +        (u'stream_h264_url', u'standard'), +        (u'stream_h264_hq_url', u'hq'), +        (u'stream_h264_hd_url', u'hd'), +        (u'stream_h264_hd1080_url', u'hd180'), +    ] +      _TESTS = [          {              u'url': u'http://www.dailymotion.com/video/x33vw9_tutoriel-de-youtubeur-dl-des-video_tech', @@ -52,6 +62,18 @@ class DailymotionIE(DailymotionBaseInfoExtractor, SubtitlesInfoExtractor):              },              u'skip': u'VEVO is only available in some countries',          }, +        # age-restricted video +        { +            u'url': u'http://www.dailymotion.com/video/xyh2zz_leanna-decker-cyber-girl-of-the-year-desires-nude-playboy-plus_redband', +            u'file': u'xyh2zz.mp4', +            u'md5': u'0d667a7b9cebecc3c89ee93099c4159d', +            u'info_dict': { +                u'title': 'Leanna Decker - Cyber Girl Of The Year Desires Nude [Playboy Plus]', +                u'uploader': 'HotWaves1012', +                u'age_limit': 18, +            } + +        }      ]      def _real_extract(self, url): @@ -60,7 +82,6 @@ class DailymotionIE(DailymotionBaseInfoExtractor, SubtitlesInfoExtractor):          video_id = mobj.group(1).split('_')[0].split('?')[0] -        video_extension = 'mp4'          url = 'http://www.dailymotion.com/video/%s' % video_id          # Retrieve video webpage to extract further information @@ -82,7 +103,8 @@ class DailymotionIE(DailymotionBaseInfoExtractor, SubtitlesInfoExtractor):          video_uploader = self._search_regex([r'(?im)<span class="owner[^\"]+?">[^<]+?<a [^>]+?>([^<]+?)</a>',                                               # Looking for official user                                               r'<(?:span|a) .*?rel="author".*?>([^<]+?)</'], -                                            webpage, 'video uploader') +                                            webpage, 'video uploader', fatal=False) +        age_limit = self._rta_search(webpage)          video_upload_date = None          mobj = re.search(r'<div class="[^"]*uploaded_cont[^"]*" title="[^"]*">([0-9]{2})-([0-9]{2})-([0-9]{4})</div>', webpage) @@ -99,18 +121,24 @@ class DailymotionIE(DailymotionBaseInfoExtractor, SubtitlesInfoExtractor):              msg = 'Couldn\'t get video, Dailymotion says: %s' % info['error']['title']              raise ExtractorError(msg, expected=True) -        # TODO: support choosing qualities - -        for key in ['stream_h264_hd1080_url','stream_h264_hd_url', -                    'stream_h264_hq_url','stream_h264_url', -                    'stream_h264_ld_url']: -            if info.get(key):#key in info and info[key]: -                max_quality = key -                self.to_screen(u'Using %s' % key) -                break -        else: +        formats = [] +        for (key, format_id) in self._FORMATS: +            video_url = info.get(key) +            if video_url is 
not None: +                m_size = re.search(r'H264-(\d+)x(\d+)', video_url) +                if m_size is not None: +                    width, height = m_size.group(1), m_size.group(2) +                else: +                    width, height = None, None +                formats.append({ +                    'url': video_url, +                    'ext': 'mp4', +                    'format_id': format_id, +                    'width': width, +                    'height': height, +                }) +        if not formats:              raise ExtractorError(u'Unable to extract video URL') -        video_url = info[max_quality]          # subtitles          video_subtitles = self.extract_subtitles(video_id, webpage) @@ -118,16 +146,16 @@ class DailymotionIE(DailymotionBaseInfoExtractor, SubtitlesInfoExtractor):              self._list_available_subtitles(video_id, webpage)              return -        return [{ +        return {              'id':       video_id, -            'url':      video_url, +            'formats': formats,              'uploader': video_uploader,              'upload_date':  video_upload_date,              'title':    self._og_search_title(webpage), -            'ext':      video_extension,              'subtitles':    video_subtitles, -            'thumbnail': info['thumbnail_url'] -        }] +            'thumbnail': info['thumbnail_url'], +            'age_limit': age_limit, +        }      def _get_available_subtitles(self, video_id, webpage):          try: diff --git a/youtube_dl/extractor/eighttracks.py b/youtube_dl/extractor/eighttracks.py index cced06811..2cfbcd363 100644 --- a/youtube_dl/extractor/eighttracks.py +++ b/youtube_dl/extractor/eighttracks.py @@ -101,7 +101,7 @@ class EightTracksIE(InfoExtractor):          first_url = 'http://8tracks.com/sets/%s/play?player=sm&mix_id=%s&format=jsonh' % (session, mix_id)          next_url = first_url          res = [] -        for i in itertools.count(): +        for i in range(track_count):              api_json = self._download_webpage(next_url, playlist_id,                  note=u'Downloading song information %s/%s' % (str(i+1), track_count),                  errnote=u'Failed to download song information') @@ -116,7 +116,5 @@ class EightTracksIE(InfoExtractor):                  'ext': 'm4a',              }              res.append(info) -            if api_data['set']['at_last_track']: -                break              next_url = 'http://8tracks.com/sets/%s/next?player=sm&mix_id=%s&format=jsonh&track_id=%s' % (session, mix_id, track_data['id'])          return res diff --git a/youtube_dl/extractor/exfm.py b/youtube_dl/extractor/exfm.py index 3443f19c5..c74556579 100644 --- a/youtube_dl/extractor/exfm.py +++ b/youtube_dl/extractor/exfm.py @@ -11,14 +11,14 @@ class ExfmIE(InfoExtractor):      _SOUNDCLOUD_URL = r'(?:http://)?(?:www\.)?api\.soundcloud.com/tracks/([^/]+)/stream'      _TESTS = [          { -            u'url': u'http://ex.fm/song/1bgtzg', -            u'file': u'95223130.mp3', -            u'md5': u'8a7967a3fef10e59a1d6f86240fd41cf', +            u'url': u'http://ex.fm/song/eh359', +            u'file': u'44216187.mp3', +            u'md5': u'e45513df5631e6d760970b14cc0c11e7',              u'info_dict': { -                u"title": u"We Can't Stop - Miley Cyrus", -                u"uploader": u"Miley Cyrus", -                u'upload_date': u'20130603', -                u'description': u'Download "We Can\'t Stop" \r\niTunes: http://smarturl.it/WeCantStop?IQid=SC\r\nAmazon: 
http://smarturl.it/WeCantStopAMZ?IQid=SC', +                u"title": u"Test House \"Love Is Not Enough\" (Extended Mix) DeadJournalist Exclusive", +                u"uploader": u"deadjournalist", +                u'upload_date': u'20120424', +                u'description': u'Test House \"Love Is Not Enough\" (Extended Mix) DeadJournalist Exclusive',              },              u'note': u'Soundcloud song',          }, diff --git a/youtube_dl/extractor/facebook.py b/youtube_dl/extractor/facebook.py index 9d1bc0751..f8bdfc2d3 100644 --- a/youtube_dl/extractor/facebook.py +++ b/youtube_dl/extractor/facebook.py @@ -19,7 +19,8 @@ class FacebookIE(InfoExtractor):      """Information Extractor for Facebook"""      _VALID_URL = r'^(?:https?://)?(?:\w+\.)?facebook\.com/(?:video/video|photo)\.php\?(?:.*?)v=(?P<ID>\d+)(?:.*)' -    _LOGIN_URL = 'https://login.facebook.com/login.php?m&next=http%3A%2F%2Fm.facebook.com%2Fhome.php&' +    _LOGIN_URL = 'https://www.facebook.com/login.php?next=http%3A%2F%2Ffacebook.com%2Fhome.php&login_attempt=1' +    _CHECKPOINT_URL = 'https://www.facebook.com/checkpoint/?next=http%3A%2F%2Ffacebook.com%2Fhome.php&_fb_noscript=1'      _NETRC_MACHINE = 'facebook'      IE_NAME = u'facebook'      _TEST = { @@ -36,50 +37,56 @@ class FacebookIE(InfoExtractor):          """Report attempt to log in."""          self.to_screen(u'Logging in') -    def _real_initialize(self): -        if self._downloader is None: -            return - -        useremail = None -        password = None -        downloader_params = self._downloader.params - -        # Attempt to use provided username and password or .netrc data -        if downloader_params.get('username', None) is not None: -            useremail = downloader_params['username'] -            password = downloader_params['password'] -        elif downloader_params.get('usenetrc', False): -            try: -                info = netrc.netrc().authenticators(self._NETRC_MACHINE) -                if info is not None: -                    useremail = info[0] -                    password = info[2] -                else: -                    raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE) -            except (IOError, netrc.NetrcParseError) as err: -                self._downloader.report_warning(u'parsing .netrc: %s' % compat_str(err)) -                return - +    def _login(self): +        (useremail, password) = self._get_login_info()          if useremail is None:              return -        # Log in +        login_page_req = compat_urllib_request.Request(self._LOGIN_URL) +        login_page_req.add_header('Cookie', 'locale=en_US') +        self.report_login() +        login_page = self._download_webpage(login_page_req, None, note=False, +            errnote=u'Unable to download login page') +        lsd = self._search_regex(r'"lsd":"(\w*?)"', login_page, u'lsd') +        lgnrnd = self._search_regex(r'name="lgnrnd" value="([^"]*?)"', login_page, u'lgnrnd') +          login_form = {              'email': useremail,              'pass': password, -            'login': 'Log+In' +            'lsd': lsd, +            'lgnrnd': lgnrnd, +            'next': 'http://facebook.com/home.php', +            'default_persistent': '0', +            'legacy_return': '1', +            'timezone': '-60', +            'trynum': '1',              }          request = compat_urllib_request.Request(self._LOGIN_URL, compat_urllib_parse.urlencode(login_form)) +        request.add_header('Content-Type', 
'application/x-www-form-urlencoded')          try: -            self.report_login()              login_results = compat_urllib_request.urlopen(request).read()              if re.search(r'<form(.*)name="login"(.*)</form>', login_results) is not None:                  self._downloader.report_warning(u'unable to log in: bad username/password, or exceded login rate limit (~3/min). Check credentials or wait.')                  return + +            check_form = { +                'fb_dtsg': self._search_regex(r'"fb_dtsg":"(.*?)"', login_results, u'fb_dtsg'), +                'nh': self._search_regex(r'name="nh" value="(\w*?)"', login_results, u'nh'), +                'name_action_selected': 'dont_save', +                'submit[Continue]': self._search_regex(r'<input value="(.*?)" name="submit\[Continue\]"', login_results, u'continue'), +            } +            check_req = compat_urllib_request.Request(self._CHECKPOINT_URL, compat_urllib_parse.urlencode(check_form)) +            check_req.add_header('Content-Type', 'application/x-www-form-urlencoded') +            check_response = compat_urllib_request.urlopen(check_req).read() +            if re.search(r'id="checkpointSubmitButton"', check_response) is not None: +                self._downloader.report_warning(u'Unable to confirm login, you have to login in your brower and authorize the login.')          except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:              self._downloader.report_warning(u'unable to log in: %s' % compat_str(err))              return +    def _real_initialize(self): +        self._login() +      def _real_extract(self, url):          mobj = re.match(self._VALID_URL, url)          if mobj is None: @@ -93,7 +100,13 @@ class FacebookIE(InfoExtractor):          AFTER = '.forEach(function(variable) {swf.addVariable(variable[0], variable[1]);});'          m = re.search(re.escape(BEFORE) + '(.*?)' + re.escape(AFTER), webpage)          if not m: -            raise ExtractorError(u'Cannot parse data') +            m_msg = re.search(r'class="[^"]*uiInterstitialContent[^"]*"><div>(.*?)</div>', webpage) +            if m_msg is not None: +                raise ExtractorError( +                    u'The video is not available, Facebook said: "%s"' % m_msg.group(1), +                    expected=True) +            else: +                raise ExtractorError(u'Cannot parse data')          data = dict(json.loads(m.group(1)))          params_raw = compat_urllib_parse.unquote(data['params'])          params = json.loads(params_raw) diff --git a/youtube_dl/extractor/faz.py b/youtube_dl/extractor/faz.py index deaa4ed2d..89ed08db4 100644 --- a/youtube_dl/extractor/faz.py +++ b/youtube_dl/extractor/faz.py @@ -5,8 +5,6 @@ import xml.etree.ElementTree  from .common import InfoExtractor  from ..utils import (      determine_ext, -    clean_html, -    get_element_by_attribute,  ) @@ -47,12 +45,12 @@ class FazIE(InfoExtractor):                  'format_id': code.lower(),              }) -        descr_html = get_element_by_attribute('class', 'Content Copy', webpage) +        descr = self._html_search_regex(r'<p class="Content Copy">(.*?)</p>', webpage, u'description')          info = {              'id': video_id,              'title': self._og_search_title(webpage),              'formats': formats, -            'description': clean_html(descr_html), +            'description': descr,              'thumbnail': config.find('STILL/STILL_BIG').text,          }          # TODO: Remove when #980 has 
been merged diff --git a/youtube_dl/extractor/generic.py b/youtube_dl/extractor/generic.py index d48c84f8d..2c8fcf5ae 100644 --- a/youtube_dl/extractor/generic.py +++ b/youtube_dl/extractor/generic.py @@ -11,6 +11,8 @@ from ..utils import (      compat_urlparse,      ExtractorError, +    smuggle_url, +    unescapeHTML,  )  from .brightcove import BrightcoveIE @@ -23,12 +25,33 @@ class GenericIE(InfoExtractor):          {              u'url': u'http://www.hodiho.fr/2013/02/regis-plante-sa-jeep.html',              u'file': u'13601338388002.mp4', -            u'md5': u'85b90ccc9d73b4acd9138d3af4c27f89', +            u'md5': u'6e15c93721d7ec9e9ca3fdbf07982cfd',              u'info_dict': {                  u"uploader": u"www.hodiho.fr",                  u"title": u"R\u00e9gis plante sa Jeep"              }          }, +        # embedded vimeo video +        { +            u'url': u'http://skillsmatter.com/podcast/home/move-semanticsperfect-forwarding-and-rvalue-references', +            u'file': u'22444065.mp4', +            u'md5': u'2903896e23df39722c33f015af0666e2', +            u'info_dict': { +                u'title': u'ACCU 2011: Move Semantics,Perfect Forwarding, and Rvalue references- Scott Meyers- 13/04/2011', +                u"uploader_id": u"skillsmatter", +                u"uploader": u"Skills Matter", +            } +        }, +        # bandcamp page with custom domain +        { +            u'url': u'http://bronyrock.com/track/the-pony-mash', +            u'file': u'3235767654.mp3', +            u'info_dict': { +                u'title': u'The Pony Mash', +                u'uploader': u'M_Pallante', +            }, +            u'skip': u'There is a limit of 200 free downloads / month for the test song', +        },      ]      def report_download_webpage(self, video_id): @@ -127,6 +150,27 @@ class GenericIE(InfoExtractor):              bc_url = BrightcoveIE._build_brighcove_url(m_brightcove.group())              return self.url_result(bc_url, 'Brightcove') +        # Look for embedded Vimeo player +        mobj = re.search( +            r'<iframe[^>]+?src="(https?://player.vimeo.com/video/.+?)"', webpage) +        if mobj: +            player_url = unescapeHTML(mobj.group(1)) +            surl = smuggle_url(player_url, {'Referer': url}) +            return self.url_result(surl, 'Vimeo') + +        # Look for embedded YouTube player +        mobj = re.search( +            r'<iframe[^>]+?src="(https?://(?:www\.)?youtube.com/embed/.+?)"', webpage) +        if mobj: +            surl = unescapeHTML(mobj.group(1)) +            return self.url_result(surl, 'Youtube') + +        # Look for Bandcamp pages with custom domain +        mobj = re.search(r'<meta property="og:url"[^>]*?content="(.*?bandcamp\.com.*?)"', webpage) +        if mobj is not None: +            burl = unescapeHTML(mobj.group(1)) +            return self.url_result(burl, 'Bandcamp') +          # Start with something easy: JW Player in SWFObject          mobj = re.search(r'flashvars: [\'"](?:.*&)?file=(http[^\'"&]*)', webpage)          if mobj is None: diff --git a/youtube_dl/extractor/googleplus.py b/youtube_dl/extractor/googleplus.py index ab12d7e93..2570746b2 100644 --- a/youtube_dl/extractor/googleplus.py +++ b/youtube_dl/extractor/googleplus.py @@ -41,9 +41,9 @@ class GooglePlusIE(InfoExtractor):          # Extract update date          upload_date = self._html_search_regex( -            r'''(?x)<a.+?class="o-T-s\s[^"]+"\s+style="display:\s*none"\s*> +            
r'''(?x)<a.+?class="o-U-s\s[^"]+"\s+style="display:\s*none"\s*>                      ([0-9]{4}-[0-9]{2}-[0-9]{2})</a>''', -            webpage, u'upload date', fatal=False) +            webpage, u'upload date', fatal=False, flags=re.VERBOSE)          if upload_date:              # Convert timestring to a format suitable for filename              upload_date = datetime.datetime.strptime(upload_date, "%Y-%m-%d") diff --git a/youtube_dl/extractor/instagram.py b/youtube_dl/extractor/instagram.py index ddc42882a..213aac428 100644 --- a/youtube_dl/extractor/instagram.py +++ b/youtube_dl/extractor/instagram.py @@ -26,7 +26,7 @@ class InstagramIE(InfoExtractor):          return [{              'id':        video_id, -            'url':       self._og_search_video_url(webpage), +            'url':       self._og_search_video_url(webpage, secure=False),              'ext':       'mp4',              'title':     u'Video by %s' % uploader_id,              'thumbnail': self._og_search_thumbnail(webpage), diff --git a/youtube_dl/extractor/internetvideoarchive.py b/youtube_dl/extractor/internetvideoarchive.py index 5986459d6..be8e05f53 100644 --- a/youtube_dl/extractor/internetvideoarchive.py +++ b/youtube_dl/extractor/internetvideoarchive.py @@ -19,7 +19,7 @@ class InternetVideoArchiveIE(InfoExtractor):          u'info_dict': {              u'title': u'SKYFALL',              u'description': u'In SKYFALL, Bond\'s loyalty to M is tested as her past comes back to haunt her. As MI6 comes under attack, 007 must track down and destroy the threat, no matter how personal the cost.', -            u'duration': 156, +            u'duration': 153,          },      } @@ -74,7 +74,7 @@ class InternetVideoArchiveIE(InfoExtractor):              })          formats = sorted(formats, key=lambda f: f['bitrate']) -        info = { +        return {              'id': video_id,              'title': item.find('title').text,              'formats': formats, @@ -82,6 +82,3 @@ class InternetVideoArchiveIE(InfoExtractor):              'description': item.find('description').text,              'duration': int(attr['duration']),          } -        # TODO: Remove when #980 has been merged -        info.update(formats[-1]) -        return info diff --git a/youtube_dl/extractor/keezmovies.py b/youtube_dl/extractor/keezmovies.py new file mode 100644 index 000000000..5e05900da --- /dev/null +++ b/youtube_dl/extractor/keezmovies.py @@ -0,0 +1,61 @@ +import os +import re + +from .common import InfoExtractor +from ..utils import ( +    compat_urllib_parse_urlparse, +    compat_urllib_request, +    compat_urllib_parse, +) +from ..aes import ( +    aes_decrypt_text +) + +class KeezMoviesIE(InfoExtractor): +    _VALID_URL = r'^(?:https?://)?(?:www\.)?(?P<url>keezmovies\.com/video/.+?(?P<videoid>[0-9]+))' +    _TEST = { +        u'url': u'http://www.keezmovies.com/video/petite-asian-lady-mai-playing-in-bathtub-1214711', +        u'file': u'1214711.mp4', +        u'md5': u'6e297b7e789329923fcf83abb67c9289', +        u'info_dict': { +            u"title": u"Petite Asian Lady Mai Playing In Bathtub", +            u"age_limit": 18, +        } +    } + +    def _real_extract(self, url): +        mobj = re.match(self._VALID_URL, url) +        video_id = mobj.group('videoid') +        url = 'http://www.' 
+ mobj.group('url') + +        req = compat_urllib_request.Request(url) +        req.add_header('Cookie', 'age_verified=1') +        webpage = self._download_webpage(req, video_id) + +        # embedded video +        mobj = re.search(r'href="([^"]+)"></iframe>', webpage) +        if mobj: +            embedded_url = mobj.group(1) +            return self.url_result(embedded_url) + +        video_title = self._html_search_regex(r'<h1 [^>]*>([^<]+)', webpage, u'title') +        video_url = compat_urllib_parse.unquote(self._html_search_regex(r'video_url=(.+?)&', webpage, u'video_url')) +        if webpage.find('encrypted=true')!=-1: +            password = self._html_search_regex(r'video_title=(.+?)&', webpage, u'password') +            video_url = aes_decrypt_text(video_url, password, 32).decode('utf-8') +        path = compat_urllib_parse_urlparse( video_url ).path +        extension = os.path.splitext( path )[1][1:] +        format = path.split('/')[4].split('_')[:2] +        format = "-".join( format ) + +        age_limit = self._rta_search(webpage) + +        return { +            'id': video_id, +            'title': video_title, +            'url': video_url, +            'ext': extension, +            'format': format, +            'format_id': format, +            'age_limit': age_limit, +        } diff --git a/youtube_dl/extractor/livestream.py b/youtube_dl/extractor/livestream.py index d04da98c8..4531fd6ab 100644 --- a/youtube_dl/extractor/livestream.py +++ b/youtube_dl/extractor/livestream.py @@ -40,13 +40,9 @@ class LivestreamIE(InfoExtractor):          if video_id is None:              # This is an event page: -            player = get_meta_content('twitter:player', webpage) -            if player is None: -                raise ExtractorError('Couldn\'t extract event api url') -            api_url = player.replace('/player', '') -            api_url = re.sub(r'^(https?://)(new\.)', r'\1api.\2', api_url) -            info = json.loads(self._download_webpage(api_url, event_name, -                                                     u'Downloading event info')) +            config_json = self._search_regex(r'window.config = ({.*?});', +                webpage, u'window config') +            info = json.loads(config_json)['event']              videos = [self._extract_video_info(video_data['data'])                  for video_data in info['feed']['data'] if video_data['type'] == u'video']              return self.playlist_result(videos, info['id'], info['full_name']) diff --git a/youtube_dl/extractor/metacafe.py b/youtube_dl/extractor/metacafe.py index e537648ff..91480ba87 100644 --- a/youtube_dl/extractor/metacafe.py +++ b/youtube_dl/extractor/metacafe.py @@ -20,10 +20,12 @@ class MetacafeIE(InfoExtractor):      _DISCLAIMER = 'http://www.metacafe.com/family_filter/'      _FILTER_POST = 'http://www.metacafe.com/f/index.php?inputType=filter&controllerGroup=user'      IE_NAME = u'metacafe' -    _TESTS = [{ +    _TESTS = [ +    # Youtube video +    {          u"add_ie": ["Youtube"],          u"url":  u"http://metacafe.com/watch/yt-_aUehQsCQtM/the_electric_company_short_i_pbs_kids_go/", -        u"file":  u"_aUehQsCQtM.flv", +        u"file":  u"_aUehQsCQtM.mp4",          u"info_dict": {              u"upload_date": u"20090102",              u"title": u"The Electric Company | \"Short I\" | PBS KIDS GO!", @@ -32,15 +34,42 @@ class MetacafeIE(InfoExtractor):              u"uploader_id": u"PBS"          }      }, +    # Normal metacafe video +    { +        u'url': 
u'http://www.metacafe.com/watch/11121940/news_stuff_you_wont_do_with_your_playstation_4/', +        u'md5': u'6e0bca200eaad2552e6915ed6fd4d9ad', +        u'info_dict': { +            u'id': u'11121940', +            u'ext': u'mp4', +            u'title': u'News: Stuff You Won\'t Do with Your PlayStation 4', +            u'uploader': u'ign', +            u'description': u'Sony released a massive FAQ on the PlayStation Blog detailing the PS4\'s capabilities and limitations.', +        }, +    }, +    # AnyClip video      {          u"url": u"http://www.metacafe.com/watch/an-dVVXnuY7Jh77J/the_andromeda_strain_1971_stop_the_bomb_part_3/",          u"file": u"an-dVVXnuY7Jh77J.mp4",          u"info_dict": {              u"title": u"The Andromeda Strain (1971): Stop the Bomb Part 3",              u"uploader": u"anyclip", -            u"description": u"md5:38c711dd98f5bb87acf973d573442e67" -        } -    }] +            u"description": u"md5:38c711dd98f5bb87acf973d573442e67", +        }, +    }, +    # age-restricted video +    { +        u'url': u'http://www.metacafe.com/watch/5186653/bbc_internal_christmas_tape_79_uncensored_outtakes_etc/', +        u'md5': u'98dde7c1a35d02178e8ab7560fe8bd09', +        u'info_dict': { +            u'id': u'5186653', +            u'ext': u'mp4', +            u'title': u'BBC INTERNAL Christmas Tape \'79 - UNCENSORED Outtakes, Etc.', +            u'uploader': u'Dwayne Pipe', +            u'description': u'md5:950bf4c581e2c059911fa3ffbe377e4b', +            u'age_limit': 18, +        }, +    }, +    ]      def report_disclaimer(self): @@ -62,6 +91,7 @@ class MetacafeIE(InfoExtractor):              'submit': "Continue - I'm over 18",              }          request = compat_urllib_request.Request(self._FILTER_POST, compat_urllib_parse.urlencode(disclaimer_form)) +        request.add_header('Content-Type', 'application/x-www-form-urlencoded')          try:              self.report_age_confirmation()              compat_urllib_request.urlopen(request).read() @@ -83,7 +113,12 @@ class MetacafeIE(InfoExtractor):          # Retrieve video webpage to extract further information          req = compat_urllib_request.Request('http://www.metacafe.com/watch/%s/' % video_id) -        req.headers['Cookie'] = 'flashVersion=0;' + +        # AnyClip videos require the flashversion cookie so that we get the link +        # to the mp4 file +        mobj_an = re.match(r'^an-(.*?)$', video_id) +        if mobj_an: +            req.headers['Cookie'] = 'flashVersion=0;'          webpage = self._download_webpage(req, video_id)          # Extract URL, uploader and title from webpage @@ -125,6 +160,11 @@ class MetacafeIE(InfoExtractor):                  r'submitter=(.*?);|googletag\.pubads\(\)\.setTargeting\("(?:channel|submiter)","([^"]+)"\);',                  webpage, u'uploader nickname', fatal=False) +        if re.search(r'"contentRating":"restricted"', webpage) is not None: +            age_limit = 18 +        else: +            age_limit = 0 +          return {              '_type':    'video',              'id':       video_id, @@ -134,4 +174,5 @@ class MetacafeIE(InfoExtractor):              'upload_date':  None,              'title':    video_title,              'ext':      video_ext, +            'age_limit': age_limit,          } diff --git a/youtube_dl/extractor/mtv.py b/youtube_dl/extractor/mtv.py index e520e2bb4..e96d3952c 100644 --- a/youtube_dl/extractor/mtv.py +++ b/youtube_dl/extractor/mtv.py @@ -80,6 +80,8 @@ class MTVIE(InfoExtractor):          video_id = 
self._id_from_uri(uri)          self.report_extraction(video_id)          mediagen_url = itemdoc.find('%s/%s' % (_media_xml_tag('group'), _media_xml_tag('content'))).attrib['url'] +        # Remove the templates, like &device={device} +        mediagen_url = re.sub(r'&[^=]*?={.*?}(?=(&|$))', u'', mediagen_url)          if 'acceptMethods' not in mediagen_url:              mediagen_url += '&acceptMethods=fms'          mediagen_page = self._download_webpage(mediagen_url, video_id, diff --git a/youtube_dl/extractor/myspace.py b/youtube_dl/extractor/myspace.py new file mode 100644 index 000000000..050f54a5a --- /dev/null +++ b/youtube_dl/extractor/myspace.py @@ -0,0 +1,48 @@ +import re +import json + +from .common import InfoExtractor +from ..utils import ( +    compat_str, +) + + +class MySpaceIE(InfoExtractor): +    _VALID_URL = r'https?://myspace\.com/([^/]+)/video/[^/]+/(?P<id>\d+)' + +    _TEST = { +        u'url': u'https://myspace.com/coldplay/video/viva-la-vida/100008689', +        u'info_dict': { +            u'id': u'100008689', +            u'ext': u'flv', +            u'title': u'Viva La Vida', +            u'description': u'The official Viva La Vida video, directed by Hype Williams', +            u'uploader': u'Coldplay', +            u'uploader_id': u'coldplay', +        }, +        u'params': { +            # rtmp download +            u'skip_download': True, +        }, +    } + +    def _real_extract(self, url): +        mobj = re.match(self._VALID_URL, url) +        video_id = mobj.group('id') +        webpage = self._download_webpage(url, video_id) +        context = json.loads(self._search_regex(r'context = ({.*?});', webpage, +            u'context')) +        video = context['video'] +        rtmp_url, play_path = video['streamUrl'].split(';', 1) + +        return { +            'id': compat_str(video['mediaId']), +            'title': video['title'], +            'url': rtmp_url, +            'play_path': play_path, +            'ext': 'flv', +            'description': video['description'], +            'thumbnail': video['imageUrl'], +            'uploader': video['artistName'], +            'uploader_id': video['artistUsername'], +        } diff --git a/youtube_dl/extractor/nhl.py b/youtube_dl/extractor/nhl.py index e8d43dd13..224f56ac8 100644 --- a/youtube_dl/extractor/nhl.py +++ b/youtube_dl/extractor/nhl.py @@ -90,8 +90,8 @@ class NHLVideocenterIE(NHLBaseInfoExtractor):               r'{statusIndex:0,index:0,.*?id:(.*?),'],              webpage, u'category id')          playlist_title = self._html_search_regex( -            r'\?catid=%s">(.*?)</a>' % cat_id, -            webpage, u'playlist title', flags=re.DOTALL) +            r'tab0"[^>]*?>(.*?)</td>', +            webpage, u'playlist title', flags=re.DOTALL).lower().capitalize()          data = compat_urllib_parse.urlencode({              'cid': cat_id, diff --git a/youtube_dl/extractor/nowvideo.py b/youtube_dl/extractor/nowvideo.py index ab52ad401..241cc160b 100644 --- a/youtube_dl/extractor/nowvideo.py +++ b/youtube_dl/extractor/nowvideo.py @@ -20,7 +20,10 @@ class NowVideoIE(InfoExtractor):          video_id = mobj.group('id')          webpage_url = 'http://www.nowvideo.ch/video/' + video_id +        embed_url = 'http://embed.nowvideo.ch/embed.php?v=' + video_id          webpage = self._download_webpage(webpage_url, video_id) +        embed_page = self._download_webpage(embed_url, video_id, +            u'Downloading embed page')          self.report_extraction(video_id) @@ -28,7 +31,7 @@ class 
NowVideoIE(InfoExtractor):              webpage, u'video title')          video_key = self._search_regex(r'var fkzd="(.*)";', -            webpage, u'video key') +            embed_page, u'video key')          api_call = "http://www.nowvideo.ch/api/player.api.php?file={0}&numOfErrors=0&cid=1&key={1}".format(video_id, video_key)          api_response = self._download_webpage(api_call, video_id, diff --git a/youtube_dl/extractor/pornhub.py b/youtube_dl/extractor/pornhub.py new file mode 100644 index 000000000..5e2454f1b --- /dev/null +++ b/youtube_dl/extractor/pornhub.py @@ -0,0 +1,69 @@ +import os +import re + +from .common import InfoExtractor +from ..utils import ( +    compat_urllib_parse_urlparse, +    compat_urllib_request, +    compat_urllib_parse, +    unescapeHTML, +) +from ..aes import ( +    aes_decrypt_text +) + +class PornHubIE(InfoExtractor): +    _VALID_URL = r'^(?:https?://)?(?:www\.)?(?P<url>pornhub\.com/view_video\.php\?viewkey=(?P<videoid>[0-9]+))' +    _TEST = { +        u'url': u'http://www.pornhub.com/view_video.php?viewkey=648719015', +        u'file': u'648719015.mp4', +        u'md5': u'882f488fa1f0026f023f33576004a2ed', +        u'info_dict': { +            u"uploader": u"BABES-COM",  +            u"title": u"Seductive Indian beauty strips down and fingers her pink pussy", +            u"age_limit": 18 +        } +    } + +    def _real_extract(self, url): +        mobj = re.match(self._VALID_URL, url) +        video_id = mobj.group('videoid') +        url = 'http://www.' + mobj.group('url') + +        req = compat_urllib_request.Request(url) +        req.add_header('Cookie', 'age_verified=1') +        webpage = self._download_webpage(req, video_id) + +        video_title = self._html_search_regex(r'<h1 [^>]+>([^<]+)', webpage, u'title') +        video_uploader = self._html_search_regex(r'<b>From: </b>(?:\s|<[^>]*>)*(.+?)<', webpage, u'uploader', fatal=False) +        thumbnail = self._html_search_regex(r'"image_url":"([^"]+)', webpage, u'thumbnail', fatal=False) +        if thumbnail: +            thumbnail = compat_urllib_parse.unquote(thumbnail) + +        video_urls = list(map(compat_urllib_parse.unquote , re.findall(r'"quality_[0-9]{3}p":"([^"]+)', webpage))) +        if webpage.find('"encrypted":true') != -1: +            password = self._html_search_regex(r'"video_title":"([^"]+)', webpage, u'password').replace('+', ' ') +            video_urls = list(map(lambda s: aes_decrypt_text(s, password, 32).decode('utf-8'), video_urls)) + +        formats = [] +        for video_url in video_urls: +            path = compat_urllib_parse_urlparse( video_url ).path +            extension = os.path.splitext( path )[1][1:] +            format = path.split('/')[5].split('_')[:2] +            format = "-".join( format ) +            formats.append({ +                'url': video_url, +                'ext': extension, +                'format': format, +                'format_id': format, +            }) +        formats.sort(key=lambda format: list(map(lambda s: s.zfill(6), format['format'].split('-')))) + +        return { +            'id': video_id, +            'uploader': video_uploader, +            'title': video_title, +            'thumbnail': thumbnail, +            'formats': formats, +            'age_limit': 18, +        } diff --git a/youtube_dl/extractor/pornotube.py b/youtube_dl/extractor/pornotube.py index 5d770ec28..35dc5a9ff 100644 --- a/youtube_dl/extractor/pornotube.py +++ b/youtube_dl/extractor/pornotube.py @@ -16,7 +16,8 @@ class 
PornotubeIE(InfoExtractor):          u'md5': u'374dd6dcedd24234453b295209aa69b6',          u'info_dict': {              u"upload_date": u"20090708",  -            u"title": u"Marilyn-Monroe-Bathing" +            u"title": u"Marilyn-Monroe-Bathing", +            u"age_limit": 18          }      } diff --git a/youtube_dl/extractor/redtube.py b/youtube_dl/extractor/redtube.py index 365aade56..994778e16 100644 --- a/youtube_dl/extractor/redtube.py +++ b/youtube_dl/extractor/redtube.py @@ -10,7 +10,8 @@ class RedTubeIE(InfoExtractor):          u'file': u'66418.mp4',          u'md5': u'7b8c22b5e7098a3e1c09709df1126d2d',          u'info_dict': { -            u"title": u"Sucked on a toilet" +            u"title": u"Sucked on a toilet", +            u"age_limit": 18,          }      } diff --git a/youtube_dl/extractor/rtlnow.py b/youtube_dl/extractor/rtlnow.py index d1b08c9bc..9ac7c3be8 100644 --- a/youtube_dl/extractor/rtlnow.py +++ b/youtube_dl/extractor/rtlnow.py @@ -63,13 +63,12 @@ class RTLnowIE(InfoExtractor):          },      },      { -        u'url': u'http://www.rtlnitronow.de/recht-ordnung/lebensmittelkontrolle-erlangenordnungsamt-berlin.php?film_id=127367&player=1&season=1', -        u'file': u'127367.flv', +        u'url': u'http://www.rtlnitronow.de/recht-ordnung/stadtpolizei-frankfurt-gerichtsvollzieher-leipzig.php?film_id=129679&player=1&season=1', +        u'file': u'129679.flv',          u'info_dict': { -            u'upload_date': u'20130926',  -            u'title': u'Recht & Ordnung - Lebensmittelkontrolle Erlangen/Ordnungsamt...', -            u'description': u'Lebensmittelkontrolle Erlangen/Ordnungsamt Berlin', -            u'thumbnail': u'http://autoimg.static-fra.de/nitronow/344787/1500x1500/image2.jpg', +            u'upload_date': u'20131016',  +            u'title': u'Recht & Ordnung - Stadtpolizei Frankfurt/ Gerichtsvollzieher...', +            u'description': u'Stadtpolizei Frankfurt/ Gerichtsvollzieher Leipzig',          },          u'params': {              u'skip_download': True, diff --git a/youtube_dl/extractor/rutube.py b/youtube_dl/extractor/rutube.py new file mode 100644 index 000000000..a18034fe2 --- /dev/null +++ b/youtube_dl/extractor/rutube.py @@ -0,0 +1,58 @@ +# encoding: utf-8 +import re +import json + +from .common import InfoExtractor +from ..utils import ( +    compat_urlparse, +    compat_str, +    ExtractorError, +) + + +class RutubeIE(InfoExtractor): +    _VALID_URL = r'https?://rutube.ru/video/(?P<long_id>\w+)' + +    _TEST = { +        u'url': u'http://rutube.ru/video/3eac3b4561676c17df9132a9a1e62e3e/', +        u'file': u'3eac3b4561676c17df9132a9a1e62e3e.mp4', +        u'info_dict': { +            u'title': u'Раненный кенгуру забежал в аптеку', +            u'uploader': u'NTDRussian', +            u'uploader_id': u'29790', +        }, +        u'params': { +            # It requires ffmpeg (m3u8 download) +            u'skip_download': True, +        }, +    } + +    def _get_api_response(self, short_id, subpath): +        api_url = 'http://rutube.ru/api/play/%s/%s/?format=json' % (subpath, short_id) +        response_json = self._download_webpage(api_url, short_id, +            u'Downloading %s json' % subpath) +        return json.loads(response_json) + +    def _real_extract(self, url): +        mobj = re.match(self._VALID_URL, url) +        long_id = mobj.group('long_id') +        webpage = self._download_webpage(url, long_id) +        og_video = self._og_search_video_url(webpage) +        short_id = 
compat_urlparse.urlparse(og_video).path[1:] +        options = self._get_api_response(short_id, 'options') +        trackinfo = self._get_api_response(short_id, 'trackinfo') +        # Some videos don't have the author field +        author = trackinfo.get('author') or {} +        m3u8_url = trackinfo['video_balancer'].get('m3u8') +        if m3u8_url is None: +            raise ExtractorError(u'Couldn\'t find m3u8 manifest url') + +        return { +            'id': trackinfo['id'], +            'title': trackinfo['title'], +            'url': m3u8_url, +            'ext': 'mp4', +            'thumbnail': options['thumbnail_url'], +            'uploader': author.get('name'), +            'uploader_id': compat_str(author['id']) if author else None, +        } diff --git a/youtube_dl/extractor/spankwire.py b/youtube_dl/extractor/spankwire.py new file mode 100644 index 000000000..32df0a7fb --- /dev/null +++ b/youtube_dl/extractor/spankwire.py @@ -0,0 +1,74 @@ +import os +import re + +from .common import InfoExtractor +from ..utils import ( +    compat_urllib_parse_urlparse, +    compat_urllib_request, +    compat_urllib_parse, +    unescapeHTML, +) +from ..aes import ( +    aes_decrypt_text +) + +class SpankwireIE(InfoExtractor): +    _VALID_URL = r'^(?:https?://)?(?:www\.)?(?P<url>spankwire\.com/[^/]*/video(?P<videoid>[0-9]+)/?)' +    _TEST = { +        u'url': u'http://www.spankwire.com/Buckcherry-s-X-Rated-Music-Video-Crazy-Bitch/video103545/', +        u'file': u'103545.mp4', +        u'md5': u'1b3f55e345500552dbc252a3e9c1af43', +        u'info_dict': { +            u"uploader": u"oreusz",  +            u"title": u"Buckcherry`s X Rated Music Video Crazy Bitch", +            u"description": u"Crazy Bitch X rated music video.", +            u"age_limit": 18, +        } +    } + +    def _real_extract(self, url): +        mobj = re.match(self._VALID_URL, url) +        video_id = mobj.group('videoid') +        url = 'http://www.' 
+ mobj.group('url') + +        req = compat_urllib_request.Request(url) +        req.add_header('Cookie', 'age_verified=1') +        webpage = self._download_webpage(req, video_id) + +        video_title = self._html_search_regex(r'<h1>([^<]+)', webpage, u'title') +        video_uploader = self._html_search_regex(r'by:\s*<a [^>]*>(.+?)</a>', webpage, u'uploader', fatal=False) +        thumbnail = self._html_search_regex(r'flashvars\.image_url = "([^"]+)', webpage, u'thumbnail', fatal=False) +        description = self._html_search_regex(r'>\s*Description:</div>\s*<[^>]*>([^<]+)', webpage, u'description', fatal=False) +        if len(description) == 0: +            description = None + +        video_urls = list(map(compat_urllib_parse.unquote , re.findall(r'flashvars\.quality_[0-9]{3}p = "([^"]+)', webpage))) +        if webpage.find('flashvars\.encrypted = "true"') != -1: +            password = self._html_search_regex(r'flashvars\.video_title = "([^"]+)', webpage, u'password').replace('+', ' ') +            video_urls = list(map(lambda s: aes_decrypt_text(s, password, 32).decode('utf-8'), video_urls)) + +        formats = [] +        for video_url in video_urls: +            path = compat_urllib_parse_urlparse( video_url ).path +            extension = os.path.splitext( path )[1][1:] +            format = path.split('/')[4].split('_')[:2] +            format = "-".join( format ) +            formats.append({ +                'url': video_url, +                'ext': extension, +                'format': format, +                'format_id': format, +            }) +        formats.sort(key=lambda format: list(map(lambda s: s.zfill(6), format['format'].split('-')))) + +        age_limit = self._rta_search(webpage) + +        return { +            'id': video_id, +            'uploader': video_uploader, +            'title': video_title, +            'thumbnail': thumbnail, +            'description': description, +            'formats': formats, +            'age_limit': age_limit, +        } diff --git a/youtube_dl/extractor/sztvhu.py b/youtube_dl/extractor/sztvhu.py new file mode 100644 index 000000000..81fa35c4b --- /dev/null +++ b/youtube_dl/extractor/sztvhu.py @@ -0,0 +1,44 @@ +# -*- coding: utf-8 -*- + +import re + +from .common import InfoExtractor +from ..utils import determine_ext + + +class SztvHuIE(InfoExtractor): +    _VALID_URL = r'(?:http://)?(?:(?:www\.)?sztv\.hu|www\.tvszombathely\.hu)/(?:[^/]+)/.+-(?P<id>[0-9]+)' +    _TEST = { +        u'url': u'http://sztv.hu/hirek/cserkeszek-nepszerusitettek-a-kornyezettudatos-eletmodot-a-savaria-teren-20130909', +        u'file': u'20130909.mp4', +        u'md5': u'a6df607b11fb07d0e9f2ad94613375cb', +        u'info_dict': { +            u"title": u"Cserkészek népszerűsítették a környezettudatos életmódot a Savaria téren", +            u"description": u'A zöld nap játékos ismeretterjesztő programjait a Magyar Cserkész Szövetség szervezte, akik az ország nyolc városában adják át tudásukat az érdeklődőknek. A PET...', +        } +    } + +    def _real_extract(self, url): +        mobj = re.match(self._VALID_URL, url) +        video_id = mobj.group('id') +        webpage = self._download_webpage(url, video_id) +        video_file = self._search_regex( +            r'file: "...:(.*?)",', webpage, 'video file') +        title = self._html_search_regex( +            r'<meta name="title" content="([^"]*?) - [^-]*? 
- [^-]*?"', +            webpage, 'video title') +        description = self._html_search_regex( +            r'<meta name="description" content="([^"]*)"/>', +            webpage, 'video description', fatal=False) +        thumbnail = self._og_search_thumbnail(webpage) + +        video_url = 'http://media.sztv.hu/vod/' + video_file + +        return { +            'id': video_id, +            'url': video_url, +            'title': title, +            'ext': determine_ext(video_url), +            'description': description, +            'thumbnail': thumbnail, +        } diff --git a/youtube_dl/extractor/techtalks.py b/youtube_dl/extractor/techtalks.py new file mode 100644 index 000000000..a55f236cb --- /dev/null +++ b/youtube_dl/extractor/techtalks.py @@ -0,0 +1,65 @@ +import re + +from .common import InfoExtractor +from ..utils import ( +    get_element_by_attribute, +    clean_html, +) + + +class TechTalksIE(InfoExtractor): +    _VALID_URL = r'https?://techtalks\.tv/talks/[^/]*/(?P<id>\d+)/' + +    _TEST = { +        u'url': u'http://techtalks.tv/talks/learning-topic-models-going-beyond-svd/57758/', +        u'playlist': [ +            { +                u'file': u'57758.flv', +                u'info_dict': { +                    u'title': u'Learning Topic Models --- Going beyond SVD', +                }, +            }, +            { +                u'file': u'57758-slides.flv', +                u'info_dict': { +                    u'title': u'Learning Topic Models --- Going beyond SVD', +                }, +            }, +        ], +        u'params': { +            # rtmp download +            u'skip_download': True, +        }, +    } + +    def _real_extract(self, url): +        mobj = re.match(self._VALID_URL, url) +        talk_id = mobj.group('id') +        webpage = self._download_webpage(url, talk_id) +        rtmp_url = self._search_regex(r'netConnectionUrl: \'(.*?)\'', webpage, +            u'rtmp url') +        play_path = self._search_regex(r'href=\'(.*?)\' [^>]*id="flowplayer_presenter"', +            webpage, u'presenter play path') +        title = clean_html(get_element_by_attribute('class', 'title', webpage)) +        video_info = { +                'id': talk_id, +                'title': title, +                'url': rtmp_url, +                'play_path': play_path, +                'ext': 'flv', +            } +        m_slides = re.search(r'<a class="slides" href=\'(.*?)\'', webpage) +        if m_slides is None: +            return video_info +        else: +            return [ +                video_info, +                # The slides video +                { +                    'id': talk_id + '-slides', +                    'title': title, +                    'url': rtmp_url, +                    'play_path': m_slides.group(1), +                    'ext': 'flv', +                }, +            ] diff --git a/youtube_dl/extractor/tube8.py b/youtube_dl/extractor/tube8.py new file mode 100644 index 000000000..aea9d9a24 --- /dev/null +++ b/youtube_dl/extractor/tube8.py @@ -0,0 +1,65 @@ +import os +import re + +from .common import InfoExtractor +from ..utils import ( +    compat_urllib_parse_urlparse, +    compat_urllib_request, +    compat_urllib_parse, +    unescapeHTML, +) +from ..aes import ( +    aes_decrypt_text +) + +class Tube8IE(InfoExtractor): +    _VALID_URL = r'^(?:https?://)?(?:www\.)?(?P<url>tube8\.com/[^/]+/[^/]+/(?P<videoid>[0-9]+)/?)' +    _TEST = { +        u'url': u'http://www.tube8.com/teen/kasia-music-video/229795/', +        
u'file': u'229795.mp4', +        u'md5': u'e9e0b0c86734e5e3766e653509475db0', +        u'info_dict': { +            u"description": u"hot teen Kasia grinding",  +            u"uploader": u"unknown",  +            u"title": u"Kasia music video", +            u"age_limit": 18, +        } +    } + +    def _real_extract(self, url): +        mobj = re.match(self._VALID_URL, url) +        video_id = mobj.group('videoid') +        url = 'http://www.' + mobj.group('url') + +        req = compat_urllib_request.Request(url) +        req.add_header('Cookie', 'age_verified=1') +        webpage = self._download_webpage(req, video_id) + +        video_title = self._html_search_regex(r'videotitle	="([^"]+)', webpage, u'title') +        video_description = self._html_search_regex(r'>Description:</strong>(.+?)<', webpage, u'description', fatal=False) +        video_uploader = self._html_search_regex(r'>Submitted by:</strong>(?:\s|<[^>]*>)*(.+?)<', webpage, u'uploader', fatal=False) +        thumbnail = self._html_search_regex(r'"image_url":"([^"]+)', webpage, u'thumbnail', fatal=False) +        if thumbnail: +            thumbnail = thumbnail.replace('\\/', '/') + +        video_url = self._html_search_regex(r'"video_url":"([^"]+)', webpage, u'video_url') +        if webpage.find('"encrypted":true')!=-1: +            password = self._html_search_regex(r'"video_title":"([^"]+)', webpage, u'password') +            video_url = aes_decrypt_text(video_url, password, 32).decode('utf-8') +        path = compat_urllib_parse_urlparse( video_url ).path +        extension = os.path.splitext( path )[1][1:] +        format = path.split('/')[4].split('_')[:2] +        format = "-".join( format ) + +        return { +            'id': video_id, +            'uploader': video_uploader, +            'title': video_title, +            'thumbnail': thumbnail, +            'description': video_description, +            'url': video_url, +            'ext': extension, +            'format': format, +            'format_id': format, +            'age_limit': 18, +        } diff --git a/youtube_dl/extractor/tudou.py b/youtube_dl/extractor/tudou.py index 1405b73f7..7a3891b89 100644 --- a/youtube_dl/extractor/tudou.py +++ b/youtube_dl/extractor/tudou.py @@ -7,15 +7,25 @@ from .common import InfoExtractor  class TudouIE(InfoExtractor): -    _VALID_URL = r'(?:http://)?(?:www\.)?tudou\.com/(?:listplay|programs)/(?:view|(.+?))/(?:([^/]+)|([^/]+))(?:\.html)?' -    _TEST = { +    _VALID_URL = r'(?:http://)?(?:www\.)?tudou\.com/(?:listplay|programs|albumplay)/(?:view|(.+?))/(?:([^/]+)|([^/]+))(?:\.html)?' 
+    _TESTS = [{          u'url': u'http://www.tudou.com/listplay/zzdE77v6Mmo/2xN2duXMxmw.html',          u'file': u'159448201.f4v',          u'md5': u'140a49ed444bd22f93330985d8475fcb',          u'info_dict': {              u"title": u"卡马乔国足开大脚长传冲吊集锦"          } -    } +    }, +    { +        u'url': u'http://www.tudou.com/albumplay/TenTw_JgiPM/PzsAs5usU9A.html', +        u'file': u'todo.mp4', +        u'md5': u'todo.mp4', +        u'info_dict': { +            u'title': u'todo.mp4', +        }, +        u'add_ie': [u'Youku'], +        u'skip': u'Only works from China' +    }]      def _url_for_id(self, id, quality = None):          info_url = "http://v2.tudou.com/f?id="+str(id) @@ -29,14 +39,19 @@ class TudouIE(InfoExtractor):          mobj = re.match(self._VALID_URL, url)          video_id = mobj.group(2)          webpage = self._download_webpage(url, video_id) -        title = re.search(",kw:\"(.+)\"",webpage) -        if title is None: -            title = re.search(",kw: \'(.+)\'",webpage) -        title = title.group(1) -        thumbnail_url = re.search(",pic: \'(.+?)\'",webpage) -        if thumbnail_url is None: -            thumbnail_url = re.search(",pic:\"(.+?)\"",webpage) -        thumbnail_url = thumbnail_url.group(1) + +        m = re.search(r'vcode:\s*[\'"](.+?)[\'"]', webpage) +        if m and m.group(1): +            return { +                '_type': 'url', +                'url': u'youku:' + m.group(1), +                'ie_key': 'Youku' +            } + +        title = self._search_regex( +            r",kw:\s*['\"](.+?)[\"']", webpage, u'title') +        thumbnail_url = self._search_regex( +            r",pic:\s*[\"'](.+?)[\"']", webpage, u'thumbnail URL', fatal=False)          segs_json = self._search_regex(r'segs: \'(.*)\'', webpage, 'segments')          segments = json.loads(segs_json) diff --git a/youtube_dl/extractor/vevo.py b/youtube_dl/extractor/vevo.py index 1c1cc418d..3f6020f74 100644 --- a/youtube_dl/extractor/vevo.py +++ b/youtube_dl/extractor/vevo.py @@ -5,7 +5,7 @@ import datetime  from .common import InfoExtractor  from ..utils import ( -    determine_ext, +    compat_HTTPError,      ExtractorError,  ) @@ -16,26 +16,22 @@ class VevoIE(InfoExtractor):      (currently used by MTVIE)      """      _VALID_URL = r'((http://www.vevo.com/watch/.*?/.*?/)|(vevo:))(?P<id>.*?)(\?|$)' -    _TEST = { +    _TESTS = [{          u'url': u'http://www.vevo.com/watch/hurts/somebody-to-die-for/GB1101300280',          u'file': u'GB1101300280.mp4', +        u"md5": u"06bea460acb744eab74a9d7dcb4bfd61",          u'info_dict': {              u"upload_date": u"20130624",              u"uploader": u"Hurts",              u"title": u"Somebody to Die For", -            u'duration': 230, +            u"duration": 230, +            u"width": 1920, +            u"height": 1080,          } -    } +    }] +    _SMIL_BASE_URL = 'http://smil.lvl3.vevo.com/' -    def _real_extract(self, url): -        mobj = re.match(self._VALID_URL, url) -        video_id = mobj.group('id') - -        json_url = 'http://videoplayer.vevo.com/VideoService/AuthenticateVideo?isrc=%s' % video_id -        info_json = self._download_webpage(json_url, video_id, u'Downloading json info') - -        self.report_extraction(video_id) -        video_info = json.loads(info_json)['video'] +    def _formats_from_json(self, video_info):          last_version = {'version': -1}          for version in video_info['videoVersions']:              # These are the HTTP downloads, other types are for different manifests @@ -50,17 
+46,74 @@ class VevoIE(InfoExtractor):          # Already sorted from worst to best quality          for rend in renditions.findall('rendition'):              attr = rend.attrib -            f_url = attr['url'] +            format_note = '%(videoCodec)s@%(videoBitrate)4sk, %(audioCodec)s@%(audioBitrate)3sk' % attr              formats.append({ -                'url': f_url, -                'ext': determine_ext(f_url), +                'url': attr['url'], +                'format_id': attr['name'], +                'format_note': format_note,                  'height': int(attr['frameheight']),                  'width': int(attr['frameWidth']),              }) +        return formats + +    def _formats_from_smil(self, smil_xml): +        formats = [] +        smil_doc = xml.etree.ElementTree.fromstring(smil_xml.encode('utf-8')) +        els = smil_doc.findall('.//{http://www.w3.org/2001/SMIL20/Language}video') +        for el in els: +            src = el.attrib['src'] +            m = re.match(r'''(?xi) +                (?P<ext>[a-z0-9]+): +                (?P<path> +                    [/a-z0-9]+     # The directory and main part of the URL +                    _(?P<cbr>[0-9]+)k +                    _(?P<width>[0-9]+)x(?P<height>[0-9]+) +                    _(?P<vcodec>[a-z0-9]+) +                    _(?P<vbr>[0-9]+) +                    _(?P<acodec>[a-z0-9]+) +                    _(?P<abr>[0-9]+) +                    \.[a-z0-9]+  # File extension +                )''', src) +            if not m: +                continue -        date_epoch = int(self._search_regex( -            r'/Date\((\d+)\)/', video_info['launchDate'], u'launch date'))/1000 -        upload_date = datetime.datetime.fromtimestamp(date_epoch) +            format_url = self._SMIL_BASE_URL + m.group('path') +            format_note = ('%(vcodec)s@%(vbr)4sk, %(acodec)s@%(abr)3sk' % +                           m.groupdict()) +            formats.append({ +                'url': format_url, +                'format_id': u'SMIL_' + m.group('cbr'), +                'format_note': format_note, +                'ext': m.group('ext'), +                'width': int(m.group('width')), +                'height': int(m.group('height')), +            }) +        return formats + +    def _real_extract(self, url): +        mobj = re.match(self._VALID_URL, url) +        video_id = mobj.group('id') + +        json_url = 'http://videoplayer.vevo.com/VideoService/AuthenticateVideo?isrc=%s' % video_id +        info_json = self._download_webpage(json_url, video_id, u'Downloading json info') +        video_info = json.loads(info_json)['video'] + +        formats = self._formats_from_json(video_info) +        try: +            smil_url = '%s/Video/V2/VFILE/%s/%sr.smil' % ( +                self._SMIL_BASE_URL, video_id, video_id.lower()) +            smil_xml = self._download_webpage(smil_url, video_id, +                                              u'Downloading SMIL info') +            formats.extend(self._formats_from_smil(smil_xml)) +        except ExtractorError as ee: +            if not isinstance(ee.cause, compat_HTTPError): +                raise +            self._downloader.report_warning( +                u'Cannot download SMIL information, falling back to JSON ..') + +        timestamp_ms = int(self._search_regex( +            r'/Date\((\d+)\)/', video_info['launchDate'], u'launch date')) +        upload_date = datetime.datetime.fromtimestamp(timestamp_ms // 1000)          info = {              'id': video_id,              
'title': video_info['title'], @@ -71,7 +124,4 @@ class VevoIE(InfoExtractor):              'duration': video_info['duration'],          } -        # TODO: Remove when #980 has been merged -        info.update(formats[-1]) -          return info diff --git a/youtube_dl/extractor/videodetective.py b/youtube_dl/extractor/videodetective.py index d89f84094..265dd5b91 100644 --- a/youtube_dl/extractor/videodetective.py +++ b/youtube_dl/extractor/videodetective.py @@ -16,7 +16,7 @@ class VideoDetectiveIE(InfoExtractor):          u'info_dict': {              u'title': u'KICK-ASS 2',              u'description': u'md5:65ba37ad619165afac7d432eaded6013', -            u'duration': 138, +            u'duration': 135,          },      } diff --git a/youtube_dl/extractor/vimeo.py b/youtube_dl/extractor/vimeo.py index cea29f035..c7d864a2b 100644 --- a/youtube_dl/extractor/vimeo.py +++ b/youtube_dl/extractor/vimeo.py @@ -1,3 +1,4 @@ +# encoding: utf-8  import json  import re  import itertools @@ -10,19 +11,21 @@ from ..utils import (      clean_html,      get_element_by_attribute,      ExtractorError, +    RegexNotFoundError,      std_headers, +    unsmuggle_url,  )  class VimeoIE(InfoExtractor):      """Information extractor for vimeo.com."""      # _VALID_URL matches Vimeo URLs -    _VALID_URL = r'(?P<proto>https?://)?(?:(?:www|player)\.)?vimeo(?P<pro>pro)?\.com/(?:(?:(?:groups|album)/[^/]+)|(?:.*?)/)?(?P<direct_link>play_redirect_hls\?clip_id=)?(?:videos?/)?(?P<id>[0-9]+)/?(?:[?].*)?$' +    _VALID_URL = r'(?P<proto>https?://)?(?:(?:www|player)\.)?vimeo(?P<pro>pro)?\.com/(?:(?:(?:groups|album)/[^/]+)|(?:.*?)/)?(?P<direct_link>play_redirect_hls\?clip_id=)?(?:videos?/)?(?P<id>[0-9]+)/?(?:[?].*)?(?:#.*)?$'      _NETRC_MACHINE = 'vimeo'      IE_NAME = u'vimeo'      _TESTS = [          { -            u'url': u'http://vimeo.com/56015672', +            u'url': u'http://vimeo.com/56015672#at=0',              u'file': u'56015672.mp4',              u'md5': u'8879b6cc097e987f02484baf890129e5',              u'info_dict': { @@ -54,6 +57,21 @@ class VimeoIE(InfoExtractor):                  u'uploader': u'The BLN & Business of Software',              },          }, +        { +            u'url': u'http://vimeo.com/68375962', +            u'file': u'68375962.mp4', +            u'md5': u'aaf896bdb7ddd6476df50007a0ac0ae7', +            u'note': u'Video protected with password', +            u'info_dict': { +                u'title': u'youtube-dl password protected test video', +                u'upload_date': u'20130614', +                u'uploader_id': u'user18948128', +                u'uploader': u'Jaime Marquínez Ferrándiz', +            }, +            u'params': { +                u'videopassword': u'youtube-dl', +            }, +        },      ]      def _login(self): @@ -98,6 +116,12 @@ class VimeoIE(InfoExtractor):          self._login()      def _real_extract(self, url, new_video=True): +        url, data = unsmuggle_url(url) +        headers = std_headers +        if data is not None: +            headers = headers.copy() +            headers.update(data) +          # Extract ID from URL          mobj = re.match(self._VALID_URL, url)          if mobj is None: @@ -112,7 +136,7 @@ class VimeoIE(InfoExtractor):              url = 'https://vimeo.com/' + video_id          # Retrieve video webpage to extract further information -        request = compat_urllib_request.Request(url, None, std_headers) +        request = compat_urllib_request.Request(url, None, headers)          webpage = 
self._download_webpage(request, video_id)          # Now we begin extracting as much information as we can from what we @@ -122,18 +146,26 @@ class VimeoIE(InfoExtractor):          # Extract the config JSON          try: -            config = self._search_regex([r' = {config:({.+?}),assets:', r'c=({.+?);'], -                webpage, u'info section', flags=re.DOTALL) -            config = json.loads(config) -        except: +            try: +                config_url = self._html_search_regex( +                    r' data-config-url="(.+?)"', webpage, u'config URL') +                config_json = self._download_webpage(config_url, video_id) +                config = json.loads(config_json) +            except RegexNotFoundError: +                # For pro videos or player.vimeo.com urls +                config = self._search_regex([r' = {config:({.+?}),assets:', r'c=({.+?);'], +                    webpage, u'info section', flags=re.DOTALL) +                config = json.loads(config) +        except Exception as e:              if re.search('The creator of this video has not given you permission to embed it on this domain.', webpage):                  raise ExtractorError(u'The author has restricted the access to this video, try with the "--referer" option') -            if re.search('If so please provide the correct password.', webpage): +            if re.search('<form[^>]+?id="pw_form"', webpage) is not None:                  self._verify_video_password(url, video_id, webpage)                  return self._real_extract(url)              else: -                raise ExtractorError(u'Unable to extract info section') +                raise ExtractorError(u'Unable to extract info section', +                                     cause=e)          # Extract title          video_title = config["video"]["title"] @@ -172,46 +204,45 @@ class VimeoIE(InfoExtractor):          # Vimeo specific: extract video codec and quality information          # First consider quality, then codecs, then take everything -        # TODO bind to format param -        codecs = [('h264', 'mp4'), ('vp8', 'flv'), ('vp6', 'flv')] +        codecs = [('vp6', 'flv'), ('vp8', 'flv'), ('h264', 'mp4')]          files = { 'hd': [], 'sd': [], 'other': []}          config_files = config["video"].get("files") or config["request"].get("files")          for codec_name, codec_extension in codecs: -            if codec_name in config_files: -                if 'hd' in config_files[codec_name]: -                    files['hd'].append((codec_name, codec_extension, 'hd')) -                elif 'sd' in config_files[codec_name]: -                    files['sd'].append((codec_name, codec_extension, 'sd')) +            for quality in config_files.get(codec_name, []): +                format_id = '-'.join((codec_name, quality)).lower() +                key = quality if quality in files else 'other' +                video_url = None +                if isinstance(config_files[codec_name], dict): +                    file_info = config_files[codec_name][quality] +                    video_url = file_info.get('url')                  else: -                    files['other'].append((codec_name, codec_extension, config_files[codec_name][0])) - -        for quality in ('hd', 'sd', 'other'): -            if len(files[quality]) > 0: -                video_quality = files[quality][0][2] -                video_codec = files[quality][0][0] -                video_extension = files[quality][0][1] -                self.to_screen(u'%s: Downloading %s file 
at %s quality' % (video_id, video_codec.upper(), video_quality)) -                break -        else: -            raise ExtractorError(u'No known codec found') +                    file_info = {} +                if video_url is None: +                    video_url = "http://player.vimeo.com/play_redirect?clip_id=%s&sig=%s&time=%s&quality=%s&codecs=%s&type=moogaloop_local&embed_location=" \ +                        %(video_id, sig, timestamp, quality, codec_name.upper()) -        video_url = None -        if isinstance(config_files[video_codec], dict): -            video_url = config_files[video_codec][video_quality].get("url") -        if video_url is None: -            video_url = "http://player.vimeo.com/play_redirect?clip_id=%s&sig=%s&time=%s&quality=%s&codecs=%s&type=moogaloop_local&embed_location=" \ -                        %(video_id, sig, timestamp, video_quality, video_codec.upper()) +                files[key].append({ +                    'ext': codec_extension, +                    'url': video_url, +                    'format_id': format_id, +                    'width': file_info.get('width'), +                    'height': file_info.get('height'), +                }) +        formats = [] +        for key in ('other', 'sd', 'hd'): +            formats += files[key] +        if len(formats) == 0: +            raise ExtractorError(u'No known codec found')          return [{              'id':       video_id, -            'url':      video_url,              'uploader': video_uploader,              'uploader_id': video_uploader_id,              'upload_date':  video_upload_date,              'title':    video_title, -            'ext':      video_extension,              'thumbnail':    video_thumbnail,              'description':  video_description, +            'formats': formats,          }] diff --git a/youtube_dl/extractor/vk.py b/youtube_dl/extractor/vk.py new file mode 100644 index 000000000..90d8a6d07 --- /dev/null +++ b/youtube_dl/extractor/vk.py @@ -0,0 +1,45 @@ +# encoding: utf-8 +import re +import json + +from .common import InfoExtractor +from ..utils import ( +    compat_str, +    unescapeHTML, +) + + +class VKIE(InfoExtractor): +    IE_NAME = u'vk.com' +    _VALID_URL = r'https?://vk\.com/(?:videos.*?\?.*?z=)?video(?P<id>.*?)(?:\?|%2F|$)' + +    _TEST = { +        u'url': u'http://vk.com/videos-77521?z=video-77521_162222515%2Fclub77521', +        u'md5': u'0deae91935c54e00003c2a00646315f0', +        u'info_dict': { +            u'id': u'162222515', +            u'ext': u'flv', +            u'title': u'ProtivoGunz - Хуёвая песня', +            u'uploader': u'Noize MC', +        }, +    } + +    def _real_extract(self, url): +        mobj = re.match(self._VALID_URL, url) +        video_id = mobj.group('id') +        info_url = 'http://vk.com/al_video.php?act=show&al=1&video=%s' % video_id +        info_page = self._download_webpage(info_url, video_id) +        m_yt = re.search(r'src="(http://www.youtube.com/.*?)"', info_page) +        if m_yt is not None: +            self.to_screen(u'Youtube video detected') +            return self.url_result(m_yt.group(1), 'Youtube') +        vars_json = self._search_regex(r'var vars = ({.*?});', info_page, u'vars') +        vars = json.loads(vars_json) + +        return { +            'id': compat_str(vars['vid']), +            'url': vars['url240'], +            'title': unescapeHTML(vars['md_title']), +            'thumbnail': vars['jpg'], +            'uploader': vars['md_author'], +        } diff --git 
a/youtube_dl/extractor/websurg.py b/youtube_dl/extractor/websurg.py new file mode 100644 index 000000000..43953bfdd --- /dev/null +++ b/youtube_dl/extractor/websurg.py @@ -0,0 +1,59 @@ +# coding: utf-8 + +import re + +from ..utils import ( +    compat_urllib_request, +    compat_urllib_parse +) + +from .common import InfoExtractor + +class WeBSurgIE(InfoExtractor): +    IE_NAME = u'websurg.com' +    _VALID_URL = r'http://.*?\.websurg\.com/MEDIA/\?noheader=1&doi=(.*)' + +    _TEST = { +        u'url': u'http://www.websurg.com/MEDIA/?noheader=1&doi=vd01en4012', +        u'file': u'vd01en4012.mp4', +        u'params': { +            u'skip_download': True, +        }, +        u'skip': u'Requires login information', +    } +     +    _LOGIN_URL = 'http://www.websurg.com/inc/login/login_div.ajax.php?login=1' + +    def _real_initialize(self): + +        login_form = { +            'username': self._downloader.params['username'], +            'password': self._downloader.params['password'], +            'Submit': 1 +        } +         +        request = compat_urllib_request.Request( +            self._LOGIN_URL, compat_urllib_parse.urlencode(login_form)) +        request.add_header( +            'Content-Type', 'application/x-www-form-urlencoded;charset=utf-8') +        compat_urllib_request.urlopen(request).info() +        webpage = self._download_webpage(self._LOGIN_URL, '', 'Logging in') +         +        if webpage != 'OK': +            self._downloader.report_error( +                u'Unable to log in: bad username/password') +         +    def _real_extract(self, url): +        video_id = re.match(self._VALID_URL, url).group(1) +         +        webpage = self._download_webpage(url, video_id) +         +        url_info = re.search(r'streamer="(.*?)" src="(.*?)"', webpage) +         +        return {'id': video_id, +                'title': self._og_search_title(webpage), +                'description': self._og_search_description(webpage), +                'ext' : 'mp4', +                'url' : url_info.group(1) + '/' + url_info.group(2), +                'thumbnail': self._og_search_thumbnail(webpage) +                } diff --git a/youtube_dl/extractor/xhamster.py b/youtube_dl/extractor/xhamster.py index 361619694..7444d3393 100644 --- a/youtube_dl/extractor/xhamster.py +++ b/youtube_dl/extractor/xhamster.py @@ -19,7 +19,8 @@ class XHamsterIE(InfoExtractor):          u'info_dict': {              u"upload_date": u"20121014",               u"uploader_id": u"Ruseful2011",  -            u"title": u"FemaleAgent Shy beauty takes the bait" +            u"title": u"FemaleAgent Shy beauty takes the bait", +            u"age_limit": 18,          }      },      { @@ -27,28 +28,33 @@ class XHamsterIE(InfoExtractor):          u'file': u'2221348.flv',          u'md5': u'e767b9475de189320f691f49c679c4c7',          u'info_dict': { -            u"upload_date": u"20130914",  -            u"uploader_id": u"jojo747400",  -            u"title": u"Britney Spears  Sexy Booty" +            u"upload_date": u"20130914", +            u"uploader_id": u"jojo747400", +            u"title": u"Britney Spears  Sexy Booty", +            u"age_limit": 18,          }      }]      def _real_extract(self,url): +        def extract_video_url(webpage): +            mobj = re.search(r'\'srv\': \'(?P<server>[^\']*)\',\s*\'file\': \'(?P<file>[^\']+)\',', webpage) +            if mobj is None: +                raise ExtractorError(u'Unable to extract media URL') +            if len(mobj.group('server')) == 0: +              
  return compat_urllib_parse.unquote(mobj.group('file')) +            else: +                return mobj.group('server')+'/key='+mobj.group('file') + +        def is_hd(webpage): +            return webpage.find('<div class=\'icon iconHD\'>') != -1 +          mobj = re.match(self._VALID_URL, url)          video_id = mobj.group('id')          seo = mobj.group('seo') -        mrss_url = 'http://xhamster.com/movies/%s/%s.html?hd' % (video_id, seo) +        mrss_url = 'http://xhamster.com/movies/%s/%s.html' % (video_id, seo)          webpage = self._download_webpage(mrss_url, video_id) -        mobj = re.search(r'\'srv\': \'(?P<server>[^\']*)\',\s*\'file\': \'(?P<file>[^\']+)\',', webpage) -        if mobj is None: -            raise ExtractorError(u'Unable to extract media URL') -        if len(mobj.group('server')) == 0: -            video_url = compat_urllib_parse.unquote(mobj.group('file')) -        else: -            video_url = mobj.group('server')+'/key='+mobj.group('file') -          video_title = self._html_search_regex(r'<title>(?P<title>.+?) - xHamster\.com</title>',              webpage, u'title') @@ -72,13 +78,34 @@ class XHamsterIE(InfoExtractor):          video_thumbnail = self._search_regex(r'\'image\':\'(?P<thumbnail>[^\']+)\'',              webpage, u'thumbnail', fatal=False) -        return [{ -            'id':       video_id, -            'url':      video_url, -            'ext':      determine_ext(video_url), -            'title':    video_title, +        age_limit = self._rta_search(webpage) + +        video_url = extract_video_url(webpage) +        hd = is_hd(webpage) +        formats = [{ +            'url': video_url, +            'ext': determine_ext(video_url), +            'format': 'hd' if hd else 'sd', +            'format_id': 'hd' if hd else 'sd', +        }] +        if not hd: +            webpage = self._download_webpage(mrss_url+'?hd', video_id) +            if is_hd(webpage): +                video_url = extract_video_url(webpage) +                formats.append({ +                    'url': video_url, +                    'ext': determine_ext(video_url), +                    'format': 'hd', +                    'format_id': 'hd', +                }) + +        return { +            'id': video_id, +            'title': video_title, +            'formats': formats,              'description': video_description,              'upload_date': video_upload_date,              'uploader_id': video_uploader_id, -            'thumbnail': video_thumbnail -        }] +            'thumbnail': video_thumbnail, +            'age_limit': age_limit, +        } diff --git a/youtube_dl/extractor/xnxx.py b/youtube_dl/extractor/xnxx.py index 40d848900..8a0eb1afd 100644 --- a/youtube_dl/extractor/xnxx.py +++ b/youtube_dl/extractor/xnxx.py @@ -18,7 +18,8 @@ class XNXXIE(InfoExtractor):          u'file': u'1135332.flv',          u'md5': u'0831677e2b4761795f68d417e0b7b445',          u'info_dict': { -            u"title": u"lida \u00bb Naked Funny Actress  (5)" +            u"title": u"lida \u00bb Naked Funny Actress  (5)", +            u"age_limit": 18,          }      } @@ -50,4 +51,5 @@ class XNXXIE(InfoExtractor):              'ext': 'flv',              'thumbnail': video_thumbnail,              'description': None, +            'age_limit': 18,          }] diff --git a/youtube_dl/extractor/xvideos.py b/youtube_dl/extractor/xvideos.py index c3b9736d7..90138d7e5 100644 --- a/youtube_dl/extractor/xvideos.py +++ b/youtube_dl/extractor/xvideos.py @@ -13,7 +13,8 @@ class 
XVideosIE(InfoExtractor):          u'file': u'939581.flv',          u'md5': u'1d0c835822f0a71a7bf011855db929d0',          u'info_dict': { -            u"title": u"Funny Porns By >>>>S<<<<<< -1" +            u"title": u"Funny Porns By >>>>S<<<<<< -1", +            u"age_limit": 18,          }      } @@ -46,6 +47,7 @@ class XVideosIE(InfoExtractor):              'ext': 'flv',              'thumbnail': video_thumbnail,              'description': None, +            'age_limit': 18,          }          return [info] diff --git a/youtube_dl/extractor/youjizz.py b/youtube_dl/extractor/youjizz.py index 1265639e8..1fcc518ac 100644 --- a/youtube_dl/extractor/youjizz.py +++ b/youtube_dl/extractor/youjizz.py @@ -13,7 +13,8 @@ class YouJizzIE(InfoExtractor):          u'file': u'2189178.flv',          u'md5': u'07e15fa469ba384c7693fd246905547c',          u'info_dict': { -            u"title": u"Zeichentrick 1" +            u"title": u"Zeichentrick 1", +            u"age_limit": 18,          }      } @@ -25,6 +26,8 @@ class YouJizzIE(InfoExtractor):          # Get webpage content          webpage = self._download_webpage(url, video_id) +        age_limit = self._rta_search(webpage) +          # Get the video title          video_title = self._html_search_regex(r'<title>(?P<title>.*)</title>',              webpage, u'title').strip() @@ -60,6 +63,7 @@ class YouJizzIE(InfoExtractor):                  'title': video_title,                  'ext': 'flv',                  'format': 'flv', -                'player_url': embed_page_url} +                'player_url': embed_page_url, +                'age_limit': age_limit}          return [info] diff --git a/youtube_dl/extractor/youporn.py b/youtube_dl/extractor/youporn.py index b1f93dd1b..e46a9b4d6 100644 --- a/youtube_dl/extractor/youporn.py +++ b/youtube_dl/extractor/youporn.py @@ -17,7 +17,7 @@ from ..aes import (  )  class YouPornIE(InfoExtractor): -    _VALID_URL = r'^(?:https?://)?(?:\w+\.)?youporn\.com/watch/(?P<videoid>[0-9]+)/(?P<title>[^/]+)' +    _VALID_URL = r'^(?:https?://)?(?:www\.)?(?P<url>youporn\.com/watch/(?P<videoid>[0-9]+)/(?P<title>[^/]+))'      _TEST = {          u'url': u'http://www.youporn.com/watch/505835/sex-ed-is-it-safe-to-masturbate-daily/',          u'file': u'505835.mp4', @@ -26,27 +26,15 @@ class YouPornIE(InfoExtractor):              u"upload_date": u"20101221",               u"description": u"Love & Sex Answers: http://bit.ly/DanAndJenn -- Is It Unhealthy To Masturbate Daily?",               u"uploader": u"Ask Dan And Jennifer",  -            u"title": u"Sex Ed: Is It Safe To Masturbate Daily?" +            u"title": u"Sex Ed: Is It Safe To Masturbate Daily?", +            u"age_limit": 18,          }      } -    def _print_formats(self, formats): -        """Print all available formats""" -        print(u'Available formats:') -        print(u'ext\t\tformat') -        print(u'---------------------------------') -        for format in formats: -            print(u'%s\t\t%s'  % (format['ext'], format['format'])) - -    def _specific(self, req_format, formats): -        for x in formats: -            if x["format"] == req_format: -                return x -        return None -      def _real_extract(self, url):          mobj = re.match(self._VALID_URL, url)          video_id = mobj.group('videoid') +        url = 'http://www.' 
+ mobj.group('url')          req = compat_urllib_request.Request(url)          req.add_header('Cookie', 'age_verified=1') @@ -70,27 +58,22 @@ class YouPornIE(InfoExtractor):          except KeyError:              raise ExtractorError('Missing JSON parameter: ' + sys.exc_info()[1]) -        # Get all of the formats available +        # Get all of the links from the page          DOWNLOAD_LIST_RE = r'(?s)<ul class="downloadList">(?P<download_list>.*?)</ul>'          download_list_html = self._search_regex(DOWNLOAD_LIST_RE,              webpage, u'download list').strip() - -        # Get all of the links from the page -        LINK_RE = r'(?s)<a href="(?P<url>[^"]+)">' +        LINK_RE = r'<a href="([^"]+)">'          links = re.findall(LINK_RE, download_list_html) -         -        # Get link of hd video if available -        mobj = re.search(r'var encryptedQuality720URL = \'(?P<encrypted_video_url>[a-zA-Z0-9+/]+={0,2})\';', webpage) -        if mobj != None: -            encrypted_video_url = mobj.group(u'encrypted_video_url') -            video_url = aes_decrypt_text(encrypted_video_url, video_title, 32).decode('utf-8') -            links = [video_url] + links + +        # Get all encrypted links +        encrypted_links = re.findall(r'var encryptedQuality[0-9]{3}URL = \'([a-zA-Z0-9+/]+={0,2})\';', webpage) +        for encrypted_link in encrypted_links: +            link = aes_decrypt_text(encrypted_link, video_title, 32).decode('utf-8') +            links.append(link)          if not links:              raise ExtractorError(u'ERROR: no known formats available for video') -        self.to_screen(u'Links found: %d' % len(links)) -          formats = []          for link in links: @@ -102,39 +85,32 @@ class YouPornIE(InfoExtractor):              path = compat_urllib_parse_urlparse( video_url ).path              extension = os.path.splitext( path )[1][1:]              format = path.split('/')[4].split('_')[:2] +              # size = format[0]              # bitrate = format[1]              format = "-".join( format )              # title = u'%s-%s-%s' % (video_title, size, bitrate)              formats.append({ -                'id': video_id,                  'url': video_url, -                'uploader': video_uploader, -                'upload_date': upload_date, -                'title': video_title,                  'ext': extension,                  'format': format, -                'thumbnail': thumbnail, -                'description': video_description, -                'age_limit': age_limit, +                'format_id': format,              }) -        if self._downloader.params.get('listformats', None): -            self._print_formats(formats) -            return - -        req_format = self._downloader.params.get('format', 'best') -        self.to_screen(u'Format: %s' % req_format) - -        if req_format is None or req_format == 'best': -            return [formats[0]] -        elif req_format == 'worst': -            return [formats[-1]] -        elif req_format in ('-1', 'all'): -            return formats -        else: -            format = self._specific( req_format, formats ) -            if format is None: -                raise ExtractorError(u'Requested format not available') -            return [format] +        # Sort and remove doubles +        formats.sort(key=lambda format: list(map(lambda s: s.zfill(6), format['format'].split('-')))) +        for i in range(len(formats)-1,0,-1): +            if formats[i]['format_id'] == formats[i-1]['format_id']: +         
       del formats[i] +         +        return { +            'id': video_id, +            'uploader': video_uploader, +            'upload_date': upload_date, +            'title': video_title, +            'thumbnail': thumbnail, +            'description': video_description, +            'age_limit': age_limit, +            'formats': formats, +        } diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py index d7c9b38f9..9053f3ead 100644 --- a/youtube_dl/extractor/youtube.py +++ b/youtube_dl/extractor/youtube.py @@ -74,14 +74,8 @@ class YoutubeBaseInfoExtractor(InfoExtractor):              self._downloader.report_warning(u'unable to fetch login page: %s' % compat_str(err))              return False -        galx = None -        dsh = None -        match = re.search(re.compile(r'<input.+?name="GALX".+?value="(.+?)"', re.DOTALL), login_page) -        if match: -          galx = match.group(1) -        match = re.search(re.compile(r'<input.+?name="dsh".+?value="(.+?)"', re.DOTALL), login_page) -        if match: -          dsh = match.group(1) +        galx = self._search_regex(r'(?s)<input.+?name="GALX".+?value="(.+?)"', +                                  login_page, u'Login GALX parameter')          # Log in          login_form_strs = { @@ -95,7 +89,6 @@ class YoutubeBaseInfoExtractor(InfoExtractor):                  u'checkConnection': u'',                  u'checkedDomains': u'youtube',                  u'dnConn': u'', -                u'dsh': dsh,                  u'pstMsg': u'0',                  u'rmShown': u'1',                  u'secTok': u'', @@ -236,11 +229,13 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):          '136': 'mp4',          '137': 'mp4',          '138': 'mp4', -        '139': 'mp4', -        '140': 'mp4', -        '141': 'mp4',          '160': 'mp4', +        # Dash mp4 audio +        '139': 'm4a', +        '140': 'm4a', +        '141': 'm4a', +          # Dash webm          '171': 'webm',          '172': 'webm', @@ -346,7 +341,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):          },          {              u"url":  u"http://www.youtube.com/watch?v=1ltcDfZMA3U", -            u"file":  u"1ltcDfZMA3U.flv", +            u"file":  u"1ltcDfZMA3U.mp4",              u"note": u"Test VEVO video (#897)",              u"info_dict": {                  u"upload_date": u"20070518", @@ -1116,7 +1111,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):                  'lang': lang,                  'v': video_id,                  'fmt': self._downloader.params.get('subtitlesformat'), -                'name': l[0], +                'name': l[0].encode('utf-8'),              })              url = u'http://www.youtube.com/api/timedtext?' 
+ params              sub_lang_list[lang] = url @@ -1150,7 +1145,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):              list_page = self._download_webpage(list_url, video_id)              caption_list = xml.etree.ElementTree.fromstring(list_page.encode('utf-8'))              original_lang_node = caption_list.find('track') -            if original_lang_node.attrib.get('kind') != 'asr' : +            if original_lang_node is None or original_lang_node.attrib.get('kind') != 'asr' :                  self._downloader.report_warning(u'Video doesn\'t have automatic captions')                  return {}              original_lang = original_lang_node.attrib['lang_code'] @@ -1403,32 +1398,29 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):              # this signatures are encrypted              if 'url_encoded_fmt_stream_map' not in args:                  raise ValueError(u'No stream_map present')  # caught below -            m_s = re.search(r'[&,]s=', args['url_encoded_fmt_stream_map']) +            re_signature = re.compile(r'[&,]s=') +            m_s = re_signature.search(args['url_encoded_fmt_stream_map'])              if m_s is not None:                  self.to_screen(u'%s: Encrypted signatures detected.' % video_id)                  video_info['url_encoded_fmt_stream_map'] = [args['url_encoded_fmt_stream_map']] -            m_s = re.search(r'[&,]s=', args.get('adaptive_fmts', u'')) +            m_s = re_signature.search(args.get('adaptive_fmts', u''))              if m_s is not None: -                if 'url_encoded_fmt_stream_map' in video_info: -                    video_info['url_encoded_fmt_stream_map'][0] += ',' + args['adaptive_fmts'] -                else: -                    video_info['url_encoded_fmt_stream_map'] = [args['adaptive_fmts']] -            elif 'adaptive_fmts' in video_info: -                if 'url_encoded_fmt_stream_map' in video_info: -                    video_info['url_encoded_fmt_stream_map'][0] += ',' + video_info['adaptive_fmts'][0] +                if 'adaptive_fmts' in video_info: +                    video_info['adaptive_fmts'][0] += ',' + args['adaptive_fmts']                  else: -                    video_info['url_encoded_fmt_stream_map'] = video_info['adaptive_fmts'] +                    video_info['adaptive_fmts'] = [args['adaptive_fmts']]          except ValueError:              pass          if 'conn' in video_info and video_info['conn'][0].startswith('rtmp'):              self.report_rtmp_download()              video_url_list = [(None, video_info['conn'][0])] -        elif 'url_encoded_fmt_stream_map' in video_info and len(video_info['url_encoded_fmt_stream_map']) >= 1: -            if 'rtmpe%3Dyes' in video_info['url_encoded_fmt_stream_map'][0]: +        elif len(video_info.get('url_encoded_fmt_stream_map', [])) >= 1 or len(video_info.get('adaptive_fmts', [])) >= 1: +            encoded_url_map = video_info.get('url_encoded_fmt_stream_map', [''])[0] + ',' + video_info.get('adaptive_fmts',[''])[0] +            if 'rtmpe%3Dyes' in encoded_url_map:                  raise ExtractorError('rtmpe downloads are not supported, see https://github.com/rg3/youtube-dl/issues/343 for more information.', expected=True)              url_map = {} -            for url_data_str in video_info['url_encoded_fmt_stream_map'][0].split(','): +            for url_data_str in encoded_url_map.split(','):                  url_data = compat_parse_qs(url_data_str)                  if 'itag' in url_data and 'url' in 
url_data:                      url = url_data['url'][0] @@ -1481,13 +1473,13 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):              raise ExtractorError(u'no conn, hlsvp or url_encoded_fmt_stream_map information found in video info')          results = [] -        for format_param, video_real_url in video_url_list: +        for itag, video_real_url in video_url_list:              # Extension -            video_extension = self._video_extensions.get(format_param, 'flv') +            video_extension = self._video_extensions.get(itag, 'flv') -            video_format = '{0} - {1}{2}'.format(format_param if format_param else video_extension, -                                              self._video_dimensions.get(format_param, '???'), -                                              ' ('+self._special_itags[format_param]+')' if format_param in self._special_itags else '') +            video_format = '{0} - {1}{2}'.format(itag if itag else video_extension, +                                              self._video_dimensions.get(itag, '???'), +                                              ' ('+self._special_itags[itag]+')' if itag in self._special_itags else '')              results.append({                  'id':       video_id, @@ -1498,6 +1490,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):                  'title':    video_title,                  'ext':      video_extension,                  'format':   video_format, +                'format_id': itag,                  'thumbnail':    video_thumbnail,                  'description':  video_description,                  'player_url':   player_url,  | 
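
Note on the format sorting introduced by the new Spankwire and YouPorn code in this patch: both sort their format list with key=lambda format: list(map(lambda s: s.zfill(6), format['format'].split('-'))), i.e. every dash-separated component of the format label is zero-padded to six characters so that a plain lexicographic sort orders the entries roughly from worst to best quality without parsing integers. Below is a minimal standalone sketch of that idea; the helper name quality_sort_key, the sample format labels and the example.com URLs are invented for illustration and are not taken from the patch or from the sites.

def quality_sort_key(format_id):
    # Zero-pad each dash-separated piece ("480P" -> "00480P", "2000K" -> "02000K")
    # so that lexicographic comparison matches the numeric order of
    # resolution and bitrate even when the numbers have different widths.
    return [part.zfill(6) for part in format_id.split('-')]

formats = [
    {'format_id': '720P-4000K', 'url': 'http://example.com/hi.mp4'},
    {'format_id': '240P-500K', 'url': 'http://example.com/lo.mp4'},
    {'format_id': '480P-2000K', 'url': 'http://example.com/mid.mp4'},
]
formats.sort(key=lambda f: quality_sort_key(f['format_id']))
print([f['format_id'] for f in formats])
# ['240P-500K', '480P-2000K', '720P-4000K']  (worst to best; the last entry is preferred)
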
