diff options
36 files changed, 979 insertions, 292 deletions
| diff --git a/.gitignore b/.gitignore index ca4e8f353..61cb6bc3c 100644 --- a/.gitignore +++ b/.gitignore @@ -17,4 +17,10 @@ youtube-dl.tar.gz  .coverage  cover/  updates_key.pem -*.egg-info
\ No newline at end of file +*.egg-info +*.srt +*.sbv +*.vtt +*.flv +*.mp4 +*.part @@ -113,7 +113,8 @@ which means you can modify it, redistribute it or use it however you like.  ## Video Format Options:      -f, --format FORMAT        video format code, specifiy the order of -                               preference using slashes: "-f 22/17/18" +                               preference using slashes: "-f 22/17/18". "-f mp4" +                               and "-f flv" are also supported      --all-formats              download all available video formats      --prefer-free-formats      prefer free video formats unless a specific one                                 is requested @@ -122,10 +123,8 @@ which means you can modify it, redistribute it or use it however you like.                                 only)  ## Subtitle Options: -    --write-sub                write subtitle file (currently youtube only) -    --write-auto-sub           write automatic subtitle file (currently youtube -                               only) -    --only-sub                 [deprecated] alias of --skip-download +    --write-sub                write subtitle file +    --write-auto-sub           write automatic subtitle file (youtube only)      --all-subs                 downloads all the available subtitles of the                                 video      --list-subs                lists all available subtitles for the video diff --git a/devscripts/bash-completion.in b/devscripts/bash-completion.in index 3b99a9614..bd10f63c2 100644 --- a/devscripts/bash-completion.in +++ b/devscripts/bash-completion.in @@ -4,8 +4,12 @@ __youtube-dl()      COMPREPLY=()      cur="${COMP_WORDS[COMP_CWORD]}"      opts="{{flags}}" +    keywords=":ytfavorites :ytrecommended :ytsubscriptions :ytwatchlater" -    if [[ ${cur} == * ]] ; then +    if [[ ${cur} =~ : ]]; then +        COMPREPLY=( $(compgen -W "${keywords}" -- ${cur}) ) +        return 0 +    elif [[ ${cur} == * ]] ; then          COMPREPLY=( 
$(compgen -W "${opts}" -- ${cur}) )          return 0      fi diff --git a/devscripts/gh-pages/add-version.py b/devscripts/gh-pages/add-version.py index 116420ef2..35865b2f3 100755 --- a/devscripts/gh-pages/add-version.py +++ b/devscripts/gh-pages/add-version.py @@ -3,7 +3,8 @@  import json  import sys  import hashlib -import urllib.request +import os.path +  if len(sys.argv) <= 1:      print('Specify the version number as parameter') @@ -23,10 +24,14 @@ filenames = {      'bin': 'youtube-dl',      'exe': 'youtube-dl.exe',      'tar': 'youtube-dl-%s.tar.gz' % version} +build_dir = os.path.join('..', '..', 'build', version)  for key, filename in filenames.items(): -    print('Downloading and checksumming %s...' % filename)      url = 'https://yt-dl.org/downloads/%s/%s' % (version, filename) -    data = urllib.request.urlopen(url).read() +    fn = os.path.join(build_dir, filename) +    with open(fn, 'rb') as f: +        data = f.read() +    if not data: +        raise ValueError('File %s is empty!' 
% fn)      sha256sum = hashlib.sha256(data).hexdigest()      new_version[key] = (url, sha256sum) diff --git a/devscripts/gh-pages/update-sites.py b/devscripts/gh-pages/update-sites.py new file mode 100755 index 000000000..33f242480 --- /dev/null +++ b/devscripts/gh-pages/update-sites.py @@ -0,0 +1,33 @@ +#!/usr/bin/env python3 + +import sys +import os +import textwrap + +# We must be able to import youtube_dl +sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))) + +import youtube_dl + +def main(): +    with open('supportedsites.html.in', 'r', encoding='utf-8') as tmplf: +        template = tmplf.read() + +    ie_htmls = [] +    for ie in sorted(youtube_dl.gen_extractors(), key=lambda i: i.IE_NAME.lower()): +        ie_html = '<b>{}</b>'.format(ie.IE_NAME) +        try: +            ie_html += ': {}'.format(ie.IE_DESC) +        except AttributeError: +            pass +        if ie.working() == False: +            ie_html += ' (Currently broken)' +        ie_htmls.append('<li>{}</li>'.format(ie_html)) + +    template = template.replace('@SITES@', textwrap.indent('\n'.join(ie_htmls), '\t')) + +    with open('supportedsites.html', 'w', encoding='utf-8') as sitesf: +        sitesf.write(template) + +if __name__ == '__main__': +    main() diff --git a/devscripts/release.sh b/devscripts/release.sh index 24c9ad8d8..62c68a6cf 100755 --- a/devscripts/release.sh +++ b/devscripts/release.sh @@ -85,6 +85,7 @@ ROOT=$(pwd)      "$ROOT/devscripts/gh-pages/sign-versions.py" < "$ROOT/updates_key.pem"      "$ROOT/devscripts/gh-pages/generate-download.py"      "$ROOT/devscripts/gh-pages/update-copyright.py" +    "$ROOT/devscripts/gh-pages/update-sites.py"      git add *.html *.html.in update      git commit -m "release $version"      git show HEAD diff --git a/devscripts/youtube_genalgo.py b/devscripts/youtube_genalgo.py index 13df535c7..b390c7e2e 100644 --- a/devscripts/youtube_genalgo.py +++ b/devscripts/youtube_genalgo.py @@ -20,21 +20,21 
@@ tests = [      # 87      ("qwertyuioplkjhgfdsazxcvbnm1234567890QWERTYUIOPLKJHGFDSAZXCVBNM!@#$^&*()_-+={[]}|:;?/>.<",       "uioplkjhgfdsazxcvbnm1t34567890QWE2TYUIOPLKJHGFDSAZXCVeNM!@#$^&*()_-+={[]}|:;?/>.<"), -    # 86 - vflg0g8PQ 2013/08/29 +    # 86 - vfluy6kdb 2013/09/06      ("qwertyuioplkjhgfdsazxcvbnm1234567890QWERTYUIOPLKJHGFDSAZXCVBNM!@#$%^&*()_-+={[|};?/>.<", -     ">/?;}|[{=+-_)(*&^%$#@!MNBVCXZASDFGHJKLPOIUYTREWq0987654321mnbvcxzasdfghjklpoiuytr"), -    # 85 +     "yuioplkjhgfdsazxcvbnm12345678q0QWrRTYUIOELKJHGFD-AZXCVBNM!@#$%^&*()_<+={[|};?/>.S"), +    # 85 - vflkuzxcs 2013/09/11      ("qwertyuioplkjhgfdsazxcvbnm1234567890QWERTYUIOPLKJHGFDSAZXCVBNM!@#$%^&*()_-+={[};?/>.<", -     ".>/?;}[{=+-_)(*&^%$#@!MNBVCXZASDFGHJKLPOIUYTREWQ0q876543r1mnbvcx9asdfghjklpoiuyt2"), +     "T>/?;}[{=+-_)(*&^%$#@!MNBVCXZASDFGHJKLPOvUY.REWQ0987654321mnbqcxzasdfghjklpoiuytr"),      # 84 - vflg0g8PQ 2013/08/29 (sporadic)      ("qwertyuioplkjhgfdsazxcvbnm1234567890QWERTYUIOPLKJHGFDSAZXCVBNM!@#$%^&*()_-+={[};?>.<",       ">?;}[{=+-_)(*&^%$#@!MNBVCXZASDFGHJKLPOIUYTREWq0987654321mnbvcxzasdfghjklpoiuytr"),      # 83      ("qwertyuioplkjhgfdsazxcvbnm1234567890QWERTYUIOPLKJHGFDSAZXCVBNM!#$%^&*()_+={[};?/>.<",       ".>/?;}[{=+_)(*&^%<#!MNBVCXZASPFGHJKLwOIUYTREWQ0987654321mnbvcxzasdfghjklpoiuytreq"), -    # 82 - vflZK4ZYR 2013/08/23 +    # 82 - vflGNjMhJ 2013/09/12      ("qwertyuioplkjhgfdsazxcvbnm1234567890QWERTYUIOPLKHGFDSAZXCVBNM!@#$%^&*(-+={[};?/>.<", -     "wertyuioplkjhgfdsaqxcvbnm1234567890QWERTYUIOPLKHGFDSAZXCVBNM!@#$%^&z(-+={[};?/>.<"), +     ".>/?;}[<=+-(*&^%$#@!MNBVCXeASDFGHKLPOqUYTREWQ0987654321mnbvcxzasdfghjklpoiuytrIwZ"),      # 81 - vflLC8JvQ 2013/07/25      ("qwertyuioplkjhgfdsazxcvbnm1234567890QWERTYUIOPLKHGFDSAZXCVBNM!@#$%^&*(-+={[};?/>.",       "C>/?;}[{=+-(*&^%$#@!MNBVYXZASDFGHKLPOIU.TREWQ0q87659321mnbvcxzasdfghjkl4oiuytrewp"), diff --git a/test/parameters.json b/test/parameters.json index 96998b5c3..f042880ed 100644 --- a/test/parameters.json +++ 
b/test/parameters.json @@ -38,7 +38,6 @@      "writedescription": false,       "writeinfojson": true,       "writesubtitles": false, -    "onlysubtitles": false,      "allsubtitles": false,      "listssubtitles": false  } diff --git a/test/test_all_urls.py b/test/test_all_urls.py index c54faa380..99fc7bd28 100644 --- a/test/test_all_urls.py +++ b/test/test_all_urls.py @@ -11,24 +11,49 @@ from youtube_dl.extractor import YoutubeIE, YoutubePlaylistIE, YoutubeChannelIE,  from helper import get_testcases  class TestAllURLsMatching(unittest.TestCase): +    def setUp(self): +        self.ies = gen_extractors() + +    def matching_ies(self, url): +        return [ie.IE_NAME for ie in self.ies if ie.suitable(url) and ie.IE_NAME != 'generic'] + +    def assertMatch(self, url, ie_list): +        self.assertEqual(self.matching_ies(url), ie_list) +      def test_youtube_playlist_matching(self): -        self.assertTrue(YoutubePlaylistIE.suitable(u'ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8')) -        self.assertTrue(YoutubePlaylistIE.suitable(u'UUBABnxM4Ar9ten8Mdjj1j0Q')) #585 -        self.assertTrue(YoutubePlaylistIE.suitable(u'PL63F0C78739B09958')) -        self.assertTrue(YoutubePlaylistIE.suitable(u'https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q')) -        self.assertTrue(YoutubePlaylistIE.suitable(u'https://www.youtube.com/course?list=ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8')) -        self.assertTrue(YoutubePlaylistIE.suitable(u'https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC')) -        self.assertTrue(YoutubePlaylistIE.suitable(u'https://www.youtube.com/watch?v=AV6J6_AeFEQ&playnext=1&list=PL4023E734DA416012')) #668 -        self.assertFalse(YoutubePlaylistIE.suitable(u'PLtS2H6bU1M')) +        assertPlaylist = lambda url: self.assertMatch(url, ['youtube:playlist']) +        assertPlaylist(u'ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8') +        assertPlaylist(u'UUBABnxM4Ar9ten8Mdjj1j0Q') #585 +        assertPlaylist(u'PL63F0C78739B09958') +        
assertPlaylist(u'https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q') +        assertPlaylist(u'https://www.youtube.com/course?list=ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8') +        assertPlaylist(u'https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC') +        assertPlaylist(u'https://www.youtube.com/watch?v=AV6J6_AeFEQ&playnext=1&list=PL4023E734DA416012') #668 +        self.assertFalse('youtube:playlist' in self.matching_ies(u'PLtS2H6bU1M'))      def test_youtube_matching(self):          self.assertTrue(YoutubeIE.suitable(u'PLtS2H6bU1M'))          self.assertFalse(YoutubeIE.suitable(u'https://www.youtube.com/watch?v=AV6J6_AeFEQ&playnext=1&list=PL4023E734DA416012')) #668 +        self.assertMatch('http://youtu.be/BaW_jenozKc', ['youtube']) +        self.assertMatch('http://www.youtube.com/v/BaW_jenozKc', ['youtube'])      def test_youtube_channel_matching(self): -        self.assertTrue(YoutubeChannelIE.suitable('https://www.youtube.com/channel/HCtnHdj3df7iM')) -        self.assertTrue(YoutubeChannelIE.suitable('https://www.youtube.com/channel/HCtnHdj3df7iM?feature=gb_ch_rec')) -        self.assertTrue(YoutubeChannelIE.suitable('https://www.youtube.com/channel/HCtnHdj3df7iM/videos')) +        assertChannel = lambda url: self.assertMatch(url, ['youtube:channel']) +        assertChannel('https://www.youtube.com/channel/HCtnHdj3df7iM') +        assertChannel('https://www.youtube.com/channel/HCtnHdj3df7iM?feature=gb_ch_rec') +        assertChannel('https://www.youtube.com/channel/HCtnHdj3df7iM/videos') + +    def test_youtube_user_matching(self): +        self.assertMatch('www.youtube.com/NASAgovVideo/videos', ['youtube:user']) + +    def test_youtube_feeds(self): +        self.assertMatch('https://www.youtube.com/feed/watch_later', ['youtube:watch_later']) +        self.assertMatch('https://www.youtube.com/feed/subscriptions', ['youtube:subscriptions']) +        self.assertMatch('https://www.youtube.com/feed/recommended', 
['youtube:recommended']) +        self.assertMatch('https://www.youtube.com/my_favorites', ['youtube:favorites']) + +    def test_youtube_show_matching(self): +        self.assertMatch('http://www.youtube.com/show/airdisasters', ['youtube:show'])      def test_justin_tv_channelid_matching(self):          self.assertTrue(JustinTVIE.suitable(u"justin.tv/vanillatv")) @@ -47,10 +72,13 @@ class TestAllURLsMatching(unittest.TestCase):          self.assertTrue(JustinTVIE.suitable(u"http://www.twitch.tv/tsm_theoddone/c/2349361"))      def test_youtube_extract(self): -        self.assertEqual(YoutubeIE()._extract_id('http://www.youtube.com/watch?&v=BaW_jenozKc'), 'BaW_jenozKc') -        self.assertEqual(YoutubeIE()._extract_id('https://www.youtube.com/watch?&v=BaW_jenozKc'), 'BaW_jenozKc') -        self.assertEqual(YoutubeIE()._extract_id('https://www.youtube.com/watch?feature=player_embedded&v=BaW_jenozKc'), 'BaW_jenozKc') -        self.assertEqual(YoutubeIE()._extract_id('https://www.youtube.com/watch_popup?v=BaW_jenozKc'), 'BaW_jenozKc') +        assertExtractId = lambda url, id: self.assertEqual(YoutubeIE()._extract_id(url), id) +        assertExtractId('http://www.youtube.com/watch?&v=BaW_jenozKc', 'BaW_jenozKc') +        assertExtractId('https://www.youtube.com/watch?&v=BaW_jenozKc', 'BaW_jenozKc') +        assertExtractId('https://www.youtube.com/watch?feature=player_embedded&v=BaW_jenozKc', 'BaW_jenozKc') +        assertExtractId('https://www.youtube.com/watch_popup?v=BaW_jenozKc', 'BaW_jenozKc') +        assertExtractId('http://www.youtube.com/watch?v=BaW_jenozKcsharePLED17F32AD9753930', 'BaW_jenozKc') +        assertExtractId('BaW_jenozKc', 'BaW_jenozKc')      def test_no_duplicates(self):          ies = gen_extractors() @@ -63,15 +91,12 @@ class TestAllURLsMatching(unittest.TestCase):                      self.assertFalse(ie.suitable(url), '%s should not match URL %r' % (type(ie).__name__, url))      def test_keywords(self): -        ies = gen_extractors() -      
  matching_ies = lambda url: [ie.IE_NAME for ie in ies -                                    if ie.suitable(url) and ie.IE_NAME != 'generic'] -        self.assertEqual(matching_ies(':ytsubs'), ['youtube:subscriptions']) -        self.assertEqual(matching_ies(':ytsubscriptions'), ['youtube:subscriptions']) -        self.assertEqual(matching_ies(':thedailyshow'), ['ComedyCentral']) -        self.assertEqual(matching_ies(':tds'), ['ComedyCentral']) -        self.assertEqual(matching_ies(':colbertreport'), ['ComedyCentral']) -        self.assertEqual(matching_ies(':cr'), ['ComedyCentral']) +        self.assertMatch(':ytsubs', ['youtube:subscriptions']) +        self.assertMatch(':ytsubscriptions', ['youtube:subscriptions']) +        self.assertMatch(':thedailyshow', ['ComedyCentral']) +        self.assertMatch(':tds', ['ComedyCentral']) +        self.assertMatch(':colbertreport', ['ComedyCentral']) +        self.assertMatch(':cr', ['ComedyCentral'])  if __name__ == '__main__': diff --git a/test/test_dailymotion_subtitles.py b/test/test_dailymotion_subtitles.py new file mode 100644 index 000000000..bcd9f79f6 --- /dev/null +++ b/test/test_dailymotion_subtitles.py @@ -0,0 +1,69 @@ +#!/usr/bin/env python + +import sys +import unittest +import json +import io +import hashlib + +# Allow direct execution +import os +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) + +from youtube_dl.extractor import DailymotionIE +from youtube_dl.utils import * +from helper import FakeYDL + +md5 = lambda s: hashlib.md5(s.encode('utf-8')).hexdigest() + +class TestDailymotionSubtitles(unittest.TestCase): +    def setUp(self): +        self.DL = FakeYDL() +        self.url = 'http://www.dailymotion.com/video/xczg00' +    def getInfoDict(self): +        IE = DailymotionIE(self.DL) +        info_dict = IE.extract(self.url) +        return info_dict +    def getSubtitles(self): +        info_dict = self.getInfoDict() +        return info_dict[0]['subtitles'] +    def 
test_no_writesubtitles(self): +        subtitles = self.getSubtitles() +        self.assertEqual(subtitles, None) +    def test_subtitles(self): +        self.DL.params['writesubtitles'] = True +        subtitles = self.getSubtitles() +        self.assertEqual(md5(subtitles['en']), '976553874490cba125086bbfea3ff76f') +    def test_subtitles_lang(self): +        self.DL.params['writesubtitles'] = True +        self.DL.params['subtitleslangs'] = ['fr'] +        subtitles = self.getSubtitles() +        self.assertEqual(md5(subtitles['fr']), '594564ec7d588942e384e920e5341792') +    def test_allsubtitles(self): +        self.DL.params['allsubtitles'] = True +        subtitles = self.getSubtitles() +        self.assertEqual(len(subtitles.keys()), 5) +    def test_list_subtitles(self): +        self.DL.params['listsubtitles'] = True +        info_dict = self.getInfoDict() +        self.assertEqual(info_dict, None) +    def test_automatic_captions(self): +        self.DL.params['writeautomaticsub'] = True +        self.DL.params['subtitleslang'] = ['en'] +        subtitles = self.getSubtitles() +        self.assertTrue(len(subtitles.keys()) == 0) +    def test_nosubtitles(self): +        self.url = 'http://www.dailymotion.com/video/x12u166_le-zapping-tele-star-du-08-aout-2013_tv' +        self.DL.params['allsubtitles'] = True +        subtitles = self.getSubtitles() +        self.assertEqual(len(subtitles), 0) +    def test_multiple_langs(self): +        self.DL.params['writesubtitles'] = True +        langs = ['es', 'fr', 'de'] +        self.DL.params['subtitleslangs'] = langs +        subtitles = self.getSubtitles() +        for lang in langs: +            self.assertTrue(subtitles.get(lang) is not None, u'Subtitles for \'%s\' not extracted' % lang) + +if __name__ == '__main__': +    unittest.main() diff --git a/test/test_youtube_subtitles.py b/test/test_youtube_subtitles.py index 641206277..5632871ac 100644 --- a/test/test_youtube_subtitles.py +++ 
b/test/test_youtube_subtitles.py @@ -18,85 +18,63 @@ md5 = lambda s: hashlib.md5(s.encode('utf-8')).hexdigest()  class TestYoutubeSubtitles(unittest.TestCase):      def setUp(self): -        DL = FakeYDL() -        DL.params['allsubtitles'] = False -        DL.params['writesubtitles'] = False -        DL.params['subtitlesformat'] = 'srt' -        DL.params['listsubtitles'] = False -    def test_youtube_no_subtitles(self): -        DL = FakeYDL() -        DL.params['writesubtitles'] = False -        IE = YoutubeIE(DL) -        info_dict = IE.extract('QRS8MkLhQmM') -        subtitles = info_dict[0]['subtitles'] +        self.DL = FakeYDL() +        self.url = 'QRS8MkLhQmM' +    def getInfoDict(self): +        IE = YoutubeIE(self.DL) +        info_dict = IE.extract(self.url) +        return info_dict +    def getSubtitles(self): +        info_dict = self.getInfoDict() +        return info_dict[0]['subtitles']         +    def test_youtube_no_writesubtitles(self): +        self.DL.params['writesubtitles'] = False +        subtitles = self.getSubtitles()          self.assertEqual(subtitles, None)      def test_youtube_subtitles(self): -        DL = FakeYDL() -        DL.params['writesubtitles'] = True -        IE = YoutubeIE(DL) -        info_dict = IE.extract('QRS8MkLhQmM') -        sub = info_dict[0]['subtitles']['en'] -        self.assertEqual(md5(sub), '4cd9278a35ba2305f47354ee13472260') -    def test_youtube_subtitles_it(self): -        DL = FakeYDL() -        DL.params['writesubtitles'] = True -        DL.params['subtitleslangs'] = ['it'] -        IE = YoutubeIE(DL) -        info_dict = IE.extract('QRS8MkLhQmM') -        sub = info_dict[0]['subtitles']['it'] -        self.assertEqual(md5(sub), '164a51f16f260476a05b50fe4c2f161d') -    def test_youtube_onlysubtitles(self): -        DL = FakeYDL() -        DL.params['writesubtitles'] = True -        DL.params['onlysubtitles'] = True -        IE = YoutubeIE(DL) -        info_dict = IE.extract('QRS8MkLhQmM') -        
sub = info_dict[0]['subtitles']['en'] -        self.assertEqual(md5(sub), '4cd9278a35ba2305f47354ee13472260') +        self.DL.params['writesubtitles'] = True +        subtitles = self.getSubtitles() +        self.assertEqual(md5(subtitles['en']), '4cd9278a35ba2305f47354ee13472260') +    def test_youtube_subtitles_lang(self): +        self.DL.params['writesubtitles'] = True +        self.DL.params['subtitleslangs'] = ['it'] +        subtitles = self.getSubtitles() +        self.assertEqual(md5(subtitles['it']), '164a51f16f260476a05b50fe4c2f161d')      def test_youtube_allsubtitles(self): -        DL = FakeYDL() -        DL.params['allsubtitles'] = True -        IE = YoutubeIE(DL) -        info_dict = IE.extract('QRS8MkLhQmM') -        subtitles = info_dict[0]['subtitles'] +        self.DL.params['allsubtitles'] = True +        subtitles = self.getSubtitles()          self.assertEqual(len(subtitles.keys()), 13)      def test_youtube_subtitles_sbv_format(self): -        DL = FakeYDL() -        DL.params['writesubtitles'] = True -        DL.params['subtitlesformat'] = 'sbv' -        IE = YoutubeIE(DL) -        info_dict = IE.extract('QRS8MkLhQmM') -        sub = info_dict[0]['subtitles']['en'] -        self.assertEqual(md5(sub), '13aeaa0c245a8bed9a451cb643e3ad8b') +        self.DL.params['writesubtitles'] = True +        self.DL.params['subtitlesformat'] = 'sbv' +        subtitles = self.getSubtitles() +        self.assertEqual(md5(subtitles['en']), '13aeaa0c245a8bed9a451cb643e3ad8b')      def test_youtube_subtitles_vtt_format(self): -        DL = FakeYDL() -        DL.params['writesubtitles'] = True -        DL.params['subtitlesformat'] = 'vtt' -        IE = YoutubeIE(DL) -        info_dict = IE.extract('QRS8MkLhQmM') -        sub = info_dict[0]['subtitles']['en'] -        self.assertEqual(md5(sub), '356cdc577fde0c6783b9b822e7206ff7') +        self.DL.params['writesubtitles'] = True +        self.DL.params['subtitlesformat'] = 'vtt' +        subtitles = 
self.getSubtitles() +        self.assertEqual(md5(subtitles['en']), '356cdc577fde0c6783b9b822e7206ff7')      def test_youtube_list_subtitles(self): -        DL = FakeYDL() -        DL.params['listsubtitles'] = True -        IE = YoutubeIE(DL) -        info_dict = IE.extract('QRS8MkLhQmM') +        self.DL.params['listsubtitles'] = True +        info_dict = self.getInfoDict()          self.assertEqual(info_dict, None)      def test_youtube_automatic_captions(self): -        DL = FakeYDL() -        DL.params['writeautomaticsub'] = True -        DL.params['subtitleslangs'] = ['it'] -        IE = YoutubeIE(DL) -        info_dict = IE.extract('8YoUxe5ncPo') -        sub = info_dict[0]['subtitles']['it'] -        self.assertTrue(sub is not None) +        self.url = '8YoUxe5ncPo' +        self.DL.params['writeautomaticsub'] = True +        self.DL.params['subtitleslangs'] = ['it'] +        subtitles = self.getSubtitles() +        self.assertTrue(subtitles['it'] is not None) +    def test_youtube_nosubtitles(self): +        self.url = 'sAjKT8FhjI8' +        self.DL.params['allsubtitles'] = True +        subtitles = self.getSubtitles() +        self.assertEqual(len(subtitles), 0)      def test_youtube_multiple_langs(self): -        DL = FakeYDL() -        DL.params['writesubtitles'] = True +        self.url = 'QRS8MkLhQmM' +        self.DL.params['writesubtitles'] = True          langs = ['it', 'fr', 'de'] -        DL.params['subtitleslangs'] = langs -        IE = YoutubeIE(DL) -        subtitles = IE.extract('QRS8MkLhQmM')[0]['subtitles'] +        self.DL.params['subtitleslangs'] = langs +        subtitles = self.getSubtitles()          for lang in langs:              self.assertTrue(subtitles.get(lang) is not None, u'Subtitles for \'%s\' not extracted' % lang) diff --git a/youtube_dl/FileDownloader.py b/youtube_dl/FileDownloader.py index 7c5ac4bc2..0b5a5d77d 100644 --- a/youtube_dl/FileDownloader.py +++ b/youtube_dl/FileDownloader.py @@ -66,7 +66,7 @@ class 
FileDownloader(object):      @staticmethod      def format_seconds(seconds):          (mins, secs) = divmod(seconds, 60) -        (hours, eta_mins) = divmod(mins, 60) +        (hours, mins) = divmod(mins, 60)          if hours > 99:              return '--:--:--'          if hours == 0: diff --git a/youtube_dl/YoutubeDL.py b/youtube_dl/YoutubeDL.py index afce28040..c2f992b8e 100644 --- a/youtube_dl/YoutubeDL.py +++ b/youtube_dl/YoutubeDL.py @@ -493,7 +493,7 @@ class YoutubeDL(object):                  with io.open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:                      descfile.write(info_dict['description'])              except (KeyError, TypeError): -                self.report_warning(u'Cannot extract description.') +                self.report_warning(u'There\'s no description to write.')              except (OSError, IOError):                  self.report_error(u'Cannot write description file ' + descfn)                  return diff --git a/youtube_dl/__init__.py b/youtube_dl/__init__.py index 431460c57..696e54f49 100644 --- a/youtube_dl/__init__.py +++ b/youtube_dl/__init__.py @@ -28,6 +28,8 @@ __authors__  = (      'Axel Noack',      'Albert Kim',      'Pierre Rudloff', +    'Huarong Huo', +    'Ismael Mejía',  )  __license__ = 'Public Domain' @@ -192,7 +194,7 @@ def parseOpts(overrideArguments=None):      video_format.add_option('-f', '--format',              action='store', dest='format', metavar='FORMAT', -            help='video format code, specifiy the order of preference using slashes: "-f 22/17/18"') +            help='video format code, specifiy the order of preference using slashes: "-f 22/17/18". 
"-f mp4" and "-f flv" are also supported')      video_format.add_option('--all-formats',              action='store_const', dest='format', help='download all available video formats', const='all')      video_format.add_option('--prefer-free-formats', @@ -204,13 +206,10 @@ def parseOpts(overrideArguments=None):      subtitles.add_option('--write-sub', '--write-srt',              action='store_true', dest='writesubtitles', -            help='write subtitle file (currently youtube only)', default=False) +            help='write subtitle file', default=False)      subtitles.add_option('--write-auto-sub', '--write-automatic-sub',              action='store_true', dest='writeautomaticsub', -            help='write automatic subtitle file (currently youtube only)', default=False) -    subtitles.add_option('--only-sub', -            action='store_true', dest='skip_download', -            help='[deprecated] alias of --skip-download', default=False) +            help='write automatic subtitle file (youtube only)', default=False)      subtitles.add_option('--all-subs',              action='store_true', dest='allsubtitles',              help='downloads all the available subtitles of the video', default=False) @@ -221,7 +220,7 @@ def parseOpts(overrideArguments=None):              action='store', dest='subtitlesformat', metavar='FORMAT',              help='subtitle format (default=srt) ([sbv/vtt] youtube only)', default='srt')      subtitles.add_option('--sub-lang', '--sub-langs', '--srt-lang', -            action='callback', dest='subtitleslang', metavar='LANGS', type='str', +            action='callback', dest='subtitleslangs', metavar='LANGS', type='str',              default=[], callback=_comma_separated_values_options_callback,              help='languages of the subtitles to download (optional) separated by commas, use IETF language tags like \'en,pt\'') @@ -592,7 +591,7 @@ def _real_main(argv=None):          'allsubtitles': opts.allsubtitles,          'listsubtitles': 
opts.listsubtitles,          'subtitlesformat': opts.subtitlesformat, -        'subtitleslangs': opts.subtitleslang, +        'subtitleslangs': opts.subtitleslangs,          'matchtitle': decodeOption(opts.matchtitle),          'rejecttitle': decodeOption(opts.rejecttitle),          'max_downloads': opts.max_downloads, diff --git a/youtube_dl/extractor/__init__.py b/youtube_dl/extractor/__init__.py index 90f1a4418..26cf24935 100644 --- a/youtube_dl/extractor/__init__.py +++ b/youtube_dl/extractor/__init__.py @@ -18,15 +18,21 @@ from .condenast import CondeNastIE  from .criterion import CriterionIE  from .cspan import CSpanIE  from .dailymotion import DailymotionIE, DailymotionPlaylistIE +from .daum import DaumIE  from .depositfiles import DepositFilesIE  from .dotsub import DotsubIE  from .dreisat import DreiSatIE +from .defense import DefenseGouvFrIE  from .ehow import EHowIE  from .eighttracks import EightTracksIE  from .escapist import EscapistIE  from .exfm import ExfmIE  from .facebook import FacebookIE  from .flickr import FlickrIE +from .francetv import ( +    PluzzIE, +    FranceTvInfoIE, +)  from .freesound import FreesoundIE  from .funnyordie import FunnyOrDieIE  from .gamespot import GameSpotIE @@ -50,12 +56,14 @@ from .keek import KeekIE  from .liveleak import LiveLeakIE  from .livestream import LivestreamIE  from .metacafe import MetacafeIE +from .metacritic import MetacriticIE  from .mit import TechTVMITIE, MITIE  from .mixcloud import MixcloudIE  from .mtv import MTVIE  from .muzu import MuzuTVIE  from .myspass import MySpassIE  from .myvideo import MyVideoIE +from .naver import NaverIE  from .nba import NBAIE  from .nbc import NBCNewsIE  from .ooyala import OoyalaIE @@ -71,6 +79,7 @@ from .roxwel import RoxwelIE  from .rtlnow import RTLnowIE  from .sina import SinaIE  from .slashdot import SlashdotIE +from .slideshare import SlideshareIE  from .sohu import SohuIE  from .soundcloud import SoundcloudIE, SoundcloudSetIE  from .spiegel import SpiegelIE 
@@ -89,6 +98,7 @@ from .tutv import TutvIE  from .unistra import UnistraIE  from .ustream import UstreamIE  from .vbox7 import Vbox7IE +from .veehd import VeeHDIE  from .veoh import VeohIE  from .vevo import VevoIE  from .videofyme import VideofyMeIE diff --git a/youtube_dl/extractor/canalc2.py b/youtube_dl/extractor/canalc2.py index 50832217a..e7f4fa9fd 100644 --- a/youtube_dl/extractor/canalc2.py +++ b/youtube_dl/extractor/canalc2.py @@ -5,7 +5,7 @@ from .common import InfoExtractor  class Canalc2IE(InfoExtractor): -    _IE_NAME = 'canalc2.tv' +    IE_NAME = 'canalc2.tv'      _VALID_URL = r'http://.*?\.canalc2\.tv/video\.asp\?idVideo=(\d+)&voir=oui'      _TEST = { diff --git a/youtube_dl/extractor/canalplus.py b/youtube_dl/extractor/canalplus.py index 1f02519a0..1db9b24cf 100644 --- a/youtube_dl/extractor/canalplus.py +++ b/youtube_dl/extractor/canalplus.py @@ -1,3 +1,4 @@ +# encoding: utf-8  import re  import xml.etree.ElementTree @@ -5,24 +6,29 @@ from .common import InfoExtractor  from ..utils import unified_strdate  class CanalplusIE(InfoExtractor): -    _VALID_URL = r'https?://(www\.canalplus\.fr/.*?\?vid=|player\.canalplus\.fr/#/)(?P<id>\d+)' +    _VALID_URL = r'https?://(www\.canalplus\.fr/.*?/(?P<path>.*)|player\.canalplus\.fr/#/(?P<id>\d+))'      _VIDEO_INFO_TEMPLATE = 'http://service.canal-plus.com/video/rest/getVideosLiees/cplus/%s'      IE_NAME = u'canalplus.fr'      _TEST = { -        u'url': u'http://www.canalplus.fr/c-divertissement/pid3351-c-le-petit-journal.html?vid=889861', -        u'file': u'889861.flv', -        u'md5': u'590a888158b5f0d6832f84001fbf3e99', +        u'url': u'http://www.canalplus.fr/c-infos-documentaires/pid1830-c-zapping.html?vid=922470', +        u'file': u'922470.flv',          u'info_dict': { -            u'title': u'Le Petit Journal 20/06/13 - La guerre des drone', -            u'upload_date': u'20130620', +            u'title': u'Zapping - 26/08/13', +            u'description': u'Le meilleur de toutes les chaînes, tous 
les jours.\nEmission du 26 août 2013', +            u'upload_date': u'20130826', +        }, +        u'params': { +            u'skip_download': True,          }, -        u'skip': u'Requires rtmpdump'      }      def _real_extract(self, url):          mobj = re.match(self._VALID_URL, url)          video_id = mobj.group('id') +        if video_id is None: +            webpage = self._download_webpage(url, mobj.group('path')) +            video_id = self._search_regex(r'videoId = "(\d+)";', webpage, u'video id')          info_url = self._VIDEO_INFO_TEMPLATE % video_id          info_page = self._download_webpage(info_url,video_id,                                              u'Downloading video info') @@ -43,4 +49,6 @@ class CanalplusIE(InfoExtractor):                  'ext': 'flv',                  'upload_date': unified_strdate(infos.find('PUBLICATION/DATE').text),                  'thumbnail': media.find('IMAGES/GRAND').text, +                'description': infos.find('DESCRIPTION').text, +                'view_count': int(infos.find('NB_VUES').text),                  } diff --git a/youtube_dl/extractor/dailymotion.py b/youtube_dl/extractor/dailymotion.py index 1ea449ca8..360113f9c 100644 --- a/youtube_dl/extractor/dailymotion.py +++ b/youtube_dl/extractor/dailymotion.py @@ -3,18 +3,22 @@ import json  import itertools  from .common import InfoExtractor +from .subtitles import SubtitlesInfoExtractor +  from ..utils import (      compat_urllib_request, +    compat_str,      get_element_by_attribute,      get_element_by_id,      ExtractorError,  ) -class DailymotionIE(InfoExtractor): + +class DailymotionIE(SubtitlesInfoExtractor):      """Information Extractor for Dailymotion""" -    _VALID_URL = r'(?i)(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/video/([^/]+)' +    _VALID_URL = r'(?i)(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/(?:embed/)?video/([^/]+)'      IE_NAME = u'dailymotion'      _TEST = {          u'url': 
u'http://www.dailymotion.com/video/x33vw9_tutoriel-de-youtubeur-dl-des-video_tech', @@ -33,6 +37,7 @@ class DailymotionIE(InfoExtractor):          video_id = mobj.group(1).split('_')[0].split('?')[0]          video_extension = 'mp4' +        url = 'http://www.dailymotion.com/video/%s' % video_id          # Retrieve video webpage to extract further information          request = compat_urllib_request.Request(url) @@ -55,7 +60,8 @@ class DailymotionIE(InfoExtractor):          embed_url = 'http://www.dailymotion.com/embed/video/%s' % video_id          embed_page = self._download_webpage(embed_url, video_id,                                              u'Downloading embed page') -        info = self._search_regex(r'var info = ({.*?}),', embed_page, 'video info') +        info = self._search_regex(r'var info = ({.*?}),$', embed_page, +            'video info', flags=re.MULTILINE)          info = json.loads(info)          # TODO: support choosing qualities @@ -71,6 +77,12 @@ class DailymotionIE(InfoExtractor):              raise ExtractorError(u'Unable to extract video URL')          video_url = info[max_quality] +        # subtitles +        video_subtitles = self.extract_subtitles(video_id) +        if self._downloader.params.get('listsubtitles', False): +            self._list_available_subtitles(video_id) +            return +          return [{              'id':       video_id,              'url':      video_url, @@ -78,9 +90,25 @@ class DailymotionIE(InfoExtractor):              'upload_date':  video_upload_date,              'title':    self._og_search_title(webpage),              'ext':      video_extension, +            'subtitles':    video_subtitles,              'thumbnail': info['thumbnail_url']          }] +    def _get_available_subtitles(self, video_id): +        try: +            sub_list = self._download_webpage( +                'https://api.dailymotion.com/video/%s/subtitles?fields=id,language,url' % video_id, +                video_id, note=False) + 
       except ExtractorError as err: +            self._downloader.report_warning(u'unable to download video subtitles: %s' % compat_str(err)) +            return {} +        info = json.loads(sub_list) +        if (info['total'] > 0): +            sub_lang_list = dict((l['language'], l['url']) for l in info['list']) +            return sub_lang_list +        self._downloader.report_warning(u'video doesn\'t have subtitles') +        return {} +  class DailymotionPlaylistIE(InfoExtractor):      _VALID_URL = r'(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/playlist/(?P<id>.+?)/' diff --git a/youtube_dl/extractor/daum.py b/youtube_dl/extractor/daum.py new file mode 100644 index 000000000..a804e83bd --- /dev/null +++ b/youtube_dl/extractor/daum.py @@ -0,0 +1,74 @@ +# encoding: utf-8 +import re +import xml.etree.ElementTree + +from .common import InfoExtractor +from ..utils import ( +    compat_urllib_parse, +    determine_ext, +) + + +class DaumIE(InfoExtractor): +    _VALID_URL = r'https?://tvpot\.daum\.net/.*?clipid=(?P<id>\d+)' +    IE_NAME = u'daum.net' + +    _TEST = { +        u'url': u'http://tvpot.daum.net/clip/ClipView.do?clipid=52554690', +        u'file': u'52554690.mp4', +        u'info_dict': { +            u'title': u'DOTA 2GETHER 시즌2 6회 - 2부', +            u'description': u'DOTA 2GETHER 시즌2 6회 - 2부', +            u'upload_date': u'20130831', +            u'duration': 3868, +        }, +    } + +    def _real_extract(self, url): +        mobj = re.match(self._VALID_URL, url) +        video_id = mobj.group(1) +        canonical_url = 'http://tvpot.daum.net/v/%s' % video_id +        webpage = self._download_webpage(canonical_url, video_id) +        full_id = self._search_regex(r'<link rel="video_src" href=".+?vid=(.+?)"', +            webpage, u'full id') +        query = compat_urllib_parse.urlencode({'vid': full_id}) +        info_xml = self._download_webpage( +            'http://tvpot.daum.net/clip/ClipInfoXml.do?' 
+ query, video_id, +            u'Downloading video info') +        urls_xml = self._download_webpage( +            'http://videofarm.daum.net/controller/api/open/v1_2/MovieData.apixml?' + query, +            video_id, u'Downloading video formats info') +        info = xml.etree.ElementTree.fromstring(info_xml.encode('utf-8')) +        urls = xml.etree.ElementTree.fromstring(urls_xml.encode('utf-8')) + +        self.to_screen(u'%s: Getting video urls' % video_id) +        formats = [] +        for format_el in urls.findall('result/output_list/output_list'): +            profile = format_el.attrib['profile'] +            format_query = compat_urllib_parse.urlencode({ +                'vid': full_id, +                'profile': profile, +            }) +            url_xml = self._download_webpage( +                'http://videofarm.daum.net/controller/api/open/v1_2/MovieLocation.apixml?' + format_query, +                video_id, note=False) +            url_doc = xml.etree.ElementTree.fromstring(url_xml.encode('utf-8')) +            format_url = url_doc.find('result/url').text +            formats.append({ +                'url': format_url, +                'ext': determine_ext(format_url), +                'format_id': profile, +            }) + +        info = { +            'id': video_id, +            'title': info.find('TITLE').text, +            'formats': formats, +            'thumbnail': self._og_search_thumbnail(webpage), +            'description': info.find('CONTENTS').text, +            'duration': int(info.find('DURATION').text), +            'upload_date': info.find('REGDTTM').text[:8], +        } +        # TODO: Remove when #980 has been merged +        info.update(formats[-1]) +        return info diff --git a/youtube_dl/extractor/defense.py b/youtube_dl/extractor/defense.py new file mode 100644 index 000000000..424d960da --- /dev/null +++ b/youtube_dl/extractor/defense.py @@ -0,0 +1,39 @@ +import re +import json + +from .common import 
InfoExtractor + + +class DefenseGouvFrIE(InfoExtractor): +    _IE_NAME = 'defense.gouv.fr' +    _VALID_URL = (r'http://.*?\.defense\.gouv\.fr/layout/set/' +        r'ligthboxvideo/base-de-medias/webtv/(.*)') + +    _TEST = { +        u'url': (u'http://www.defense.gouv.fr/layout/set/ligthboxvideo/' +        u'base-de-medias/webtv/attaque-chimique-syrienne-du-21-aout-2013-1'), +        u'file': u'11213.mp4', +        u'md5': u'75bba6124da7e63d2d60b5244ec9430c', +        "info_dict": { +            "title": "attaque-chimique-syrienne-du-21-aout-2013-1" +        } +    } + +    def _real_extract(self, url): +        title = re.match(self._VALID_URL, url).group(1) +        webpage = self._download_webpage(url, title) +        video_id = self._search_regex( +            r"flashvars.pvg_id=\"(\d+)\";", +            webpage, 'ID') +         +        json_url = ('http://static.videos.gouv.fr/brightcovehub/export/json/' +            + video_id) +        info = self._download_webpage(json_url, title, +                                                  'Downloading JSON config') +        video_url = json.loads(info)['renditions'][0]['url'] +         +        return {'id': video_id, +                'ext': 'mp4', +                'url': video_url, +                'title': title, +                } diff --git a/youtube_dl/extractor/francetv.py b/youtube_dl/extractor/francetv.py new file mode 100644 index 000000000..f2b12c884 --- /dev/null +++ b/youtube_dl/extractor/francetv.py @@ -0,0 +1,77 @@ +# encoding: utf-8 +import re +import xml.etree.ElementTree + +from .common import InfoExtractor +from ..utils import ( +    compat_urlparse, +) + + +class FranceTVBaseInfoExtractor(InfoExtractor): +    def _extract_video(self, video_id): +        xml_desc = self._download_webpage( +            'http://www.francetvinfo.fr/appftv/webservices/video/' +            'getInfosOeuvre.php?id-diffusion=' +            + video_id, video_id, 'Downloading XML config') +        info = 
xml.etree.ElementTree.fromstring(xml_desc.encode('utf-8')) + +        manifest_url = info.find('videos/video/url').text +        video_url = manifest_url.replace('manifest.f4m', 'index_2_av.m3u8') +        video_url = video_url.replace('/z/', '/i/') +        thumbnail_path = info.find('image').text + +        return {'id': video_id, +                'ext': 'mp4', +                'url': video_url, +                'title': info.find('titre').text, +                'thumbnail': compat_urlparse.urljoin('http://pluzz.francetv.fr', thumbnail_path), +                'description': info.find('synopsis').text, +                } + + +class PluzzIE(FranceTVBaseInfoExtractor): +    IE_NAME = u'pluzz.francetv.fr' +    _VALID_URL = r'https?://pluzz\.francetv\.fr/videos/(.*?)\.html' + +    _TEST = { +        u'url': u'http://pluzz.francetv.fr/videos/allo_rufo_saison5_,88439064.html', +        u'file': u'88439064.mp4', +        u'info_dict': { +            u'title': u'Allô Rufo', +            u'description': u'md5:d909f1ebdf963814b65772aea250400e', +        }, +        u'params': { +            u'skip_download': True, +        }, +    } + +    def _real_extract(self, url): +        title = re.match(self._VALID_URL, url).group(1) +        webpage = self._download_webpage(url, title) +        video_id = self._search_regex( +            r'data-diffusion="(\d+)"', webpage, 'ID') +        return self._extract_video(video_id) + + +class FranceTvInfoIE(FranceTVBaseInfoExtractor): +    IE_NAME = u'francetvinfo.fr' +    _VALID_URL = r'https?://www\.francetvinfo\.fr/replay.*/(?P<title>.+).html' + +    _TEST = { +        u'url': u'http://www.francetvinfo.fr/replay-jt/france-3/soir-3/jt-grand-soir-3-lundi-26-aout-2013_393427.html', +        u'file': u'84981923.mp4', +        u'info_dict': { +            u'title': u'Soir 3', +        }, +        u'params': { +            u'skip_download': True, +        }, +    } + +    def _real_extract(self, url): +        mobj = re.match(self._VALID_URL, 
url) +        page_title = mobj.group('title') +        webpage = self._download_webpage(url, page_title) +        video_id = self._search_regex(r'id-video=(\d+?)"', webpage, u'video id') +        return self._extract_video(video_id) diff --git a/youtube_dl/extractor/funnyordie.py b/youtube_dl/extractor/funnyordie.py index 4508f0dfa..f3d86a711 100644 --- a/youtube_dl/extractor/funnyordie.py +++ b/youtube_dl/extractor/funnyordie.py @@ -21,7 +21,7 @@ class FunnyOrDieIE(InfoExtractor):          video_id = mobj.group('id')          webpage = self._download_webpage(url, video_id) -        video_url = self._search_regex(r'type: "video/mp4", src: "(.*?)"', +        video_url = self._search_regex(r'type="video/mp4" src="(.*?)"',              webpage, u'video URL', flags=re.DOTALL)          info = { diff --git a/youtube_dl/extractor/gamespot.py b/youtube_dl/extractor/gamespot.py index 7585b7061..cd3bbe65f 100644 --- a/youtube_dl/extractor/gamespot.py +++ b/youtube_dl/extractor/gamespot.py @@ -14,7 +14,7 @@ class GameSpotIE(InfoExtractor):          u"file": u"6410818.mp4",          u"md5": u"b2a30deaa8654fcccd43713a6b6a4825",          u"info_dict": { -            u"title": u"Arma III - Community Guide: SITREP I", +            u"title": u"Arma 3 - Community Guide: SITREP I",              u"upload_date": u"20130627",           }      } diff --git a/youtube_dl/extractor/generic.py b/youtube_dl/extractor/generic.py index dc4dea4ad..f92e61fea 100644 --- a/youtube_dl/extractor/generic.py +++ b/youtube_dl/extractor/generic.py @@ -109,6 +109,11 @@ class GenericIE(InfoExtractor):          return new_url      def _real_extract(self, url): +        parsed_url = compat_urlparse.urlparse(url) +        if not parsed_url.scheme: +            self._downloader.report_warning('The url doesn\'t specify the protocol, trying with http') +            return self.url_result('http://' + url) +          try:              new_url = self._test_redirect(url)              if new_url: @@ -153,7 +158,7 @@ 
class GenericIE(InfoExtractor):                  mobj = re.search(r'<meta.*?property="og:video".*?content="(.*?)"', webpage)          if mobj is None:              # HTML5 video -            mobj = re.search(r'<video[^<]*>.*?<source .*?src="([^"]+)"', webpage, flags=re.DOTALL) +            mobj = re.search(r'<video[^<]*(?:>.*?<source.*?)? src="([^"]+)"', webpage, flags=re.DOTALL)          if mobj is None:              raise ExtractorError(u'Invalid URL: %s' % url) @@ -162,9 +167,9 @@ class GenericIE(InfoExtractor):          if mobj.group(1) is None:              raise ExtractorError(u'Invalid URL: %s' % url) -        video_url = compat_urllib_parse.unquote(mobj.group(1)) +        video_url = mobj.group(1)          video_url = compat_urlparse.urljoin(url, video_url) -        video_id = os.path.basename(video_url) +        video_id = compat_urllib_parse.unquote(os.path.basename(video_url))          # here's a fun little line of code for you:          video_extension = os.path.splitext(video_id)[1][1:] diff --git a/youtube_dl/extractor/howcast.py b/youtube_dl/extractor/howcast.py index 6104c4b5e..46954337f 100644 --- a/youtube_dl/extractor/howcast.py +++ b/youtube_dl/extractor/howcast.py @@ -19,8 +19,7 @@ class HowcastIE(InfoExtractor):          mobj = re.match(self._VALID_URL, url)          video_id = mobj.group('id') -        webpage_url = 'http://www.howcast.com/videos/' + video_id -        webpage = self._download_webpage(webpage_url, video_id) +        webpage = self._download_webpage(url, video_id)          self.report_extraction(video_id) diff --git a/youtube_dl/extractor/metacafe.py b/youtube_dl/extractor/metacafe.py index e38dc98b4..e537648ff 100644 --- a/youtube_dl/extractor/metacafe.py +++ b/youtube_dl/extractor/metacafe.py @@ -122,7 +122,7 @@ class MetacafeIE(InfoExtractor):          video_title = self._html_search_regex(r'(?im)<title>(.*) - Video</title>', webpage, u'title')          description = self._og_search_description(webpage)          
video_uploader = self._html_search_regex( -                r'submitter=(.*?);|googletag\.pubads\(\)\.setTargeting\("channel","([^"]+)"\);', +                r'submitter=(.*?);|googletag\.pubads\(\)\.setTargeting\("(?:channel|submiter)","([^"]+)"\);',                 webpage, u'uploader nickname', fatal=False)          return { diff --git a/youtube_dl/extractor/metacritic.py b/youtube_dl/extractor/metacritic.py new file mode 100644 index 000000000..449138b56 --- /dev/null +++ b/youtube_dl/extractor/metacritic.py @@ -0,0 +1,55 @@ +import re +import xml.etree.ElementTree +import operator + +from .common import InfoExtractor + + +class MetacriticIE(InfoExtractor): +    _VALID_URL = r'https?://www\.metacritic\.com/.+?/trailers/(?P<id>\d+)' + +    _TEST = { +        u'url': u'http://www.metacritic.com/game/playstation-4/infamous-second-son/trailers/3698222', +        u'file': u'3698222.mp4', +        u'info_dict': { +            u'title': u'inFamous: Second Son - inSide Sucker Punch: Smoke & Mirrors', +            u'description': u'Take a peak behind-the-scenes to see how Sucker Punch brings smoke into the universe of inFAMOUS Second Son on the PS4.', +            u'duration': 221, +        }, +    } + +    def _real_extract(self, url): +        mobj = re.match(self._VALID_URL, url) +        video_id = mobj.group('id') +        webpage = self._download_webpage(url, video_id) +        # The xml is not well formatted, there are raw '&' +        info_xml = self._download_webpage('http://www.metacritic.com/video_data?video=' + video_id, +            video_id, u'Downloading info xml').replace('&', '&amp;') +        info = xml.etree.ElementTree.fromstring(info_xml.encode('utf-8')) + +        clip = next(c for c in info.findall('playList/clip') if c.find('id').text == video_id) +        formats = [] +        for videoFile in clip.findall('httpURI/videoFile'): +            rate_str = videoFile.find('rate').text +            video_url = videoFile.find('filePath').text +            
formats.append({ +                'url': video_url, +                'ext': 'mp4', +                'format_id': rate_str, +                'rate': int(rate_str), +            }) +        formats.sort(key=operator.itemgetter('rate')) + +        description = self._html_search_regex(r'<b>Description:</b>(.*?)</p>', +            webpage, u'description', flags=re.DOTALL) + +        info = { +            'id': video_id, +            'title': clip.find('title').text, +            'formats': formats, +            'description': description, +            'duration': int(clip.find('duration').text), +        } +        # TODO: Remove when #980 has been merged +        info.update(formats[-1]) +        return info diff --git a/youtube_dl/extractor/naver.py b/youtube_dl/extractor/naver.py new file mode 100644 index 000000000..9df236d69 --- /dev/null +++ b/youtube_dl/extractor/naver.py @@ -0,0 +1,73 @@ +# encoding: utf-8 +import re +import xml.etree.ElementTree + +from .common import InfoExtractor +from ..utils import ( +    compat_urllib_parse, +    ExtractorError, +) + + +class NaverIE(InfoExtractor): +    _VALID_URL = r'https?://tvcast\.naver\.com/v/(?P<id>\d+)' + +    _TEST = { +        u'url': u'http://tvcast.naver.com/v/81652', +        u'file': u'81652.mp4', +        u'info_dict': { +            u'title': u'[9월 모의고사 해설강의][수학_김상희] 수학 A형 16~20번', +            u'description': u'합격불변의 법칙 메가스터디 | 메가스터디 수학 김상희 선생님이 9월 모의고사 수학A형 16번에서 20번까지 해설강의를 공개합니다.', +            u'upload_date': u'20130903', +        }, +    } + +    def _real_extract(self, url): +        mobj = re.match(self._VALID_URL, url) +        video_id = mobj.group(1) +        webpage = self._download_webpage(url, video_id) +        m_id = re.search(r'var rmcPlayer = new nhn.rmcnmv.RMCVideoPlayer\("(.+?)", "(.+?)"', +            webpage) +        if m_id is None: +            raise ExtractorError(u'couldn\'t extract vid and key') +        vid = m_id.group(1) +        key = m_id.group(2) +        query = 
compat_urllib_parse.urlencode({'vid': vid, 'inKey': key,}) +        query_urls = compat_urllib_parse.urlencode({ +            'masterVid': vid, +            'protocol': 'p2p', +            'inKey': key, +        }) +        info_xml = self._download_webpage( +            'http://serviceapi.rmcnmv.naver.com/flash/videoInfo.nhn?' + query, +            video_id, u'Downloading video info') +        urls_xml = self._download_webpage( +            'http://serviceapi.rmcnmv.naver.com/flash/playableEncodingOption.nhn?' + query_urls, +            video_id, u'Downloading video formats info') +        info = xml.etree.ElementTree.fromstring(info_xml.encode('utf-8')) +        urls = xml.etree.ElementTree.fromstring(urls_xml.encode('utf-8')) + +        formats = [] +        for format_el in urls.findall('EncodingOptions/EncodingOption'): +            domain = format_el.find('Domain').text +            if domain.startswith('rtmp'): +                continue +            formats.append({ +                'url': domain + format_el.find('uri').text, +                'ext': 'mp4', +                'width': int(format_el.find('width').text), +                'height': int(format_el.find('height').text), +            }) + +        info = { +            'id': video_id, +            'title': info.find('Subject').text, +            'formats': formats, +            'description': self._og_search_description(webpage), +            'thumbnail': self._og_search_thumbnail(webpage), +            'upload_date': info.find('WriteDate').text.replace('.', ''), +            'view_count': int(info.find('PlayCount').text), +        } +        # TODO: Remove when #980 has been merged +        info.update(formats[-1]) +        return info diff --git a/youtube_dl/extractor/orf.py b/youtube_dl/extractor/orf.py index 41ef8e992..cfca2a063 100644 --- a/youtube_dl/extractor/orf.py +++ b/youtube_dl/extractor/orf.py @@ -14,19 +14,6 @@ from ..utils import (  class ORFIE(InfoExtractor):      _VALID_URL = 
r'https?://tvthek.orf.at/(programs/.+?/episodes|topics/.+?)/(?P<id>\d+)' -    _TEST = { -        u'url': u'http://tvthek.orf.at/programs/1171769-Wetter-ZIB/episodes/6557323-Wetter', -        u'file': u'6566957.flv', -        u'info_dict': { -            u'title': u'Wetter', -            u'description': u'Christa Kummer, Marcus Wadsak und Kollegen  präsentieren abwechselnd ihre täglichen Wetterprognosen für Österreich.\r \r Mehr Wetter unter wetter.ORF.at', -        }, -        u'params': { -            # It uses rtmp -            u'skip_download': True, -        } -    } -      def _real_extract(self, url):          mobj = re.match(self._VALID_URL, url)          playlist_id = mobj.group('id') diff --git a/youtube_dl/extractor/slideshare.py b/youtube_dl/extractor/slideshare.py new file mode 100644 index 000000000..afc3001b5 --- /dev/null +++ b/youtube_dl/extractor/slideshare.py @@ -0,0 +1,47 @@ +import re +import json + +from .common import InfoExtractor +from ..utils import ( +    compat_urlparse, +    ExtractorError, +) + + +class SlideshareIE(InfoExtractor): +    _VALID_URL = r'https?://www\.slideshare\.net/[^/]+?/(?P<title>.+?)($|\?)' + +    _TEST = { +        u'url': u'http://www.slideshare.net/Dataversity/keynote-presentation-managing-scale-and-complexity', +        u'file': u'25665706.mp4', +        u'info_dict': { +            u'title': u'Managing Scale and Complexity', +            u'description': u'This was a keynote presentation at the NoSQL Now! 2013 Conference & Expo (http://www.nosqlnow.com). 
This presentation was given by Adrian Cockcroft from Netflix', +        }, +    } + +    def _real_extract(self, url): +        mobj = re.match(self._VALID_URL, url) +        page_title = mobj.group('title') +        webpage = self._download_webpage(url, page_title) +        slideshare_obj = self._search_regex( +            r'var slideshare_object =  ({.*?}); var user_info =', +            webpage, u'slideshare object') +        info = json.loads(slideshare_obj) +        if info['slideshow']['type'] != u'video': +            raise ExtractorError(u'Webpage type is "%s": only video extraction is supported for Slideshare' % info['slideshow']['type'], expected=True) + +        doc = info['doc'] +        bucket = info['jsplayer']['video_bucket'] +        ext = info['jsplayer']['video_extension'] +        video_url = compat_urlparse.urljoin(bucket, doc + '-SD.' + ext) + +        return { +            '_type': 'video', +            'id': info['slideshow']['id'], +            'title': info['slideshow']['title'], +            'ext': ext, +            'url': video_url, +            'thumbnail': info['slideshow']['pin_image_url'], +            'description': self._og_search_description(webpage), +        } diff --git a/youtube_dl/extractor/sohu.py b/youtube_dl/extractor/sohu.py index 77bb0a8dc..2b9bf0cb7 100644 --- a/youtube_dl/extractor/sohu.py +++ b/youtube_dl/extractor/sohu.py @@ -8,7 +8,7 @@ from ..utils import ExtractorError  class SohuIE(InfoExtractor): -    _VALID_URL = r'https?://tv\.sohu\.com/\d+?/n(?P<id>\d+)\.shtml.*?' +    _VALID_URL = r'https?://(?P<mytv>my\.)?tv\.sohu\.com/.+?/(?(mytv)|n)(?P<id>\d+)\.shtml.*?'      
_TEST = {          u'url': u'http://tv.sohu.com/20130724/n382479172.shtml#super', @@ -21,8 +21,11 @@ class SohuIE(InfoExtractor):      def _real_extract(self, url): -        def _fetch_data(vid_id): -            base_data_url = u'http://hot.vrs.sohu.com/vrs_flash.action?vid=' +        def _fetch_data(vid_id, mytv=False): +            if mytv: +                base_data_url = 'http://my.tv.sohu.com/play/videonew.do?vid=' +            else: +                base_data_url = u'http://hot.vrs.sohu.com/vrs_flash.action?vid='              data_url = base_data_url + str(vid_id)              data_json = self._download_webpage(                  data_url, video_id, @@ -31,15 +34,16 @@ class SohuIE(InfoExtractor):          mobj = re.match(self._VALID_URL, url)          video_id = mobj.group('id') +        mytv = mobj.group('mytv') is not None          webpage = self._download_webpage(url, video_id)          raw_title = self._html_search_regex(r'(?s)<title>(.+?)</title>',                                              webpage, u'video title')          title = raw_title.partition('-')[0].strip() -        vid = self._html_search_regex(r'var vid="(\d+)"', webpage, +        vid = self._html_search_regex(r'var vid ?= ?["\'](\d+)["\']', webpage,                                        u'video path') -        data = _fetch_data(vid) +        data = _fetch_data(vid, mytv)          QUALITIES = ('ori', 'super', 'high', 'nor')          vid_ids = [data['data'][q + 'Vid'] @@ -51,7 +55,7 @@ class SohuIE(InfoExtractor):          # For now, we just pick the highest available quality          vid_id = vid_ids[-1] -        format_data = data if vid == vid_id else _fetch_data(vid_id) +        format_data = data if vid == vid_id else _fetch_data(vid_id, mytv)          part_count = format_data['data']['totalBlocks']          allot = format_data['allot']          prot = format_data['prot'] diff --git a/youtube_dl/extractor/subtitles.py b/youtube_dl/extractor/subtitles.py new file mode 100644 index 
000000000..97215f289 --- /dev/null +++ b/youtube_dl/extractor/subtitles.py @@ -0,0 +1,92 @@ +from .common import InfoExtractor + +from ..utils import ( +    compat_str, +    ExtractorError, +) + + +class SubtitlesInfoExtractor(InfoExtractor): +    @property +    def _have_to_download_any_subtitles(self): +        return any([self._downloader.params.get('writesubtitles', False), +                    self._downloader.params.get('writeautomaticsub'), +                    self._downloader.params.get('allsubtitles', False)]) + +    def _list_available_subtitles(self, video_id, webpage=None): +        """ outputs the available subtitles for the video """ +        sub_lang_list = self._get_available_subtitles(video_id) +        auto_captions_list = self._get_available_automatic_caption(video_id, webpage) +        sub_lang = ",".join(list(sub_lang_list.keys())) +        self.to_screen(u'%s: Available subtitles for video: %s' % +                       (video_id, sub_lang)) +        auto_lang = ",".join(auto_captions_list.keys()) +        self.to_screen(u'%s: Available automatic captions for video: %s' % +                       (video_id, auto_lang)) + +    def extract_subtitles(self, video_id, video_webpage=None): +        """ +        returns {sub_lang: sub} ,{} if subtitles not found or None if the +        subtitles aren't requested. 
+        """ +        if not self._have_to_download_any_subtitles: +            return None +        available_subs_list = {} +        if self._downloader.params.get('writeautomaticsub', False): +            available_subs_list.update(self._get_available_automatic_caption(video_id, video_webpage)) +        if self._downloader.params.get('writesubtitles', False) or self._downloader.params.get('allsubtitles', False): +            available_subs_list.update(self._get_available_subtitles(video_id)) + +        if not available_subs_list:  # error, it didn't get the available subtitles +            return {} +        if self._downloader.params.get('allsubtitles', False): +            sub_lang_list = available_subs_list +        else: +            if self._downloader.params.get('subtitleslangs', False): +                requested_langs = self._downloader.params.get('subtitleslangs') +            elif 'en' in available_subs_list: +                requested_langs = ['en'] +            else: +                requested_langs = [list(available_subs_list.keys())[0]] + +            sub_lang_list = {} +            for sub_lang in requested_langs: +                if not sub_lang in available_subs_list: +                    self._downloader.report_warning(u'no closed captions found in the specified language "%s"' % sub_lang) +                    continue +                sub_lang_list[sub_lang] = available_subs_list[sub_lang] + +        subtitles = {} +        for sub_lang, url in sub_lang_list.items(): +            subtitle = self._request_subtitle_url(sub_lang, url) +            if subtitle: +                subtitles[sub_lang] = subtitle +        return subtitles + +    def _request_subtitle_url(self, sub_lang, url): +        """ makes the http request for the subtitle """ +        try: +            sub = self._download_webpage(url, None, note=False) +        except ExtractorError as err: +            self._downloader.report_warning(u'unable to download video subtitles for %s: 
%s' % (sub_lang, compat_str(err))) +            return +        if not sub: +            self._downloader.report_warning(u'Did not fetch video subtitles') +            return +        return sub + +    def _get_available_subtitles(self, video_id): +        """ +        returns {sub_lang: url} or {} if not available +        Must be redefined by the subclasses +        """ +        pass + +    def _get_available_automatic_caption(self, video_id, webpage): +        """ +        returns {sub_lang: url} or {} if not available +        Must be redefined by the subclasses that support automatic captions, +        otherwise it will return {} +        """ +        self._downloader.report_warning(u'Automatic Captions not supported by this server') +        return {} diff --git a/youtube_dl/extractor/veehd.py b/youtube_dl/extractor/veehd.py new file mode 100644 index 000000000..3a99a29c6 --- /dev/null +++ b/youtube_dl/extractor/veehd.py @@ -0,0 +1,56 @@ +import re +import json + +from .common import InfoExtractor +from ..utils import ( +    compat_urlparse, +    get_element_by_id, +    clean_html, +) + +class VeeHDIE(InfoExtractor): +    _VALID_URL = r'https?://veehd.com/video/(?P<id>\d+)' + +    _TEST = { +        u'url': u'http://veehd.com/video/4686958', +        u'file': u'4686958.mp4', +        u'info_dict': { +            u'title': u'Time Lapse View from Space ( ISS)', +            u'uploader_id': u'spotted', +            u'description': u'md5:f0094c4cf3a72e22bc4e4239ef767ad7', +        }, +    } + +    def _real_extract(self, url): +        mobj = re.match(self._VALID_URL, url) +        video_id = mobj.group('id') + +        webpage = self._download_webpage(url, video_id) +        player_path = self._search_regex(r'\$\("#playeriframe"\).attr\({src : "(.+?)"', +            webpage, u'player path') +        player_url = compat_urlparse.urljoin(url, player_path) +        player_page = self._download_webpage(player_url, video_id, +            u'Downloading player page') + 
       config_json = self._search_regex(r'value=\'config=({.+?})\'', +            player_page, u'config json') +        config = json.loads(config_json) + +        video_url = compat_urlparse.unquote(config['clip']['url']) +        title = clean_html(get_element_by_id('videoName', webpage).rpartition('|')[0]) +        uploader_id = self._html_search_regex(r'<a href="/profile/\d+">(.+?)</a>', +            webpage, u'uploader') +        thumbnail = self._search_regex(r'<img id="veehdpreview" src="(.+?)"', +            webpage, u'thumbnail') +        description = self._html_search_regex(r'<td class="infodropdown".*?<div>(.*?)<ul', +            webpage, u'description', flags=re.DOTALL) + +        return { +            '_type': 'video', +            'id': video_id, +            'title': title, +            'url': video_url, +            'ext': 'mp4', +            'uploader_id': uploader_id, +            'thumbnail': thumbnail, +            'description': description, +        } diff --git a/youtube_dl/extractor/vimeo.py b/youtube_dl/extractor/vimeo.py index 512e06e2a..4a7d82b7a 100644 --- a/youtube_dl/extractor/vimeo.py +++ b/youtube_dl/extractor/vimeo.py @@ -44,6 +44,16 @@ class VimeoIE(InfoExtractor):                  u'title': u'Andy Allan - Putting the Carto into OpenStreetMap Cartography',              },          }, +        { +            u'url': u'http://player.vimeo.com/video/54469442', +            u'file': u'54469442.mp4', +            u'md5': u'619b811a4417aa4abe78dc653becf511', +            u'note': u'Videos that embed the url in the player page', +            u'info_dict': { +                u'title': u'Kathy Sierra: Building the minimum Badass User, Business of Software', +                u'uploader': u'The BLN & Business of Software', +            }, +        },      ]      def _login(self): @@ -112,7 +122,8 @@ class VimeoIE(InfoExtractor):          # Extract the config JSON          try: -            config = webpage.split(' = 
{config:')[1].split(',assets:')[0] +            config = self._search_regex([r' = {config:({.+?}),assets:', r'c=({.+?);'], +                webpage, u'info section', flags=re.DOTALL)              config = json.loads(config)          except:              if re.search('The creator of this video has not given you permission to embed it on this domain.', webpage): @@ -132,12 +143,22 @@ class VimeoIE(InfoExtractor):          video_uploader_id = config["video"]["owner"]["url"].split('/')[-1] if config["video"]["owner"]["url"] else None          # Extract video thumbnail -        video_thumbnail = config["video"]["thumbnail"] +        video_thumbnail = config["video"].get("thumbnail") +        if video_thumbnail is None: +            _, video_thumbnail = sorted((int(width), t_url) for (width, t_url) in config["video"]["thumbs"].items())[-1]          # Extract video description -        video_description = get_element_by_attribute("itemprop", "description", webpage) -        if video_description: video_description = clean_html(video_description) -        else: video_description = u'' +        video_description = None +        try: +            video_description = get_element_by_attribute("itemprop", "description", webpage) +            if video_description: video_description = clean_html(video_description) +        except AssertionError as err: +            # On some pages like (http://player.vimeo.com/video/54469442) the +            # html tags are not closed, python 2.6 cannot handle it +            if err.args[0] == 'we should not get here!': +                pass +            else: +                raise          # Extract upload date          video_upload_date = None @@ -154,14 +175,15 @@ class VimeoIE(InfoExtractor):          # TODO bind to format param          codecs = [('h264', 'mp4'), ('vp8', 'flv'), ('vp6', 'flv')]          files = { 'hd': [], 'sd': [], 'other': []} +        config_files = config["video"].get("files") or config["request"].get("files")          
for codec_name, codec_extension in codecs: -            if codec_name in config["video"]["files"]: -                if 'hd' in config["video"]["files"][codec_name]: +            if codec_name in config_files: +                if 'hd' in config_files[codec_name]:                      files['hd'].append((codec_name, codec_extension, 'hd')) -                elif 'sd' in config["video"]["files"][codec_name]: +                elif 'sd' in config_files[codec_name]:                      files['sd'].append((codec_name, codec_extension, 'sd'))                  else: -                    files['other'].append((codec_name, codec_extension, config["video"]["files"][codec_name][0])) +                    files['other'].append((codec_name, codec_extension, config_files[codec_name][0]))          for quality in ('hd', 'sd', 'other'):              if len(files[quality]) > 0: @@ -173,8 +195,12 @@ class VimeoIE(InfoExtractor):          else:              raise ExtractorError(u'No known codec found') -        video_url = "http://player.vimeo.com/play_redirect?clip_id=%s&sig=%s&time=%s&quality=%s&codecs=%s&type=moogaloop_local&embed_location=" \ -                    %(video_id, sig, timestamp, video_quality, video_codec.upper()) +        video_url = None +        if isinstance(config_files[video_codec], dict): +            video_url = config_files[video_codec][video_quality].get("url") +        if video_url is None: +            video_url = "http://player.vimeo.com/play_redirect?clip_id=%s&sig=%s&time=%s&quality=%s&codecs=%s&type=moogaloop_local&embed_location=" \ +                        %(video_id, sig, timestamp, video_quality, video_codec.upper())          return [{              'id':       video_id, diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py index 9e2373bd5..f49665925 100644 --- a/youtube_dl/extractor/youtube.py +++ b/youtube_dl/extractor/youtube.py @@ -5,8 +5,10 @@ import netrc  import re  import socket  import itertools +import 
xml.etree.ElementTree  from .common import InfoExtractor, SearchInfoExtractor +from .subtitles import SubtitlesInfoExtractor  from ..utils import (      compat_http_client,      compat_parse_qs, @@ -130,12 +132,13 @@ class YoutubeBaseInfoExtractor(InfoExtractor):              return          self._confirm_age() -class YoutubeIE(YoutubeBaseInfoExtractor): + +class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):      IE_DESC = u'YouTube.com'      _VALID_URL = r"""^                       (                           (?:https?://)?                                       # http(s):// (optional) -                         (?:youtu\.be/|(?:\w+\.)?youtube(?:-nocookie)?\.com/| +                         (?:(?:(?:(?:\w+\.)?youtube(?:-nocookie)?\.com/|                              tube\.majestyc\.net/)                             # the various hostnames, with wildcard subdomains                           (?:.*?\#/)?                                          # handle anchor (#/) redirect urls                           (?:                                                  # the various things that can precede the ID: @@ -146,15 +149,18 @@ class YoutubeIE(YoutubeBaseInfoExtractor):                                   (?:.*?&)?                                    # any other preceding param (like /?s=tuff&v=xxxx)                                   v=                               ) -                         )?                                                   # optional -> youtube.com/xxxx is OK +                         )) +                         |youtu\.be/                                          # just youtu.be/xxxx +                         )                       )?                                                       # all until now is optional -> you can pass the naked ID -                     ([0-9A-Za-z_-]+)                                         # here is it! 
the YouTube video ID +                     ([0-9A-Za-z_-]{11})                                      # here is it! the YouTube video ID                       (?(1).+)?                                                # if we found the ID, everything can follow                       $"""      _NEXT_URL_RE = r'[\?&]next_url=([^&]+)'      # Listed in order of quality -    _available_formats = ['38', '37', '46', '22', '45', '35', '44', '34', '18', '43', '6', '5', '17', '13', -                          '95', '94', '93', '92', '132', '151', +    _available_formats = ['38', '37', '46', '22', '45', '35', '44', '34', '18', '43', '6', '5', '36', '17', '13', +                          # Apple HTTP Live Streaming +                          '96', '95', '94', '93', '92', '132', '151',                            # 3D                            '85', '84', '102', '83', '101', '82', '100',                            # Dash video @@ -163,8 +169,10 @@ class YoutubeIE(YoutubeBaseInfoExtractor):                            # Dash audio                            '141', '172', '140', '171', '139',                            ] -    _available_formats_prefer_free = ['38', '46', '37', '45', '22', '44', '35', '43', '34', '18', '6', '5', '17', '13', -                                      '95', '94', '93', '92', '132', '151', +    _available_formats_prefer_free = ['38', '46', '37', '45', '22', '44', '35', '43', '34', '18', '6', '5', '36', '17', '13', +                                      # Apple HTTP Live Streaming +                                      '96', '95', '94', '93', '92', '132', '151', +                                      # 3D                                        '85', '102', '84', '101', '83', '100', '82',                                        # Dash video                                        '138', '248', '137', '247', '136', '246', '245', @@ -172,11 +180,18 @@ class YoutubeIE(YoutubeBaseInfoExtractor):                                        # Dash audio                       
                 '172', '141', '171', '140', '139',                                        ] +    _video_formats_map = { +        'flv': ['35', '34', '6', '5'], +        '3gp': ['36', '17', '13'], +        'mp4': ['38', '37', '22', '18'], +        'webm': ['46', '45', '44', '43'], +    }      _video_extensions = {          '13': '3gp', -        '17': 'mp4', +        '17': '3gp',          '18': 'mp4',          '22': 'mp4', +        '36': '3gp',          '37': 'mp4',          '38': 'mp4',          '43': 'webm', @@ -193,7 +208,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):          '101': 'webm',          '102': 'webm', -        # videos that use m3u8 +        # Apple HTTP Live Streaming          '92': 'mp4',          '93': 'mp4',          '94': 'mp4', @@ -234,6 +249,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):          '22': '720x1280',          '34': '360x640',          '35': '480x854', +        '36': '240x320',          '37': '1080x1920',          '38': '3072x4096',          '43': '360x640', @@ -373,7 +389,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):      @classmethod      def suitable(cls, url):          """Receives a URL and returns True if suitable for this IE.""" -        if YoutubePlaylistIE.suitable(url) or YoutubeSubscriptionsIE.suitable(url): return False +        if YoutubePlaylistIE.suitable(url): return False          return re.match(cls._VALID_URL, url, re.VERBOSE) is not None      def report_video_webpage_download(self, video_id): @@ -384,19 +400,6 @@ class YoutubeIE(YoutubeBaseInfoExtractor):          """Report attempt to download video info webpage."""          self.to_screen(u'%s: Downloading video info webpage' % video_id) -    def report_video_subtitles_download(self, video_id): -        """Report attempt to download video info webpage.""" -        self.to_screen(u'%s: Checking available subtitles' % video_id) - -    def report_video_subtitles_request(self, video_id, sub_lang, format): -        """Report attempt to download video info 
webpage.""" -        self.to_screen(u'%s: Downloading video subtitles for %s.%s' % (video_id, sub_lang, format)) - -    def report_video_subtitles_available(self, video_id, sub_lang_list): -        """Report available subtitles.""" -        sub_lang = ",".join(list(sub_lang_list.keys())) -        self.to_screen(u'%s: Available subtitles for video: %s' % (video_id, sub_lang)) -      def report_information_extraction(self, video_id):          """Report attempt to extract video information."""          self.to_screen(u'%s: Extracting video information' % video_id) @@ -423,15 +426,15 @@ class YoutubeIE(YoutubeBaseInfoExtractor):          elif len(s) == 87:              return s[6:27] + s[4] + s[28:39] + s[27] + s[40:59] + s[2] + s[60:]          elif len(s) == 86: -            return s[83:36:-1] + s[0] + s[35:2:-1] +            return s[5:34] + s[0] + s[35:38] + s[3] + s[39:45] + s[38] + s[46:53] + s[73] + s[54:73] + s[85] + s[74:85] + s[53]          elif len(s) == 85: -            return s[83:34:-1] + s[0] + s[33:27:-1] + s[3] + s[26:19:-1] + s[34] + s[18:3:-1] + s[27] +            return s[40] + s[82:43:-1] + s[22] + s[42:40:-1] + s[83] + s[39:22:-1] + s[0] + s[21:2:-1]          elif len(s) == 84:              return s[81:36:-1] + s[0] + s[35:2:-1]          elif len(s) == 83:              return s[81:64:-1] + s[82] + s[63:52:-1] + s[45] + s[51:45:-1] + s[1] + s[44:1:-1] + s[0]          elif len(s) == 82: -            return s[1:19] + s[0] + s[20:68] + s[19] + s[69:82] +            return s[80:73:-1] + s[81] + s[72:54:-1] + s[2] + s[53:43:-1] + s[0] + s[42:2:-1] + s[43] + s[1] + s[54]          elif len(s) == 81:              return s[56] + s[79:56:-1] + s[41] + s[55:41:-1] + s[80] + s[40:34:-1] + s[0] + s[33:29:-1] + s[34] + s[28:9:-1] + s[29] + s[8:0:-1] + s[9]          elif len(s) == 80: @@ -451,56 +454,38 @@ class YoutubeIE(YoutubeBaseInfoExtractor):              # Fallback to the other algortihms              return self._decrypt_signature(s) -      def 
_get_available_subtitles(self, video_id): -        self.report_video_subtitles_download(video_id) -        request = compat_urllib_request.Request('http://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id)          try: -            sub_list = compat_urllib_request.urlopen(request).read().decode('utf-8') -        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: +            sub_list = self._download_webpage( +                'http://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id, +                video_id, note=False) +        except ExtractorError as err:              self._downloader.report_warning(u'unable to download video subtitles: %s' % compat_str(err))              return {} -        sub_lang_list = re.findall(r'name="([^"]*)"[^>]+lang_code="([\w\-]+)"', sub_list) -        sub_lang_list = dict((l[1], l[0]) for l in sub_lang_list) +        lang_list = re.findall(r'name="([^"]*)"[^>]+lang_code="([\w\-]+)"', sub_list) + +        sub_lang_list = {} +        for l in lang_list: +            lang = l[1] +            params = compat_urllib_parse.urlencode({ +                'lang': lang, +                'v': video_id, +                'fmt': self._downloader.params.get('subtitlesformat'), +            }) +            url = u'http://www.youtube.com/api/timedtext?' 
+ params +            sub_lang_list[lang] = url          if not sub_lang_list:              self._downloader.report_warning(u'video doesn\'t have subtitles')              return {}          return sub_lang_list -    def _list_available_subtitles(self, video_id): -        sub_lang_list = self._get_available_subtitles(video_id) -        self.report_video_subtitles_available(video_id, sub_lang_list) - -    def _request_subtitle(self, sub_lang, sub_name, video_id, format): -        """ -        Return the subtitle as a string or None if they are not found -        """ -        self.report_video_subtitles_request(video_id, sub_lang, format) -        params = compat_urllib_parse.urlencode({ -            'lang': sub_lang, -            'name': sub_name, -            'v': video_id, -            'fmt': format, -        }) -        url = 'http://www.youtube.com/api/timedtext?' + params -        try: -            sub = compat_urllib_request.urlopen(url).read().decode('utf-8') -        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: -            self._downloader.report_warning(u'unable to download video subtitles for %s: %s' % (sub_lang, compat_str(err))) -            return -        if not sub: -            self._downloader.report_warning(u'Did not fetch video subtitles') -            return -        return sub - -    def _request_automatic_caption(self, video_id, webpage): +    def _get_available_automatic_caption(self, video_id, webpage):          """We need the webpage for getting the captions url, pass it as an             argument to speed up the process.""" -        sub_lang = (self._downloader.params.get('subtitleslangs') or ['en'])[0]          sub_format = self._downloader.params.get('subtitlesformat')          self.to_screen(u'%s: Looking for automatic captions' % video_id)          mobj = re.search(r';ytplayer.config = ({.*?});', webpage) -        err_msg = u'Couldn\'t find automatic captions for "%s"' % sub_lang +        
err_msg = u'Couldn\'t find automatic captions for %s' % video_id          if mobj is None:              self._downloader.report_warning(err_msg)              return {} @@ -509,53 +494,38 @@ class YoutubeIE(YoutubeBaseInfoExtractor):              args = player_config[u'args']              caption_url = args[u'ttsurl']              timestamp = args[u'timestamp'] -            params = compat_urllib_parse.urlencode({ -                'lang': 'en', -                'tlang': sub_lang, -                'fmt': sub_format, -                'ts': timestamp, -                'kind': 'asr', +            # We get the available subtitles +            list_params = compat_urllib_parse.urlencode({ +                'type': 'list', +                'tlangs': 1, +                'asrs': 1,              }) -            subtitles_url = caption_url + '&' + params -            sub = self._download_webpage(subtitles_url, video_id, u'Downloading automatic captions') -            return {sub_lang: sub} +            list_url = caption_url + '&' + list_params +            list_page = self._download_webpage(list_url, video_id) +            caption_list = xml.etree.ElementTree.fromstring(list_page.encode('utf-8')) +            original_lang_node = caption_list.find('track') +            if original_lang_node.attrib.get('kind') != 'asr' : +                self._downloader.report_warning(u'Video doesn\'t have automatic captions') +                return {} +            original_lang = original_lang_node.attrib['lang_code'] + +            sub_lang_list = {} +            for lang_node in caption_list.findall('target'): +                sub_lang = lang_node.attrib['lang_code'] +                params = compat_urllib_parse.urlencode({ +                    'lang': original_lang, +                    'tlang': sub_lang, +                    'fmt': sub_format, +                    'ts': timestamp, +                    'kind': 'asr', +                }) +                sub_lang_list[sub_lang] = 
caption_url + '&' + params +            return sub_lang_list          # An extractor error can be raise by the download process if there are          # no automatic captions but there are subtitles          except (KeyError, ExtractorError):              self._downloader.report_warning(err_msg)              return {} -     -    def _extract_subtitles(self, video_id): -        """ -        Return a dictionary: {language: subtitles} or {} if the subtitles -        couldn't be found -        """ -        available_subs_list = self._get_available_subtitles(video_id) -        sub_format = self._downloader.params.get('subtitlesformat') -        if  not available_subs_list: #There was some error, it didn't get the available subtitles -            return {} -        if self._downloader.params.get('allsubtitles', False): -            sub_lang_list = available_subs_list -        else: -            if self._downloader.params.get('subtitleslangs', False): -                reqested_langs = self._downloader.params.get('subtitleslangs') -            elif 'en' in available_subs_list: -                reqested_langs = ['en'] -            else: -                reqested_langs = [list(available_subs_list.keys())[0]] - -            sub_lang_list = {} -            for sub_lang in reqested_langs: -                if not sub_lang in available_subs_list: -                    self._downloader.report_warning(u'no closed captions found in the specified language "%s"' % sub_lang) -                    continue -                sub_lang_list[sub_lang] = available_subs_list[sub_lang] -        subtitles = {} -        for sub_lang in sub_lang_list: -            subtitle = self._request_subtitle(sub_lang, sub_lang_list[sub_lang].encode('utf-8'), video_id, sub_format) -            if subtitle: -                subtitles[sub_lang] = subtitle -        return subtitles      def _print_formats(self, formats):          print('Available formats:') @@ -597,13 +567,25 @@ class 
YoutubeIE(YoutubeBaseInfoExtractor):              video_url_list = [(f, url_map[f]) for f in existing_formats] # All formats          else:              # Specific formats. We pick the first in a slash-delimeted sequence. -            # For example, if '1/2/3/4' is requested and '2' and '4' are available, we pick '2'. +            # Format can be specified as itag or 'mp4' or 'flv' etc. We pick the highest quality +            # available in the specified format. For example, +            # if '1/2/3/4' is requested and '2' and '4' are available, we pick '2'. +            # if '1/mp4/3/4' is requested and '1' and '5' (is a mp4) are available, we pick '1'. +            # if '1/mp4/3/4' is requested and '4' and '5' (is a mp4) are available, we pick '5'.              req_formats = req_format.split('/')              video_url_list = None              for rf in req_formats:                  if rf in url_map:                      video_url_list = [(rf, url_map[rf])]                      break +                if rf in self._video_formats_map: +                    for srf in self._video_formats_map[rf]: +                        if srf in url_map: +                            video_url_list = [(srf, url_map[srf])] +                            break +                    else: +                        continue +                    break              if video_url_list is None:                  raise ExtractorError(u'requested format not available')          return video_url_list @@ -743,15 +725,10 @@ class YoutubeIE(YoutubeBaseInfoExtractor):                  video_description = u''          # subtitles -        video_subtitles = None - -        if self._downloader.params.get('writesubtitles', False) or self._downloader.params.get('allsubtitles', False): -            video_subtitles = self._extract_subtitles(video_id) -        elif self._downloader.params.get('writeautomaticsub', False): -            video_subtitles = self._request_automatic_caption(video_id, video_webpage) + 
       video_subtitles = self.extract_subtitles(video_id, video_webpage)          if self._downloader.params.get('listsubtitles', False): -            self._list_available_subtitles(video_id) +            self._list_available_subtitles(video_id, video_webpage)              return          if 'length_seconds' not in video_info: @@ -920,8 +897,11 @@ class YoutubePlaylistIE(InfoExtractor):              for entry in response['feed']['entry']:                  index = entry['yt$position']['$t'] -                if 'media$group' in entry and 'media$player' in entry['media$group']: -                    videos.append((index, entry['media$group']['media$player']['url'])) +                if 'media$group' in entry and 'yt$videoid' in entry['media$group']: +                    videos.append(( +                        index, +                        'https://www.youtube.com/watch?v=' + entry['media$group']['yt$videoid']['$t'] +                    ))          videos = [v[1] for v in sorted(videos)] @@ -987,13 +967,20 @@ class YoutubeChannelIE(InfoExtractor):  class YoutubeUserIE(InfoExtractor):      IE_DESC = u'YouTube.com user videos (URL or "ytuser" keyword)' -    _VALID_URL = r'(?:(?:(?:https?://)?(?:\w+\.)?youtube\.com/user/)|ytuser:)([A-Za-z0-9_-]+)' +    _VALID_URL = r'(?:(?:(?:https?://)?(?:\w+\.)?youtube\.com/(?:user/)?)|ytuser:)(?!feed/)([A-Za-z0-9_-]+)'      _TEMPLATE_URL = 'http://gdata.youtube.com/feeds/api/users/%s'      _GDATA_PAGE_SIZE = 50 -    _GDATA_URL = 'http://gdata.youtube.com/feeds/api/users/%s/uploads?max-results=%d&start-index=%d' -    _VIDEO_INDICATOR = r'/watch\?v=(.+?)[\<&]' +    _GDATA_URL = 'http://gdata.youtube.com/feeds/api/users/%s/uploads?max-results=%d&start-index=%d&alt=json'      IE_NAME = u'youtube:user' +    @classmethod +    def suitable(cls, url): +        # Don't return True if the url can be extracted with other youtube +        # extractor, the regex is too permissive and it would match.
+        other_ies = iter(klass for (name, klass) in globals().items() if name.endswith('IE') and klass is not cls) +        if any(ie.suitable(url) for ie in other_ies): return False +        else: return super(YoutubeUserIE, cls).suitable(url) +      def _real_extract(self, url):          # Extract username          mobj = re.match(self._VALID_URL, url) @@ -1016,13 +1003,15 @@ class YoutubeUserIE(InfoExtractor):              page = self._download_webpage(gdata_url, username,                                            u'Downloading video ids from %d to %d' % (start_index, start_index + self._GDATA_PAGE_SIZE)) +            try: +                response = json.loads(page) +            except ValueError as err: +                raise ExtractorError(u'Invalid JSON in API response: ' + compat_str(err)) +              # Extract video identifiers              ids_in_page = [] - -            for mobj in re.finditer(self._VIDEO_INDICATOR, page): -                if mobj.group(1) not in ids_in_page: -                    ids_in_page.append(mobj.group(1)) - +            for entry in response['feed']['entry']: +                ids_in_page.append(entry['id']['$t'].split('/')[-1])              video_ids.extend(ids_in_page)              # A little optimization - if current page is not @@ -1161,7 +1150,7 @@ class YoutubeWatchLaterIE(YoutubeFeedsInfoExtractor):  class YoutubeFavouritesIE(YoutubeBaseInfoExtractor):      IE_NAME = u'youtube:favorites'      IE_DESC = u'YouTube.com favourite videos, "ytfav" keyword (requires authentication)' -    _VALID_URL = r'https?://www\.youtube\.com/my_favorites|:ytfav(?:o?rites)?' +    _VALID_URL = r'https?://www\.youtube\.com/my_favorites|:ytfav(?:ou?rites)?'      _LOGIN_REQUIRED = True      def _real_extract(self, url): diff --git a/youtube_dl/version.py b/youtube_dl/version.py index c28320181..3b2505c77 100644 --- a/youtube_dl/version.py +++ b/youtube_dl/version.py @@ -1,2 +1,2 @@ -__version__ = '2013.08.29' +__version__ = '2013.09.12' | 
