-rw-r--r--  CHANGELOG | 14
-rw-r--r--  Makefile | 2
-rw-r--r--  README.md | 5
-rwxr-xr-x  devscripts/release.sh | 4
-rw-r--r--  test/test_YoutubeDL.py | 4
-rw-r--r--  test/test_age_restriction.py | 2
-rw-r--r--  test/test_all_urls.py | 3
-rw-r--r--  test/test_playlists.py | 63
-rw-r--r--  test/test_youtube_lists.py | 4
-rw-r--r--  test/test_youtube_signature.py | 12
-rwxr-xr-x  youtube_dl/YoutubeDL.py | 13
-rw-r--r--  youtube_dl/__init__.py | 8
-rw-r--r--  youtube_dl/downloader/hls.py | 2
-rw-r--r--  youtube_dl/downloader/rtmp.py | 5
-rw-r--r--  youtube_dl/extractor/__init__.py | 35
-rw-r--r--  youtube_dl/extractor/aftonbladet.py | 15
-rw-r--r--  youtube_dl/extractor/allocine.py | 89
-rw-r--r--  youtube_dl/extractor/anitube.py | 32
-rw-r--r--  youtube_dl/extractor/ard.py | 44
-rw-r--r--  youtube_dl/extractor/arte.py | 10
-rw-r--r--  youtube_dl/extractor/bandcamp.py | 38
-rw-r--r--  youtube_dl/extractor/bilibili.py | 4
-rw-r--r--  youtube_dl/extractor/blinkx.py | 42
-rw-r--r--  youtube_dl/extractor/bliptv.py | 186
-rw-r--r--  youtube_dl/extractor/br.py | 12
-rw-r--r--  youtube_dl/extractor/brightcove.py | 50
-rw-r--r--  youtube_dl/extractor/cinemassacre.py | 65
-rw-r--r--  youtube_dl/extractor/cmt.py | 24
-rw-r--r--  youtube_dl/extractor/cnn.py | 10
-rw-r--r--  youtube_dl/extractor/comedycentral.py | 4
-rw-r--r--  youtube_dl/extractor/common.py | 26
-rw-r--r--  youtube_dl/extractor/dailymotion.py | 2
-rw-r--r--  youtube_dl/extractor/discovery.py | 4
-rw-r--r--  youtube_dl/extractor/dreisat.py | 30
-rw-r--r--  youtube_dl/extractor/drtv.py | 91
-rw-r--r--  youtube_dl/extractor/empflix.py | 22
-rw-r--r--  youtube_dl/extractor/extremetube.py | 2
-rw-r--r--  youtube_dl/extractor/fc2.py | 9
-rw-r--r--  youtube_dl/extractor/firstpost.py | 7
-rw-r--r--  youtube_dl/extractor/francetv.py | 22
-rw-r--r--  youtube_dl/extractor/gamekings.py | 2
-rw-r--r--  youtube_dl/extractor/generic.py | 80
-rw-r--r--  youtube_dl/extractor/googleplus.py | 3
-rw-r--r--  youtube_dl/extractor/gorillavid.py | 87
-rw-r--r--  youtube_dl/extractor/goshgay.py | 73
-rw-r--r--  youtube_dl/extractor/hypem.py | 69
-rw-r--r--  youtube_dl/extractor/ivi.py | 14
-rw-r--r--  youtube_dl/extractor/ku6.py | 35
-rw-r--r--  youtube_dl/extractor/lifenews.py | 2
-rw-r--r--  youtube_dl/extractor/livestream.py | 115
-rw-r--r--  youtube_dl/extractor/mailru.py | 58
-rw-r--r--  youtube_dl/extractor/motherless.py | 87
-rw-r--r--  youtube_dl/extractor/mpora.py | 2
-rw-r--r--  youtube_dl/extractor/mtv.py | 40
-rw-r--r--  youtube_dl/extractor/naver.py | 34
-rw-r--r--  youtube_dl/extractor/nbc.py | 79
-rw-r--r--  youtube_dl/extractor/ndr.py | 31
-rw-r--r--  youtube_dl/extractor/ndtv.py | 46
-rw-r--r--  youtube_dl/extractor/newstube.py | 15
-rw-r--r--  youtube_dl/extractor/niconico.py | 105
-rw-r--r--  youtube_dl/extractor/ninegag.py | 2
-rw-r--r--  youtube_dl/extractor/noco.py | 6
-rw-r--r--  youtube_dl/extractor/nowness.py | 9
-rw-r--r--  youtube_dl/extractor/npo.py | 62
-rw-r--r--  youtube_dl/extractor/nrk.py | 81
-rw-r--r--  youtube_dl/extractor/ntv.py | 1
-rw-r--r--  youtube_dl/extractor/nuvid.py | 53
-rw-r--r--  youtube_dl/extractor/photobucket.py | 8
-rw-r--r--  youtube_dl/extractor/pornhub.py | 2
-rw-r--r--  youtube_dl/extractor/prosiebensat1.py | 6
-rw-r--r--  youtube_dl/extractor/rai.py | 122
-rw-r--r--  youtube_dl/extractor/slutload.py | 3
-rw-r--r--  youtube_dl/extractor/soundcloud.py | 24
-rw-r--r--  youtube_dl/extractor/soundgasm.py | 40
-rw-r--r--  youtube_dl/extractor/spiegel.py | 41
-rw-r--r--  youtube_dl/extractor/spiegeltv.py | 81
-rw-r--r--  youtube_dl/extractor/steam.py | 2
-rw-r--r--  youtube_dl/extractor/streamcz.py | 22
-rw-r--r--  youtube_dl/extractor/swrmediathek.py | 104
-rw-r--r--  youtube_dl/extractor/tagesschau.py | 79
-rw-r--r--  youtube_dl/extractor/teachertube.py | 124
-rw-r--r--  youtube_dl/extractor/teachingchannel.py | 33
-rw-r--r--  youtube_dl/extractor/ted.py | 2
-rw-r--r--  youtube_dl/extractor/tenplay.py | 84
-rw-r--r--  youtube_dl/extractor/theplatform.py | 25
-rw-r--r--  youtube_dl/extractor/toypics.py | 7
-rw-r--r--  youtube_dl/extractor/tube8.py | 5
-rw-r--r--  youtube_dl/extractor/tumblr.py | 23
-rw-r--r--  youtube_dl/extractor/ustream.py | 21
-rw-r--r--  youtube_dl/extractor/veoh.py | 8
-rw-r--r--  youtube_dl/extractor/vevo.py | 2
-rw-r--r--  youtube_dl/extractor/vh1.py | 124
-rw-r--r--  youtube_dl/extractor/videott.py | 13
-rw-r--r--  youtube_dl/extractor/vimple.py | 86
-rw-r--r--  youtube_dl/extractor/vk.py | 52
-rw-r--r--  youtube_dl/extractor/vodlocker.py | 63
-rw-r--r--  youtube_dl/extractor/vulture.py | 69
-rw-r--r--  youtube_dl/extractor/wdr.py | 15
-rw-r--r--  youtube_dl/extractor/wistia.py | 19
-rw-r--r--  youtube_dl/extractor/wrzuta.py | 81
-rw-r--r--  youtube_dl/extractor/xvideos.py | 17
-rw-r--r--  youtube_dl/extractor/yahoo.py | 18
-rw-r--r--  youtube_dl/extractor/youtube.py | 140
-rw-r--r--  youtube_dl/jsinterp.py | 4
-rw-r--r--  youtube_dl/postprocessor/ffmpeg.py | 3
-rw-r--r--  youtube_dl/postprocessor/xattrpp.py | 3
-rw-r--r--  youtube_dl/utils.py | 25
-rw-r--r--  youtube_dl/version.py | 2
108 files changed, 3059 insertions, 750 deletions
diff --git a/CHANGELOG b/CHANGELOG
deleted file mode 100644
index 3fa116733..000000000
--- a/CHANGELOG
+++ /dev/null
@@ -1,14 +0,0 @@
-2013.01.02 Codename: GIULIA
-
- * Add support for ComedyCentral clips <nto>
- * Corrected Vimeo description fetching <Nick Daniels>
- * Added the --no-post-overwrites argument <Barbu Paul - Gheorghe>
- * --verbose offers more environment info
- * New info_dict field: uploader_id
- * New updates system, with signature checking
- * New IEs: NBA, JustinTV, FunnyOrDie, TweetReel, Steam, Ustream
- * Fixed IEs: BlipTv
- * Fixed for Python 3 IEs: Xvideo, Youku, XNXX, Dailymotion, Vimeo, InfoQ
- * Simplified IEs and test code
- * Various (Python 3 and other) fixes
- * Revamped and expanded tests
diff --git a/Makefile b/Makefile
index a82785861..c079761ef 100644
--- a/Makefile
+++ b/Makefile
@@ -77,6 +77,6 @@ youtube-dl.tar.gz: youtube-dl README.md README.txt youtube-dl.1 youtube-dl.bash-
--exclude 'docs/_build' \
-- \
bin devscripts test youtube_dl docs \
- CHANGELOG LICENSE README.md README.txt \
+ LICENSE README.md README.txt \
Makefile MANIFEST.in youtube-dl.1 youtube-dl.bash-completion setup.py \
youtube-dl
diff --git a/README.md b/README.md
index 2bea609bf..dffdaa9dc 100644
--- a/README.md
+++ b/README.md
@@ -70,8 +70,9 @@ which means you can modify it, redistribute it or use it however you like.
--default-search PREFIX Use this prefix for unqualified URLs. For
example "gvsearch2:" downloads two videos
from google videos for youtube-dl "large
- apple". By default (with value "auto")
- youtube-dl guesses.
+ apple". Use the value "auto" to let
+ youtube-dl guess. The default value "error"
+ just throws an error.
--ignore-config Do not read configuration files. When given
in the global configuration file /etc
/youtube-dl.conf: do not read the user
diff --git a/devscripts/release.sh b/devscripts/release.sh
index 2974a7c3e..453087e5f 100755
--- a/devscripts/release.sh
+++ b/devscripts/release.sh
@@ -45,9 +45,9 @@ fi
/bin/echo -e "\n### Changing version in version.py..."
sed -i "s/__version__ = '.*'/__version__ = '$version'/" youtube_dl/version.py
-/bin/echo -e "\n### Committing CHANGELOG README.md and youtube_dl/version.py..."
+/bin/echo -e "\n### Committing README.md and youtube_dl/version.py..."
make README.md
-git add CHANGELOG README.md youtube_dl/version.py
+git add README.md youtube_dl/version.py
git commit -m "release $version"
/bin/echo -e "\n### Now tagging, signing and pushing..."
diff --git a/test/test_YoutubeDL.py b/test/test_YoutubeDL.py
index 8735013f7..e794cc97f 100644
--- a/test/test_YoutubeDL.py
+++ b/test/test_YoutubeDL.py
@@ -67,7 +67,7 @@ class TestFormatSelection(unittest.TestCase):
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['ext'], 'mp4')
- # No prefer_free_formats => prefer mp4 and flv for greater compatibilty
+ # No prefer_free_formats => prefer mp4 and flv for greater compatibility
ydl = YDL()
ydl.params['prefer_free_formats'] = False
formats = [
@@ -279,7 +279,7 @@ class TestFormatSelection(unittest.TestCase):
self.assertEqual(ydl._format_note({}), '')
assertRegexpMatches(self, ydl._format_note({
'vbr': 10,
- }), '^x\s*10k$')
+ }), '^\s*10k$')
if __name__ == '__main__':
unittest.main()
diff --git a/test/test_age_restriction.py b/test/test_age_restriction.py
index c9cdb96cb..71e80b037 100644
--- a/test/test_age_restriction.py
+++ b/test/test_age_restriction.py
@@ -13,7 +13,7 @@ from youtube_dl import YoutubeDL
def _download_restricted(url, filename, age):
- """ Returns true iff the file has been downloaded """
+ """ Returns true if the file has been downloaded """
params = {
'age_limit': age,
diff --git a/test/test_all_urls.py b/test/test_all_urls.py
index 4b56137ce..2bc81f020 100644
--- a/test/test_all_urls.py
+++ b/test/test_all_urls.py
@@ -69,9 +69,6 @@ class TestAllURLsMatching(unittest.TestCase):
def test_youtube_show_matching(self):
self.assertMatch('http://www.youtube.com/show/airdisasters', ['youtube:show'])
- def test_youtube_truncated(self):
- self.assertMatch('http://www.youtube.com/watch?', ['youtube:truncated_url'])
-
def test_youtube_search_matching(self):
self.assertMatch('http://www.youtube.com/results?search_query=making+mustard', ['youtube:search_url'])
self.assertMatch('https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video', ['youtube:search_url'])
diff --git a/test/test_playlists.py b/test/test_playlists.py
index cc871698a..3a88cf270 100644
--- a/test/test_playlists.py
+++ b/test/test_playlists.py
@@ -28,7 +28,9 @@ from youtube_dl.extractor import (
SoundcloudSetIE,
SoundcloudUserIE,
SoundcloudPlaylistIE,
+ TeacherTubeUserIE,
LivestreamIE,
+ LivestreamOriginalIE,
NHLVideocenterIE,
BambuserChannelIE,
BandcampAlbumIE,
@@ -39,6 +41,7 @@ from youtube_dl.extractor import (
KhanAcademyIE,
EveryonesMixtapeIE,
RutubeChannelIE,
+ RutubePersonIE,
GoogleSearchIE,
GenericIE,
TEDIE,
@@ -113,10 +116,10 @@ class TestPlaylists(unittest.TestCase):
def test_ustream_channel(self):
dl = FakeYDL()
ie = UstreamChannelIE(dl)
- result = ie.extract('http://www.ustream.tv/channel/young-americans-for-liberty')
+ result = ie.extract('http://www.ustream.tv/channel/channeljapan')
self.assertIsPlaylist(result)
- self.assertEqual(result['id'], '5124905')
- self.assertTrue(len(result['entries']) >= 6)
+ self.assertEqual(result['id'], '10874166')
+ self.assertTrue(len(result['entries']) >= 54)
def test_soundcloud_set(self):
dl = FakeYDL()
@@ -134,6 +137,14 @@ class TestPlaylists(unittest.TestCase):
self.assertEqual(result['id'], '9615865')
self.assertTrue(len(result['entries']) >= 12)
+ def test_soundcloud_likes(self):
+ dl = FakeYDL()
+ ie = SoundcloudUserIE(dl)
+ result = ie.extract('https://soundcloud.com/the-concept-band/likes')
+ self.assertIsPlaylist(result)
+ self.assertEqual(result['id'], '9615865')
+ self.assertTrue(len(result['entries']) >= 1)
+
def test_soundcloud_playlist(self):
dl = FakeYDL()
ie = SoundcloudPlaylistIE(dl)
@@ -153,6 +164,14 @@ class TestPlaylists(unittest.TestCase):
self.assertEqual(result['title'], 'TEDCity2.0 (English)')
self.assertTrue(len(result['entries']) >= 4)
+ def test_livestreamoriginal_folder(self):
+ dl = FakeYDL()
+ ie = LivestreamOriginalIE(dl)
+ result = ie.extract('https://www.livestream.com/newplay/folder?dirId=a07bf706-d0e4-4e75-a747-b021d84f2fd3')
+ self.assertIsPlaylist(result)
+ self.assertEqual(result['id'], 'a07bf706-d0e4-4e75-a747-b021d84f2fd3')
+ self.assertTrue(len(result['entries']) >= 28)
+
def test_nhl_videocenter(self):
dl = FakeYDL()
ie = NHLVideocenterIE(dl)
@@ -209,20 +228,20 @@ class TestPlaylists(unittest.TestCase):
def test_ivi_compilation(self):
dl = FakeYDL()
ie = IviCompilationIE(dl)
- result = ie.extract('http://www.ivi.ru/watch/dezhurnyi_angel')
+ result = ie.extract('http://www.ivi.ru/watch/dvoe_iz_lartsa')
self.assertIsPlaylist(result)
- self.assertEqual(result['id'], 'dezhurnyi_angel')
- self.assertEqual(result['title'], 'Дежурный ангел (2010 - 2012)')
- self.assertTrue(len(result['entries']) >= 23)
+ self.assertEqual(result['id'], 'dvoe_iz_lartsa')
+ self.assertEqual(result['title'], 'Двое из ларца (2006 - 2008)')
+ self.assertTrue(len(result['entries']) >= 24)
def test_ivi_compilation_season(self):
dl = FakeYDL()
ie = IviCompilationIE(dl)
- result = ie.extract('http://www.ivi.ru/watch/dezhurnyi_angel/season2')
+ result = ie.extract('http://www.ivi.ru/watch/dvoe_iz_lartsa/season1')
self.assertIsPlaylist(result)
- self.assertEqual(result['id'], 'dezhurnyi_angel/season2')
- self.assertEqual(result['title'], 'Дежурный ангел (2010 - 2012) 2 сезон')
- self.assertTrue(len(result['entries']) >= 7)
+ self.assertEqual(result['id'], 'dvoe_iz_lartsa/season1')
+ self.assertEqual(result['title'], 'Двое из ларца (2006 - 2008) 1 сезон')
+ self.assertTrue(len(result['entries']) >= 12)
def test_imdb_list(self):
dl = FakeYDL()
@@ -255,10 +274,18 @@ class TestPlaylists(unittest.TestCase):
def test_rutube_channel(self):
dl = FakeYDL()
ie = RutubeChannelIE(dl)
- result = ie.extract('http://rutube.ru/tags/video/1409')
+ result = ie.extract('http://rutube.ru/tags/video/1800/')
+ self.assertIsPlaylist(result)
+ self.assertEqual(result['id'], '1800')
+ self.assertTrue(len(result['entries']) >= 68)
+
+ def test_rutube_person(self):
+ dl = FakeYDL()
+ ie = RutubePersonIE(dl)
+ result = ie.extract('http://rutube.ru/video/person/313878/')
self.assertIsPlaylist(result)
- self.assertEqual(result['id'], '1409')
- self.assertTrue(len(result['entries']) >= 34)
+ self.assertEqual(result['id'], '313878')
+ self.assertTrue(len(result['entries']) >= 37)
def test_multiple_brightcove_videos(self):
# https://github.com/rg3/youtube-dl/issues/2283
@@ -360,5 +387,13 @@ class TestPlaylists(unittest.TestCase):
result['title'], 'Brace Yourself - Today\'s Weirdest News')
self.assertTrue(len(result['entries']) >= 10)
+ def test_TeacherTubeUser(self):
+ dl = FakeYDL()
+ ie = TeacherTubeUserIE(dl)
+ result = ie.extract('http://www.teachertube.com/user/profile/rbhagwati2')
+ self.assertIsPlaylist(result)
+ self.assertEqual(result['id'], 'rbhagwati2')
+ self.assertTrue(len(result['entries']) >= 179)
+
if __name__ == '__main__':
unittest.main()
diff --git a/test/test_youtube_lists.py b/test/test_youtube_lists.py
index 7d3b9c705..3aadedd64 100644
--- a/test/test_youtube_lists.py
+++ b/test/test_youtube_lists.py
@@ -112,11 +112,11 @@ class TestYoutubeLists(unittest.TestCase):
def test_youtube_mix(self):
dl = FakeYDL()
ie = YoutubePlaylistIE(dl)
- result = ie.extract('http://www.youtube.com/watch?v=lLJf9qJHR3E&list=RDrjFaenf1T-Y')
+ result = ie.extract('https://www.youtube.com/watch?v=W01L70IGBgE&index=2&list=RDOQpdSVF_k_w')
entries = result['entries']
self.assertTrue(len(entries) >= 20)
original_video = entries[0]
- self.assertEqual(original_video['id'], 'rjFaenf1T-Y')
+ self.assertEqual(original_video['id'], 'OQpdSVF_k_w')
def test_youtube_toptracks(self):
print('Skipping: The playlist page gives error 500')
diff --git a/test/test_youtube_signature.py b/test/test_youtube_signature.py
index 8417c55a6..8d46fe108 100644
--- a/test/test_youtube_signature.py
+++ b/test/test_youtube_signature.py
@@ -33,6 +33,12 @@ _TESTS = [
90,
u']\\[@?>=<;:/.-,+*)(\'&%$#"hZYXWVUTSRQPONMLKJIHGFEDCBAzyxwvutsrqponmlkjiagfedcb39876',
),
+ (
+ u'https://s.ytimg.com/yts/jsbin/html5player-en_US-vflXGBaUN.js',
+ u'js',
+ u'2ACFC7A61CA478CD21425E5A57EBD73DDC78E22A.2094302436B2D377D14A3BBA23022D023B8BC25AA',
+ u'A52CB8B320D22032ABB3A41D773D2B6342034902.A22E87CDD37DBE75A5E52412DC874AC16A7CFCA2',
+ ),
]
@@ -44,7 +50,7 @@ class TestSignature(unittest.TestCase):
os.mkdir(self.TESTDATA_DIR)
-def make_tfunc(url, stype, sig_length, expected_sig):
+def make_tfunc(url, stype, sig_input, expected_sig):
basename = url.rpartition('/')[2]
m = re.match(r'.*-([a-zA-Z0-9_-]+)\.[a-z]+$', basename)
assert m, '%r should follow URL format' % basename
@@ -66,7 +72,9 @@ def make_tfunc(url, stype, sig_length, expected_sig):
with open(fn, 'rb') as testf:
swfcode = testf.read()
func = ie._parse_sig_swf(swfcode)
- src_sig = compat_str(string.printable[:sig_length])
+ src_sig = (
+ compat_str(string.printable[:sig_input])
+ if isinstance(sig_input, int) else sig_input)
got_sig = func(src_sig)
self.assertEqual(got_sig, expected_sig)
diff --git a/youtube_dl/YoutubeDL.py b/youtube_dl/YoutubeDL.py
index f3666573a..3dff723b8 100755
--- a/youtube_dl/YoutubeDL.py
+++ b/youtube_dl/YoutubeDL.py
@@ -717,6 +717,17 @@ class YoutubeDL(object):
info_dict['playlist'] = None
info_dict['playlist_index'] = None
+ thumbnails = info_dict.get('thumbnails')
+ if thumbnails:
+ thumbnails.sort(key=lambda t: (
+ t.get('width'), t.get('height'), t.get('url')))
+ for t in thumbnails:
+ if 'width' in t and 'height' in t:
+ t['resolution'] = '%dx%d' % (t['width'], t['height'])
+
+ if thumbnails and 'thumbnail' not in info_dict:
+ info_dict['thumbnail'] = thumbnails[-1]['url']
+
if 'display_id' not in info_dict and 'id' in info_dict:
info_dict['display_id'] = info_dict['id']
@@ -982,6 +993,8 @@ class YoutubeDL(object):
fd = get_suitable_downloader(info)(self, self.params)
for ph in self._progress_hooks:
fd.add_progress_hook(ph)
+ if self.params.get('verbose'):
+ self.to_stdout('[debug] Invoking downloader on %r' % info.get('url'))
return fd.download(name, info)
if info_dict.get('requested_formats') is not None:
downloaded = []
diff --git a/youtube_dl/__init__.py b/youtube_dl/__init__.py
index 4e657e297..c1f8a401e 100644
--- a/youtube_dl/__init__.py
+++ b/youtube_dl/__init__.py
@@ -56,6 +56,12 @@ __authors__ = (
'Nicolas Évrard',
'Jason Normore',
'Hoje Lee',
+ 'Adam Thalhammer',
+ 'Georg Jähnig',
+ 'Ralf Haring',
+ 'Koki Takahashi',
+ 'Ariset Llerena',
+ 'Adam Malcontenti-Wilson',
)
__license__ = 'Public Domain'
@@ -266,7 +272,7 @@ def parseOpts(overrideArguments=None):
general.add_option(
'--default-search',
dest='default_search', metavar='PREFIX',
- help='Use this prefix for unqualified URLs. For example "gvsearch2:" downloads two videos from google videos for youtube-dl "large apple". By default (with value "auto") youtube-dl guesses.')
+ help='Use this prefix for unqualified URLs. For example "gvsearch2:" downloads two videos from google videos for youtube-dl "large apple". Use the value "auto" to let youtube-dl guess. The default value "error" just throws an error.')
general.add_option(
'--ignore-config',
action='store_true',
diff --git a/youtube_dl/downloader/hls.py b/youtube_dl/downloader/hls.py
index 9d407fe6e..9f29e2f81 100644
--- a/youtube_dl/downloader/hls.py
+++ b/youtube_dl/downloader/hls.py
@@ -25,7 +25,7 @@ class HlsFD(FileDownloader):
except (OSError, IOError):
pass
else:
- self.report_error(u'm3u8 download detected but ffmpeg or avconv could not be found')
+ self.report_error(u'm3u8 download detected but ffmpeg or avconv could not be found. Please install one.')
cmd = [program] + args
retval = subprocess.call(cmd)
diff --git a/youtube_dl/downloader/rtmp.py b/youtube_dl/downloader/rtmp.py
index 78b1e7cd2..68646709a 100644
--- a/youtube_dl/downloader/rtmp.py
+++ b/youtube_dl/downloader/rtmp.py
@@ -96,6 +96,7 @@ class RtmpFD(FileDownloader):
flash_version = info_dict.get('flash_version', None)
live = info_dict.get('rtmp_live', False)
conn = info_dict.get('rtmp_conn', None)
+ protocol = info_dict.get('rtmp_protocol', None)
self.report_destination(filename)
tmpfilename = self.temp_name(filename)
@@ -105,7 +106,7 @@ class RtmpFD(FileDownloader):
try:
subprocess.call(['rtmpdump', '-h'], stdout=(open(os.path.devnull, 'w')), stderr=subprocess.STDOUT)
except (OSError, IOError):
- self.report_error('RTMP download detected but "rtmpdump" could not be run')
+ self.report_error('RTMP download detected but "rtmpdump" could not be run. Please install it.')
return False
# Download using rtmpdump. rtmpdump returns exit code 2 when
@@ -133,6 +134,8 @@ class RtmpFD(FileDownloader):
basic_args += ['--conn', entry]
elif isinstance(conn, compat_str):
basic_args += ['--conn', conn]
+ if protocol is not None:
+ basic_args += ['--protocol', protocol]
args = basic_args + [[], ['--resume', '--skip', '1']][not live and self.params.get('continuedl', False)]
if sys.platform == 'win32' and sys.version_info < (3, 0):
diff --git a/youtube_dl/extractor/__init__.py b/youtube_dl/extractor/__init__.py
index a294f66ae..fcc7d0b58 100644
--- a/youtube_dl/extractor/__init__.py
+++ b/youtube_dl/extractor/__init__.py
@@ -3,6 +3,7 @@ from .addanime import AddAnimeIE
from .aftonbladet import AftonbladetIE
from .anitube import AnitubeIE
from .aol import AolIE
+from .allocine import AllocineIE
from .aparat import AparatIE
from .appletrailers import AppleTrailersIE
from .archiveorg import ArchiveOrgIE
@@ -63,6 +64,7 @@ from .dailymotion import (
from .daum import DaumIE
from .dotsub import DotsubIE
from .dreisat import DreiSatIE
+from .drtv import DRTVIE
from .defense import DefenseGouvFrIE
from .discovery import DiscoveryIE
from .divxstage import DivxStageIE
@@ -110,6 +112,8 @@ from .gdcvault import GDCVaultIE
from .generic import GenericIE
from .googleplus import GooglePlusIE
from .googlesearch import GoogleSearchIE
+from .gorillavid import GorillaVidIE
+from .goshgay import GoshgayIE
from .hark import HarkIE
from .helsinki import HelsinkiIE
from .hentaistigma import HentaiStigmaIE
@@ -143,10 +147,15 @@ from .khanacademy import KhanAcademyIE
from .kickstarter import KickStarterIE
from .keek import KeekIE
from .kontrtube import KontrTubeIE
+from .ku6 import Ku6IE
from .la7 import LA7IE
from .lifenews import LifeNewsIE
from .liveleak import LiveLeakIE
-from .livestream import LivestreamIE, LivestreamOriginalIE
+from .livestream import (
+ LivestreamIE,
+ LivestreamOriginalIE,
+ LivestreamShortenerIE,
+)
from .lynda import (
LyndaIE,
LyndaCourseIE
@@ -164,11 +173,13 @@ from .mpora import MporaIE
from .mofosex import MofosexIE
from .mooshare import MooshareIE
from .morningstar import MorningstarIE
+from .motherless import MotherlessIE
from .motorsport import MotorsportIE
from .moviezine import MoviezineIE
from .movshare import MovShareIE
from .mtv import (
MTVIE,
+ MTVServicesEmbeddedIE,
MTVIggyIE,
)
from .musicplayon import MusicPlayOnIE
@@ -195,7 +206,11 @@ from .normalboots import NormalbootsIE
from .novamov import NovaMovIE
from .nowness import NownessIE
from .nowvideo import NowVideoIE
-from .nrk import NRKIE
+from .npo import NPOIE
+from .nrk import (
+ NRKIE,
+ NRKTVIE,
+)
from .ntv import NTVIE
from .nytimes import NYTimesIE
from .nuvid import NuvidIE
@@ -213,6 +228,7 @@ from .pornotube import PornotubeIE
from .prosiebensat1 import ProSiebenSat1IE
from .pyvideo import PyvideoIE
from .radiofrance import RadioFranceIE
+from .rai import RaiIE
from .rbmaradio import RBMARadioIE
from .redtube import RedTubeIE
from .ringtv import RingTVIE
@@ -249,6 +265,7 @@ from .soundcloud import (
SoundcloudUserIE,
SoundcloudPlaylistIE
)
+from .soundgasm import SoundgasmIE
from .southparkstudios import (
SouthParkStudiosIE,
SouthparkDeIE,
@@ -256,16 +273,25 @@ from .southparkstudios import (
from .space import SpaceIE
from .spankwire import SpankwireIE
from .spiegel import SpiegelIE
+from .spiegeltv import SpiegeltvIE
from .spike import SpikeIE
from .stanfordoc import StanfordOpenClassroomIE
from .steam import SteamIE
from .streamcloud import StreamcloudIE
from .streamcz import StreamCZIE
+from .swrmediathek import SWRMediathekIE
from .syfy import SyfyIE
from .sztvhu import SztvHuIE
+from .tagesschau import TagesschauIE
+from .teachertube import (
+ TeacherTubeIE,
+ TeacherTubeUserIE,
+)
+from .teachingchannel import TeachingChannelIE
from .teamcoco import TeamcocoIE
from .techtalks import TechTalksIE
from .ted import TEDIE
+from .tenplay import TenPlayIE
from .testurl import TestURLIE
from .tf1 import TF1IE
from .theplatform import ThePlatformIE
@@ -295,6 +321,7 @@ from .veehd import VeeHDIE
from .veoh import VeohIE
from .vesti import VestiIE
from .vevo import VevoIE
+from .vh1 import VH1IE
from .viddler import ViddlerIE
from .videobam import VideoBamIE
from .videodetective import VideoDetectiveIE
@@ -312,14 +339,17 @@ from .vimeo import (
VimeoReviewIE,
VimeoWatchLaterIE,
)
+from .vimple import VimpleIE
from .vine import (
VineIE,
VineUserIE,
)
from .viki import VikiIE
from .vk import VKIE
+from .vodlocker import VodlockerIE
from .vube import VubeIE
from .vuclip import VuClipIE
+from .vulture import VultureIE
from .washingtonpost import WashingtonPostIE
from .wat import WatIE
from .wdr import (
@@ -331,6 +361,7 @@ from .weibo import WeiboIE
from .wimp import WimpIE
from .wistia import WistiaIE
from .worldstarhiphop import WorldStarHipHopIE
+from .wrzuta import WrzutaIE
from .xbef import XBefIE
from .xhamster import XHamsterIE
from .xnxx import XNXXIE
diff --git a/youtube_dl/extractor/aftonbladet.py b/youtube_dl/extractor/aftonbladet.py
index 6a8cd14c9..cfc7370ae 100644
--- a/youtube_dl/extractor/aftonbladet.py
+++ b/youtube_dl/extractor/aftonbladet.py
@@ -1,7 +1,6 @@
# encoding: utf-8
from __future__ import unicode_literals
-import datetime
import re
from .common import InfoExtractor
@@ -16,6 +15,7 @@ class AftonbladetIE(InfoExtractor):
'ext': 'mp4',
'title': 'Vulkanutbrott i rymden - nu släpper NASA bilderna',
'description': 'Jupiters måne mest aktiv av alla himlakroppar',
+ 'timestamp': 1394142732,
'upload_date': '20140306',
},
}
@@ -27,17 +27,17 @@ class AftonbladetIE(InfoExtractor):
webpage = self._download_webpage(url, video_id)
# find internal video meta data
- META_URL = 'http://aftonbladet-play.drlib.aptoma.no/video/%s.json'
+ meta_url = 'http://aftonbladet-play.drlib.aptoma.no/video/%s.json'
internal_meta_id = self._html_search_regex(
r'data-aptomaId="([\w\d]+)"', webpage, 'internal_meta_id')
- internal_meta_url = META_URL % internal_meta_id
+ internal_meta_url = meta_url % internal_meta_id
internal_meta_json = self._download_json(
internal_meta_url, video_id, 'Downloading video meta data')
# find internal video formats
- FORMATS_URL = 'http://aftonbladet-play.videodata.drvideo.aptoma.no/actions/video/?id=%s'
+ format_url = 'http://aftonbladet-play.videodata.drvideo.aptoma.no/actions/video/?id=%s'
internal_video_id = internal_meta_json['videoId']
- internal_formats_url = FORMATS_URL % internal_video_id
+ internal_formats_url = format_url % internal_video_id
internal_formats_json = self._download_json(
internal_formats_url, video_id, 'Downloading video formats')
@@ -54,16 +54,13 @@ class AftonbladetIE(InfoExtractor):
})
self._sort_formats(formats)
- timestamp = datetime.datetime.fromtimestamp(internal_meta_json['timePublished'])
- upload_date = timestamp.strftime('%Y%m%d')
-
return {
'id': video_id,
'title': internal_meta_json['title'],
'formats': formats,
'thumbnail': internal_meta_json['imageUrl'],
'description': internal_meta_json['shortPreamble'],
- 'upload_date': upload_date,
+ 'timestamp': internal_meta_json['timePublished'],
'duration': internal_meta_json['duration'],
'view_count': internal_meta_json['views'],
}
diff --git a/youtube_dl/extractor/allocine.py b/youtube_dl/extractor/allocine.py
new file mode 100644
index 000000000..34f0cd49b
--- /dev/null
+++ b/youtube_dl/extractor/allocine.py
@@ -0,0 +1,89 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+import re
+import json
+
+from .common import InfoExtractor
+from ..utils import (
+ compat_str,
+ qualities,
+ determine_ext,
+)
+
+
+class AllocineIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:www\.)?allocine\.fr/(?P<typ>article|video|film)/(fichearticle_gen_carticle=|player_gen_cmedia=|fichefilm_gen_cfilm=)(?P<id>[0-9]+)(?:\.html)?'
+
+ _TESTS = [{
+ 'url': 'http://www.allocine.fr/article/fichearticle_gen_carticle=18635087.html',
+ 'md5': '0c9fcf59a841f65635fa300ac43d8269',
+ 'info_dict': {
+ 'id': '19546517',
+ 'ext': 'mp4',
+ 'title': 'Astérix - Le Domaine des Dieux Teaser VF',
+ 'description': 'md5:4a754271d9c6f16c72629a8a993ee884',
+ 'thumbnail': 're:http://.*\.jpg',
+ },
+ }, {
+ 'url': 'http://www.allocine.fr/video/player_gen_cmedia=19540403&cfilm=222257.html',
+ 'md5': 'd0cdce5d2b9522ce279fdfec07ff16e0',
+ 'info_dict': {
+ 'id': '19540403',
+ 'ext': 'mp4',
+ 'title': 'Planes 2 Bande-annonce VF',
+ 'description': 'md5:c4b1f7bd682a91de6491ada267ec0f4d',
+ 'thumbnail': 're:http://.*\.jpg',
+ },
+ }, {
+ 'url': 'http://www.allocine.fr/film/fichefilm_gen_cfilm=181290.html',
+ 'md5': '101250fb127ef9ca3d73186ff22a47ce',
+ 'info_dict': {
+ 'id': '19544709',
+ 'ext': 'mp4',
+ 'title': 'Dragons 2 - Bande annonce finale VF',
+ 'description': 'md5:e74a4dc750894bac300ece46c7036490',
+ 'thumbnail': 're:http://.*\.jpg',
+ },
+ }]
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ typ = mobj.group('typ')
+ display_id = mobj.group('id')
+
+ webpage = self._download_webpage(url, display_id)
+
+ if typ == 'film':
+ video_id = self._search_regex(r'href="/video/player_gen_cmedia=([0-9]+).+"', webpage, 'video id')
+ else:
+ player = self._search_regex(r'data-player=\'([^\']+)\'>', webpage, 'data player')
+
+ player_data = json.loads(player)
+ video_id = compat_str(player_data['refMedia'])
+
+ xml = self._download_xml('http://www.allocine.fr/ws/AcVisiondataV4.ashx?media=%s' % video_id, display_id)
+
+ video = xml.find('.//AcVisionVideo').attrib
+ quality = qualities(['ld', 'md', 'hd'])
+
+ formats = []
+ for k, v in video.items():
+ if re.match(r'.+_path', k):
+ format_id = k.split('_')[0]
+ formats.append({
+ 'format_id': format_id,
+ 'quality': quality(format_id),
+ 'url': v,
+ 'ext': determine_ext(v),
+ })
+
+ self._sort_formats(formats)
+
+ return {
+ 'id': video_id,
+ 'title': video['videoTitle'],
+ 'thumbnail': self._og_search_thumbnail(webpage),
+ 'formats': formats,
+ 'description': self._og_search_description(webpage),
+ }
diff --git a/youtube_dl/extractor/anitube.py b/youtube_dl/extractor/anitube.py
index 2b019daa9..31f0d417c 100644
--- a/youtube_dl/extractor/anitube.py
+++ b/youtube_dl/extractor/anitube.py
@@ -1,22 +1,24 @@
+from __future__ import unicode_literals
+
import re
from .common import InfoExtractor
class AnitubeIE(InfoExtractor):
- IE_NAME = u'anitube.se'
+ IE_NAME = 'anitube.se'
_VALID_URL = r'https?://(?:www\.)?anitube\.se/video/(?P<id>\d+)'
_TEST = {
- u'url': u'http://www.anitube.se/video/36621',
- u'md5': u'59d0eeae28ea0bc8c05e7af429998d43',
- u'file': u'36621.mp4',
- u'info_dict': {
- u'id': u'36621',
- u'ext': u'mp4',
- u'title': u'Recorder to Randoseru 01',
+ 'url': 'http://www.anitube.se/video/36621',
+ 'md5': '59d0eeae28ea0bc8c05e7af429998d43',
+ 'info_dict': {
+ 'id': '36621',
+ 'ext': 'mp4',
+ 'title': 'Recorder to Randoseru 01',
+ 'duration': 180.19,
},
- u'skip': u'Blocked in the US',
+ 'skip': 'Blocked in the US',
}
def _real_extract(self, url):
@@ -24,13 +26,15 @@ class AnitubeIE(InfoExtractor):
video_id = mobj.group('id')
webpage = self._download_webpage(url, video_id)
- key = self._html_search_regex(r'http://www\.anitube\.se/embed/([A-Za-z0-9_-]*)',
- webpage, u'key')
+ key = self._html_search_regex(
+ r'http://www\.anitube\.se/embed/([A-Za-z0-9_-]*)', webpage, 'key')
- config_xml = self._download_xml('http://www.anitube.se/nuevo/econfig.php?key=%s' % key,
- key)
+ config_xml = self._download_xml(
+ 'http://www.anitube.se/nuevo/econfig.php?key=%s' % key, key)
video_title = config_xml.find('title').text
+ thumbnail = config_xml.find('image').text
+ duration = float(config_xml.find('duration').text)
formats = []
video_url = config_xml.find('file')
@@ -49,5 +53,7 @@ class AnitubeIE(InfoExtractor):
return {
'id': video_id,
'title': video_title,
+ 'thumbnail': thumbnail,
+ 'duration': duration,
'formats': formats
}
diff --git a/youtube_dl/extractor/ard.py b/youtube_dl/extractor/ard.py
index b88f71bc4..b36a4d46a 100644
--- a/youtube_dl/extractor/ard.py
+++ b/youtube_dl/extractor/ard.py
@@ -38,37 +38,43 @@ class ARDIE(InfoExtractor):
webpage = self._download_webpage(url, video_id)
title = self._html_search_regex(
- r'<h1(?:\s+class="boxTopHeadline")?>(.*?)</h1>', webpage, 'title')
+ [r'<h1(?:\s+class="boxTopHeadline")?>(.*?)</h1>',
+ r'<meta name="dcterms.title" content="(.*?)"/>',
+ r'<h4 class="headline">(.*?)</h4>'],
+ webpage, 'title')
description = self._html_search_meta(
'dcterms.abstract', webpage, 'description')
thumbnail = self._og_search_thumbnail(webpage)
- streams = [
- mo.groupdict()
- for mo in re.finditer(
- r'mediaCollection\.addMediaStream\((?P<media_type>\d+), (?P<quality>\d+), "(?P<rtmp_url>[^"]*)", "(?P<video_url>[^"]*)", "[^"]*"\)', webpage)]
+
+ media_info = self._download_json(
+ 'http://www.ardmediathek.de/play/media/%s' % video_id, video_id)
+ # The second element of the _mediaArray contains the standard http urls
+ streams = media_info['_mediaArray'][1]['_mediaStreamArray']
if not streams:
if '"fsk"' in webpage:
raise ExtractorError('This video is only available after 20:00')
formats = []
+
for s in streams:
+ if type(s['_stream']) == list:
+ for index, url in enumerate(s['_stream'][::-1]):
+ quality = s['_quality'] + index
+ formats.append({
+ 'quality': quality,
+ 'url': url,
+ 'format_id': '%s-%s' % (determine_ext(url), quality)
+ })
+ continue
+
format = {
- 'quality': int(s['quality']),
+ 'quality': s['_quality'],
+ 'url': s['_stream'],
}
- if s.get('rtmp_url'):
- format['protocol'] = 'rtmp'
- format['url'] = s['rtmp_url']
- format['playpath'] = s['video_url']
- else:
- format['url'] = s['video_url']
-
- quality_name = self._search_regex(
- r'[,.]([a-zA-Z0-9_-]+),?\.mp4', format['url'],
- 'quality name', default='NA')
- format['format_id'] = '%s-%s-%s-%s' % (
- determine_ext(format['url']), quality_name, s['media_type'],
- s['quality'])
+
+ format['format_id'] = '%s-%s' % (
+ determine_ext(format['url']), format['quality'])
formats.append(format)
diff --git a/youtube_dl/extractor/arte.py b/youtube_dl/extractor/arte.py
index b528a9ec5..9591bad8a 100644
--- a/youtube_dl/extractor/arte.py
+++ b/youtube_dl/extractor/arte.py
@@ -39,7 +39,10 @@ class ArteTvIE(InfoExtractor):
formats = [{
'forma_id': q.attrib['quality'],
- 'url': q.text,
+ # The playpath starts at 'mp4:', if we don't manually
+ # split the url, rtmpdump will incorrectly parse them
+ 'url': q.text.split('mp4:', 1)[0],
+ 'play_path': 'mp4:' + q.text.split('mp4:', 1)[1],
'ext': 'flv',
'quality': 2 if q.attrib['quality'] == 'hd' else 1,
} for q in config.findall('./urls/url')]
@@ -111,7 +114,7 @@ class ArteTVPlus7IE(InfoExtractor):
if not formats:
# Some videos are only available in the 'Originalversion'
# they aren't tagged as being in French or German
- if all(f['versionCode'] == 'VO' for f in all_formats):
+ if all(f['versionCode'] == 'VO' or f['versionCode'] == 'VA' for f in all_formats):
formats = all_formats
else:
raise ExtractorError(u'The formats list is empty')
@@ -189,9 +192,10 @@ class ArteTVFutureIE(ArteTVPlus7IE):
_TEST = {
'url': 'http://future.arte.tv/fr/sujet/info-sciences#article-anchor-7081',
'info_dict': {
- 'id': '050940-003',
+ 'id': '5201',
'ext': 'mp4',
'title': 'Les champignons au secours de la planète',
+ 'upload_date': '20131101',
},
}
diff --git a/youtube_dl/extractor/bandcamp.py b/youtube_dl/extractor/bandcamp.py
index 929aafdff..dcbbdef43 100644
--- a/youtube_dl/extractor/bandcamp.py
+++ b/youtube_dl/extractor/bandcamp.py
@@ -19,7 +19,7 @@ class BandcampIE(InfoExtractor):
'md5': 'c557841d5e50261777a6585648adf439',
'info_dict': {
"title": "youtube-dl \"'/\\\u00e4\u21ad - youtube-dl test song \"'/\\\u00e4\u21ad",
- "duration": 10,
+ "duration": 9.8485,
},
'_skip': 'There is a limit of 200 free downloads / month for the test song'
}]
@@ -28,36 +28,32 @@ class BandcampIE(InfoExtractor):
mobj = re.match(self._VALID_URL, url)
title = mobj.group('title')
webpage = self._download_webpage(url, title)
- # We get the link to the free download page
m_download = re.search(r'freeDownloadPage: "(.*?)"', webpage)
- if m_download is None:
+ if not m_download:
m_trackinfo = re.search(r'trackinfo: (.+),\s*?\n', webpage)
if m_trackinfo:
json_code = m_trackinfo.group(1)
- data = json.loads(json_code)
- d = data[0]
+ data = json.loads(json_code)[0]
- duration = int(round(d['duration']))
formats = []
- for format_id, format_url in d['file'].items():
- ext, _, abr_str = format_id.partition('-')
-
+ for format_id, format_url in data['file'].items():
+ ext, abr_str = format_id.split('-', 1)
formats.append({
'format_id': format_id,
'url': format_url,
- 'ext': format_id.partition('-')[0],
+ 'ext': ext,
'vcodec': 'none',
- 'acodec': format_id.partition('-')[0],
- 'abr': int(format_id.partition('-')[2]),
+ 'acodec': ext,
+ 'abr': int(abr_str),
})
self._sort_formats(formats)
return {
- 'id': compat_str(d['id']),
- 'title': d['title'],
+ 'id': compat_str(data['id']),
+ 'title': data['title'],
'formats': formats,
- 'duration': duration,
+ 'duration': float(data['duration']),
}
else:
raise ExtractorError('No free songs found')
@@ -67,11 +63,9 @@ class BandcampIE(InfoExtractor):
r'var TralbumData = {(.*?)id: (?P<id>\d*?)$',
webpage, re.MULTILINE | re.DOTALL).group('id')
- download_webpage = self._download_webpage(download_link, video_id,
- 'Downloading free downloads page')
- # We get the dictionary of the track from some javascrip code
- info = re.search(r'items: (.*?),$',
- download_webpage, re.MULTILINE).group(1)
+ download_webpage = self._download_webpage(download_link, video_id, 'Downloading free downloads page')
+ # We get the dictionary of the track from some javascript code
+ info = re.search(r'items: (.*?),$', download_webpage, re.MULTILINE).group(1)
info = json.loads(info)[0]
# We pick mp3-320 for now, until format selection can be easily implemented.
mp3_info = info['downloads']['mp3-320']
@@ -100,7 +94,7 @@ class BandcampIE(InfoExtractor):
class BandcampAlbumIE(InfoExtractor):
IE_NAME = 'Bandcamp:album'
- _VALID_URL = r'https?://(?:(?P<subdomain>[^.]+)\.)?bandcamp\.com(?:/album/(?P<title>[^?#]+))?'
+ _VALID_URL = r'https?://(?:(?P<subdomain>[^.]+)\.)?bandcamp\.com(?:/album/(?P<title>[^?#]+))'
_TEST = {
'url': 'http://blazo.bandcamp.com/album/jazz-format-mixtape-vol-1',
@@ -123,7 +117,7 @@ class BandcampAlbumIE(InfoExtractor):
'params': {
'playlistend': 2
},
- 'skip': 'Bancamp imposes download limits. See test_playlists:test_bandcamp_album for the playlist test'
+ 'skip': 'Bandcamp imposes download limits. See test_playlists:test_bandcamp_album for the playlist test'
}
def _real_extract(self, url):
diff --git a/youtube_dl/extractor/bilibili.py b/youtube_dl/extractor/bilibili.py
index 45067b944..0d5889f5d 100644
--- a/youtube_dl/extractor/bilibili.py
+++ b/youtube_dl/extractor/bilibili.py
@@ -13,7 +13,7 @@ from ..utils import (
class BiliBiliIE(InfoExtractor):
- _VALID_URL = r'http://www\.bilibili\.tv/video/av(?P<id>[0-9]+)/'
+ _VALID_URL = r'http://www\.bilibili\.(?:tv|com)/video/av(?P<id>[0-9]+)/'
_TEST = {
'url': 'http://www.bilibili.tv/video/av1074402/',
@@ -56,7 +56,7 @@ class BiliBiliIE(InfoExtractor):
'thumbnailUrl', video_code, 'thumbnail', fatal=False)
player_params = compat_parse_qs(self._html_search_regex(
- r'<iframe .*?class="player" src="https://secure.bilibili.tv/secure,([^"]+)"',
+ r'<iframe .*?class="player" src="https://secure\.bilibili\.(?:tv|com)/secure,([^"]+)"',
webpage, 'player params'))
if 'cid' in player_params:
diff --git a/youtube_dl/extractor/blinkx.py b/youtube_dl/extractor/blinkx.py
index 96408e4e0..7d558e262 100644
--- a/youtube_dl/extractor/blinkx.py
+++ b/youtube_dl/extractor/blinkx.py
@@ -1,13 +1,10 @@
from __future__ import unicode_literals
-import datetime
import json
import re
from .common import InfoExtractor
-from ..utils import (
- remove_start,
-)
+from ..utils import remove_start
class BlinkxIE(InfoExtractor):
@@ -16,18 +13,21 @@ class BlinkxIE(InfoExtractor):
_TEST = {
'url': 'http://www.blinkx.com/ce/8aQUy7GVFYgFzpKhT0oqsilwOGFRVXk3R1ZGWWdGenBLaFQwb3FzaWx3OGFRVXk3R1ZGWWdGenB',
- 'file': '8aQUy7GV.mp4',
'md5': '2e9a07364af40163a908edbf10bb2492',
'info_dict': {
- "title": "Police Car Rolls Away",
- "uploader": "stupidvideos.com",
- "upload_date": "20131215",
- "description": "A police car gently rolls away from a fight. Maybe it felt weird being around a confrontation and just had to get out of there!",
- "duration": 14.886,
- "thumbnails": [{
- "width": 100,
- "height": 76,
- "url": "http://cdn.blinkx.com/stream/b/41/StupidVideos/20131215/1873969261/1873969261_tn_0.jpg",
+ 'id': '8aQUy7GV',
+ 'ext': 'mp4',
+ 'title': 'Police Car Rolls Away',
+ 'uploader': 'stupidvideos.com',
+ 'upload_date': '20131215',
+ 'timestamp': 1387068000,
+ 'description': 'A police car gently rolls away from a fight. Maybe it felt weird being around a confrontation and just had to get out of there!',
+ 'duration': 14.886,
+ 'thumbnails': [{
+ 'width': 100,
+ 'height': 76,
+ 'resolution': '100x76',
+ 'url': 'http://cdn.blinkx.com/stream/b/41/StupidVideos/20131215/1873969261/1873969261_tn_0.jpg',
}],
},
}
@@ -37,13 +37,10 @@ class BlinkxIE(InfoExtractor):
video_id = m.group('id')
display_id = video_id[:8]
- api_url = (u'https://apib4.blinkx.com/api.php?action=play_video&' +
+ api_url = ('https://apib4.blinkx.com/api.php?action=play_video&' +
'video=%s' % video_id)
data_json = self._download_webpage(api_url, display_id)
data = json.loads(data_json)['api']['results'][0]
- dt = datetime.datetime.fromtimestamp(data['pubdate_epoch'])
- pload_date = dt.strftime('%Y%m%d')
-
duration = None
thumbnails = []
formats = []
@@ -58,16 +55,13 @@ class BlinkxIE(InfoExtractor):
duration = m['d']
elif m['type'] == 'youtube':
yt_id = m['link']
- self.to_screen(u'Youtube video detected: %s' % yt_id)
+ self.to_screen('Youtube video detected: %s' % yt_id)
return self.url_result(yt_id, 'Youtube', video_id=yt_id)
elif m['type'] in ('flv', 'mp4'):
vcodec = remove_start(m['vcodec'], 'ff')
acodec = remove_start(m['acodec'], 'ff')
tbr = (int(m['vbr']) + int(m['abr'])) // 1000
- format_id = (u'%s-%sk-%s' %
- (vcodec,
- tbr,
- m['w']))
+ format_id = '%s-%sk-%s' % (vcodec, tbr, m['w'])
formats.append({
'format_id': format_id,
'url': m['link'],
@@ -88,7 +82,7 @@ class BlinkxIE(InfoExtractor):
'title': data['title'],
'formats': formats,
'uploader': data['channel_name'],
- 'upload_date': pload_date,
+ 'timestamp': data['pubdate_epoch'],
'description': data.get('description'),
'thumbnails': thumbnails,
'duration': duration,
diff --git a/youtube_dl/extractor/bliptv.py b/youtube_dl/extractor/bliptv.py
index a26001bb3..acfc4ad73 100644
--- a/youtube_dl/extractor/bliptv.py
+++ b/youtube_dl/extractor/bliptv.py
@@ -1,102 +1,124 @@
from __future__ import unicode_literals
-import datetime
import re
from .common import InfoExtractor
from .subtitles import SubtitlesInfoExtractor
from ..utils import (
- compat_str,
compat_urllib_request,
-
unescapeHTML,
+ parse_iso8601,
+ compat_urlparse,
+ clean_html,
+ compat_str,
)
class BlipTVIE(SubtitlesInfoExtractor):
- """Information extractor for blip.tv"""
-
- _VALID_URL = r'https?://(?:\w+\.)?blip\.tv/((.+/)|(play/)|(api\.swf#))(?P<presumptive_id>.+)$'
-
- _TESTS = [{
- 'url': 'http://blip.tv/cbr/cbr-exclusive-gotham-city-imposters-bats-vs-jokerz-short-3-5796352',
- 'md5': 'c6934ad0b6acf2bd920720ec888eb812',
- 'info_dict': {
- 'id': '5779306',
- 'ext': 'mov',
- 'upload_date': '20111205',
- 'description': 'md5:9bc31f227219cde65e47eeec8d2dc596',
- 'uploader': 'Comic Book Resources - CBR TV',
- 'title': 'CBR EXCLUSIVE: "Gotham City Imposters" Bats VS Jokerz Short 3',
- }
- }, {
- # https://github.com/rg3/youtube-dl/pull/2274
- 'note': 'Video with subtitles',
- 'url': 'http://blip.tv/play/h6Uag5OEVgI.html',
- 'md5': '309f9d25b820b086ca163ffac8031806',
- 'info_dict': {
- 'id': '6586561',
- 'ext': 'mp4',
- 'uploader': 'Red vs. Blue',
- 'description': 'One-Zero-One',
- 'upload_date': '20130614',
- 'title': 'Red vs. Blue Season 11 Episode 1',
+ _VALID_URL = r'https?://(?:\w+\.)?blip\.tv/(?:(?:.+-|rss/flash/)(?P<id>\d+)|((?:play/|api\.swf#)(?P<lookup_id>[\da-zA-Z+]+)))'
+
+ _TESTS = [
+ {
+ 'url': 'http://blip.tv/cbr/cbr-exclusive-gotham-city-imposters-bats-vs-jokerz-short-3-5796352',
+ 'md5': 'c6934ad0b6acf2bd920720ec888eb812',
+ 'info_dict': {
+ 'id': '5779306',
+ 'ext': 'mov',
+ 'title': 'CBR EXCLUSIVE: "Gotham City Imposters" Bats VS Jokerz Short 3',
+ 'description': 'md5:9bc31f227219cde65e47eeec8d2dc596',
+ 'timestamp': 1323138843,
+ 'upload_date': '20111206',
+ 'uploader': 'cbr',
+ 'uploader_id': '679425',
+ 'duration': 81,
+ }
+ },
+ {
+ # https://github.com/rg3/youtube-dl/pull/2274
+ 'note': 'Video with subtitles',
+ 'url': 'http://blip.tv/play/h6Uag5OEVgI.html',
+ 'md5': '309f9d25b820b086ca163ffac8031806',
+ 'info_dict': {
+ 'id': '6586561',
+ 'ext': 'mp4',
+ 'title': 'Red vs. Blue Season 11 Episode 1',
+ 'description': 'One-Zero-One',
+ 'timestamp': 1371261608,
+ 'upload_date': '20130615',
+ 'uploader': 'redvsblue',
+ 'uploader_id': '792887',
+ 'duration': 279,
+ }
}
- }]
+ ]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
- presumptive_id = mobj.group('presumptive_id')
+ lookup_id = mobj.group('lookup_id')
# See https://github.com/rg3/youtube-dl/issues/857
- embed_mobj = re.match(r'https?://(?:\w+\.)?blip\.tv/(?:play/|api\.swf#)([a-zA-Z0-9]+)', url)
- if embed_mobj:
- info_url = 'http://blip.tv/play/%s.x?p=1' % embed_mobj.group(1)
- info_page = self._download_webpage(info_url, embed_mobj.group(1))
- video_id = self._search_regex(
- r'data-episode-id="([0-9]+)', info_page, 'video_id')
- return self.url_result('http://blip.tv/a/a-' + video_id, 'BlipTV')
-
- cchar = '&' if '?' in url else '?'
- json_url = url + cchar + 'skin=json&version=2&no_wrap=1'
- request = compat_urllib_request.Request(json_url)
- request.add_header('User-Agent', 'iTunes/10.6.1')
-
- json_data = self._download_json(request, video_id=presumptive_id)
-
- if 'Post' in json_data:
- data = json_data['Post']
+ if lookup_id:
+ info_page = self._download_webpage(
+ 'http://blip.tv/play/%s.x?p=1' % lookup_id, lookup_id, 'Resolving lookup id')
+ video_id = self._search_regex(r'data-episode-id="([0-9]+)', info_page, 'video_id')
else:
- data = json_data
+ video_id = mobj.group('id')
+
+ rss = self._download_xml('http://blip.tv/rss/flash/%s' % video_id, video_id, 'Downloading video RSS')
+
+ def blip(s):
+ return '{http://blip.tv/dtd/blip/1.0}%s' % s
+
+ def media(s):
+ return '{http://search.yahoo.com/mrss/}%s' % s
+
+ def itunes(s):
+ return '{http://www.itunes.com/dtds/podcast-1.0.dtd}%s' % s
+
+ item = rss.find('channel/item')
+
+ video_id = item.find(blip('item_id')).text
+ title = item.find('./title').text
+ description = clean_html(compat_str(item.find(blip('puredescription')).text))
+ timestamp = parse_iso8601(item.find(blip('datestamp')).text)
+ uploader = item.find(blip('user')).text
+ uploader_id = item.find(blip('userid')).text
+ duration = int(item.find(blip('runtime')).text)
+ media_thumbnail = item.find(media('thumbnail'))
+ thumbnail = media_thumbnail.get('url') if media_thumbnail is not None else item.find(itunes('image')).text
+ categories = [category.text for category in item.findall('category')]
- video_id = compat_str(data['item_id'])
- upload_date = datetime.datetime.strptime(data['datestamp'], '%m-%d-%y %H:%M%p').strftime('%Y%m%d')
- subtitles = {}
formats = []
- if 'additionalMedia' in data:
- for f in data['additionalMedia']:
- if f.get('file_type_srt') == 1:
- LANGS = {
- 'english': 'en',
- }
- lang = f['role'].rpartition('-')[-1].strip().lower()
- langcode = LANGS.get(lang, lang)
- subtitles[langcode] = f['url']
- continue
- if not int(f['media_width']): # filter m3u8
- continue
+ subtitles = {}
+
+ media_group = item.find(media('group'))
+ for media_content in media_group.findall(media('content')):
+ url = media_content.get('url')
+ role = media_content.get(blip('role'))
+ msg = self._download_webpage(
+ url + '?showplayer=20140425131715&referrer=http://blip.tv&mask=7&skin=flashvars&view=url',
+ video_id, 'Resolving URL for %s' % role)
+ real_url = compat_urlparse.parse_qs(msg)['message'][0]
+
+ media_type = media_content.get('type')
+ if media_type == 'text/srt' or url.endswith('.srt'):
+ LANGS = {
+ 'english': 'en',
+ }
+ lang = role.rpartition('-')[-1].strip().lower()
+ langcode = LANGS.get(lang, lang)
+ subtitles[langcode] = url
+ elif media_type.startswith('video/'):
formats.append({
- 'url': f['url'],
- 'format_id': f['role'],
- 'width': int(f['media_width']),
- 'height': int(f['media_height']),
+ 'url': real_url,
+ 'format_id': role,
+ 'format_note': media_type,
+ 'vcodec': media_content.get(blip('vcodec')),
+ 'acodec': media_content.get(blip('acodec')),
+ 'filesize': media_content.get('filesize'),
+ 'width': int(media_content.get('width')),
+ 'height': int(media_content.get('height')),
})
- else:
- formats.append({
- 'url': data['media']['url'],
- 'width': int(data['media']['width']),
- 'height': int(data['media']['height']),
- })
self._sort_formats(formats)
# subtitles
@@ -107,12 +129,14 @@ class BlipTVIE(SubtitlesInfoExtractor):
return {
'id': video_id,
- 'uploader': data['display_name'],
- 'upload_date': upload_date,
- 'title': data['title'],
- 'thumbnail': data['thumbnailUrl'],
- 'description': data['description'],
- 'user_agent': 'iTunes/10.6.1',
+ 'title': title,
+ 'description': description,
+ 'timestamp': timestamp,
+ 'uploader': uploader,
+ 'uploader_id': uploader_id,
+ 'duration': duration,
+ 'thumbnail': thumbnail,
+ 'categories': categories,
'formats': formats,
'subtitles': video_subtitles,
}
diff --git a/youtube_dl/extractor/br.py b/youtube_dl/extractor/br.py
index b5b56ff00..993360714 100644
--- a/youtube_dl/extractor/br.py
+++ b/youtube_dl/extractor/br.py
@@ -17,15 +17,13 @@ class BRIE(InfoExtractor):
_TESTS = [
{
- 'url': 'http://www.br.de/mediathek/video/anselm-gruen-114.html',
- 'md5': 'c4f83cf0f023ba5875aba0bf46860df2',
+ 'url': 'http://www.br.de/mediathek/video/sendungen/heimatsound/heimatsound-festival-2014-trailer-100.html',
+ 'md5': '93556dd2bcb2948d9259f8670c516d59',
'info_dict': {
- 'id': '2c8d81c5-6fb7-4a74-88d4-e768e5856532',
+ 'id': '25e279aa-1ffd-40fd-9955-5325bd48a53a',
'ext': 'mp4',
- 'title': 'Feiern und Verzichten',
- 'description': 'Anselm Grün: Feiern und Verzichten',
- 'uploader': 'BR/Birgit Baier',
- 'upload_date': '20140301',
+ 'title': 'Am 1. und 2. August in Oberammergau',
+ 'description': 'md5:dfd224e5aa6819bc1fcbb7826a932021',
}
},
{
diff --git a/youtube_dl/extractor/brightcove.py b/youtube_dl/extractor/brightcove.py
index 3c02c297a..419951b62 100644
--- a/youtube_dl/extractor/brightcove.py
+++ b/youtube_dl/extractor/brightcove.py
@@ -15,6 +15,7 @@ from ..utils import (
compat_urllib_request,
compat_parse_qs,
+ determine_ext,
ExtractorError,
unsmuggle_url,
unescapeHTML,
@@ -29,10 +30,11 @@ class BrightcoveIE(InfoExtractor):
{
# From http://www.8tv.cat/8aldia/videos/xavier-sala-i-martin-aquesta-tarda-a-8-al-dia/
'url': 'http://c.brightcove.com/services/viewer/htmlFederated?playerID=1654948606001&flashID=myExperience&%40videoPlayer=2371591881001',
- 'file': '2371591881001.mp4',
'md5': '5423e113865d26e40624dce2e4b45d95',
'note': 'Test Brightcove downloads and detection in GenericIE',
'info_dict': {
+ 'id': '2371591881001',
+ 'ext': 'mp4',
'title': 'Xavier Sala i Martín: “Un banc que no presta és un banc zombi que no serveix per a res”',
'uploader': '8TV',
'description': 'md5:a950cc4285c43e44d763d036710cd9cd',
@@ -41,8 +43,9 @@ class BrightcoveIE(InfoExtractor):
{
# From http://medianetwork.oracle.com/video/player/1785452137001
'url': 'http://c.brightcove.com/services/viewer/htmlFederated?playerID=1217746023001&flashID=myPlayer&%40videoPlayer=1785452137001',
- 'file': '1785452137001.flv',
'info_dict': {
+ 'id': '1785452137001',
+ 'ext': 'flv',
'title': 'JVMLS 2012: Arrays 2.0 - Opportunities and Challenges',
'description': 'John Rose speaks at the JVM Language Summit, August 1, 2012.',
'uploader': 'Oracle',
@@ -70,7 +73,20 @@ class BrightcoveIE(InfoExtractor):
'description': 'md5:363109c02998fee92ec02211bd8000df',
'uploader': 'National Ballet of Canada',
},
- }
+ },
+ {
+ # test flv videos served by akamaihd.net
+ # From http://www.redbull.com/en/bike/stories/1331655643987/replay-uci-dh-world-cup-2014-from-fort-william
+ 'url': 'http://c.brightcove.com/services/viewer/htmlFederated?%40videoPlayer=ref%3ABC2996102916001&linkBaseURL=http%3A%2F%2Fwww.redbull.com%2Fen%2Fbike%2Fvideos%2F1331655630249%2Freplay-uci-fort-william-2014-dh&playerKey=AQ%7E%7E%2CAAAApYJ7UqE%7E%2Cxqr_zXk0I-zzNndy8NlHogrCb5QdyZRf&playerID=1398061561001#__youtubedl_smuggle=%7B%22Referer%22%3A+%22http%3A%2F%2Fwww.redbull.com%2Fen%2Fbike%2Fstories%2F1331655643987%2Freplay-uci-dh-world-cup-2014-from-fort-william%22%7D',
+ # The md5 checksum changes on each download
+ 'info_dict': {
+ 'id': '2996102916001',
+ 'ext': 'flv',
+ 'title': 'UCI MTB World Cup 2014: Fort William, UK - Downhill Finals',
+ 'uploader': 'Red Bull TV',
+ 'description': 'UCI MTB World Cup 2014: Fort William, UK - Downhill Finals',
+ },
+ },
]
@classmethod
@@ -187,7 +203,7 @@ class BrightcoveIE(InfoExtractor):
webpage = self._download_webpage(req, video_id)
self.report_extraction(video_id)
- info = self._search_regex(r'var experienceJSON = ({.*?});', webpage, 'json')
+ info = self._search_regex(r'var experienceJSON = ({.*});', webpage, 'json')
info = json.loads(info)['data']
video_info = info['programmedContent']['videoPlayer']['mediaDTO']
video_info['_youtubedl_adServerURL'] = info.get('adServerURL')
@@ -219,12 +235,26 @@ class BrightcoveIE(InfoExtractor):
renditions = video_info.get('renditions')
if renditions:
- renditions = sorted(renditions, key=lambda r: r['size'])
- info['formats'] = [{
- 'url': rend['defaultURL'],
- 'height': rend.get('frameHeight'),
- 'width': rend.get('frameWidth'),
- } for rend in renditions]
+ formats = []
+ for rend in renditions:
+ url = rend['defaultURL']
+ if rend['remote']:
+ # This type of renditions are served through akamaihd.net,
+ # but they don't use f4m manifests
+ url = url.replace('control/', '') + '?&v=3.3.0&fp=13&r=FEEFJ&g=RTSJIMBMPFPB'
+ ext = 'flv'
+ else:
+ ext = determine_ext(url)
+ size = rend.get('size')
+ formats.append({
+ 'url': url,
+ 'ext': ext,
+ 'height': rend.get('frameHeight'),
+ 'width': rend.get('frameWidth'),
+ 'filesize': size if size != 0 else None,
+ })
+ self._sort_formats(formats)
+ info['formats'] = formats
elif video_info.get('FLVFullLengthURL') is not None:
info.update({
'url': video_info['FLVFullLengthURL'],
diff --git a/youtube_dl/extractor/cinemassacre.py b/youtube_dl/extractor/cinemassacre.py
index 2301f61b6..496271be4 100644
--- a/youtube_dl/extractor/cinemassacre.py
+++ b/youtube_dl/extractor/cinemassacre.py
@@ -1,10 +1,12 @@
# encoding: utf-8
from __future__ import unicode_literals
+
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
+ int_or_none,
)
@@ -13,9 +15,10 @@ class CinemassacreIE(InfoExtractor):
_TESTS = [
{
'url': 'http://cinemassacre.com/2012/11/10/avgn-the-movie-trailer/',
- 'file': '19911.mp4',
- 'md5': '782f8504ca95a0eba8fc9177c373eec7',
+ 'md5': 'fde81fbafaee331785f58cd6c0d46190',
'info_dict': {
+ 'id': '19911',
+ 'ext': 'mp4',
'upload_date': '20121110',
'title': '“Angry Video Game Nerd: The Movie” – Trailer',
'description': 'md5:fb87405fcb42a331742a0dce2708560b',
@@ -23,9 +26,10 @@ class CinemassacreIE(InfoExtractor):
},
{
'url': 'http://cinemassacre.com/2013/10/02/the-mummys-hand-1940',
- 'file': '521be8ef82b16.mp4',
- 'md5': 'dec39ee5118f8d9cc067f45f9cbe3a35',
+ 'md5': 'd72f10cd39eac4215048f62ab477a511',
'info_dict': {
+ 'id': '521be8ef82b16',
+ 'ext': 'mp4',
'upload_date': '20131002',
'title': 'The Mummy’s Hand (1940)',
},
@@ -50,29 +54,40 @@ class CinemassacreIE(InfoExtractor):
r'<div class="entry-content">(?P<description>.+?)</div>',
webpage, 'description', flags=re.DOTALL, fatal=False)
- playerdata = self._download_webpage(playerdata_url, video_id)
+ playerdata = self._download_webpage(playerdata_url, video_id, 'Downloading player webpage')
+ video_thumbnail = self._search_regex(
+ r'image: \'(?P<thumbnail>[^\']+)\'', playerdata, 'thumbnail', fatal=False)
+ sd_url = self._search_regex(r'file: \'([^\']+)\', label: \'SD\'', playerdata, 'sd_file')
+ videolist_url = self._search_regex(r'file: \'([^\']+\.smil)\'}', playerdata, 'videolist_url')
- sd_url = self._html_search_regex(r'file: \'([^\']+)\', label: \'SD\'', playerdata, 'sd_file')
- hd_url = self._html_search_regex(
- r'file: \'([^\']+)\', label: \'HD\'', playerdata, 'hd_file',
- default=None)
- video_thumbnail = self._html_search_regex(r'image: \'(?P<thumbnail>[^\']+)\'', playerdata, 'thumbnail', fatal=False)
+ videolist = self._download_xml(videolist_url, video_id, 'Downloading videolist XML')
- formats = [{
- 'url': sd_url,
- 'ext': 'mp4',
- 'format': 'sd',
- 'format_id': 'sd',
- 'quality': 1,
- }]
- if hd_url:
- formats.append({
- 'url': hd_url,
- 'ext': 'mp4',
- 'format': 'hd',
- 'format_id': 'hd',
- 'quality': 2,
- })
+ formats = []
+ baseurl = sd_url[:sd_url.rfind('/')+1]
+ for video in videolist.findall('.//video'):
+ src = video.get('src')
+ if not src:
+ continue
+ file_ = src.partition(':')[-1]
+ width = int_or_none(video.get('width'))
+ height = int_or_none(video.get('height'))
+ bitrate = int_or_none(video.get('system-bitrate'))
+ format = {
+ 'url': baseurl + file_,
+ 'format_id': src.rpartition('.')[0].rpartition('_')[-1],
+ }
+ if width or height:
+ format.update({
+ 'tbr': bitrate // 1000 if bitrate else None,
+ 'width': width,
+ 'height': height,
+ })
+ else:
+ format.update({
+ 'abr': bitrate // 1000 if bitrate else None,
+ 'vcodec': 'none',
+ })
+ formats.append(format)
self._sort_formats(formats)
return {
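The new SMIL handling above maps each <video> node of the player's videolist onto a format dict, taking width/height from the node attributes and treating entries without them as audio-only. A minimal standalone sketch of the same mapping, run against an illustrative videolist and base URL (both invented here, not taken from the commit):

    import xml.etree.ElementTree as ET

    VIDEOLIST = ET.fromstring(
        '<smil><body><switch>'
        '<video src="mp4:Cinemassacre_19911_high.mp4" width="1280" height="720" system-bitrate="2500000"/>'
        '<video src="mp3:Cinemassacre_19911_audio.mp3" system-bitrate="128000"/>'
        '</switch></body></smil>')

    def formats_from_videolist(videolist, baseurl):
        formats = []
        for video in videolist.findall('.//video'):
            src = video.get('src')
            if not src:
                continue
            path = src.partition(':')[-1]  # drop the 'mp4:'/'mp3:' prefix
            bitrate = video.get('system-bitrate')
            formats.append({
                'url': baseurl + path,
                'format_id': src.rpartition('.')[0].rpartition('_')[-1],  # e.g. 'high'
                'tbr': int(bitrate) // 1000 if bitrate else None,
            })
        return formats

    print(formats_from_videolist(VIDEOLIST, 'http://example.com/player/'))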
diff --git a/youtube_dl/extractor/cmt.py b/youtube_dl/extractor/cmt.py
index 88e0e9aba..e96c59f71 100644
--- a/youtube_dl/extractor/cmt.py
+++ b/youtube_dl/extractor/cmt.py
@@ -1,19 +1,19 @@
+from __future__ import unicode_literals
from .mtv import MTVIE
+
class CMTIE(MTVIE):
- IE_NAME = u'cmt.com'
+ IE_NAME = 'cmt.com'
_VALID_URL = r'https?://www\.cmt\.com/videos/.+?/(?P<videoid>[^/]+)\.jhtml'
_FEED_URL = 'http://www.cmt.com/sitewide/apps/player/embed/rss/'
- _TESTS = [
- {
- u'url': u'http://www.cmt.com/videos/garth-brooks/989124/the-call-featuring-trisha-yearwood.jhtml#artist=30061',
- u'md5': u'e6b7ef3c4c45bbfae88061799bbba6c2',
- u'info_dict': {
- u'id': u'989124',
- u'ext': u'mp4',
- u'title': u'Garth Brooks - "The Call (featuring Trisha Yearwood)"',
- u'description': u'Blame It All On My Roots',
- },
+ _TESTS = [{
+ 'url': 'http://www.cmt.com/videos/garth-brooks/989124/the-call-featuring-trisha-yearwood.jhtml#artist=30061',
+ 'md5': 'e6b7ef3c4c45bbfae88061799bbba6c2',
+ 'info_dict': {
+ 'id': '989124',
+ 'ext': 'mp4',
+ 'title': 'Garth Brooks - "The Call (featuring Trisha Yearwood)"',
+ 'description': 'Blame It All On My Roots',
},
- ]
+ }]
diff --git a/youtube_dl/extractor/cnn.py b/youtube_dl/extractor/cnn.py
index b32cb8980..dae40c136 100644
--- a/youtube_dl/extractor/cnn.py
+++ b/youtube_dl/extractor/cnn.py
@@ -79,8 +79,11 @@ class CNNIE(InfoExtractor):
self._sort_formats(formats)
- thumbnails = sorted([((int(t.attrib['height']),int(t.attrib['width'])), t.text) for t in info.findall('images/image')])
- thumbs_dict = [{'resolution': res, 'url': t_url} for (res, t_url) in thumbnails]
+ thumbnails = [{
+ 'height': int(t.attrib['height']),
+ 'width': int(t.attrib['width']),
+ 'url': t.text,
+ } for t in info.findall('images/image')]
metas_el = info.find('metas')
upload_date = (
@@ -93,8 +96,7 @@ class CNNIE(InfoExtractor):
'id': info.attrib['id'],
'title': info.find('headline').text,
'formats': formats,
- 'thumbnail': thumbnails[-1][1],
- 'thumbnails': thumbs_dict,
+ 'thumbnails': thumbnails,
'description': info.find('description').text,
'duration': duration,
'upload_date': upload_date,
diff --git a/youtube_dl/extractor/comedycentral.py b/youtube_dl/extractor/comedycentral.py
index 6e3a316c6..8af0abade 100644
--- a/youtube_dl/extractor/comedycentral.py
+++ b/youtube_dl/extractor/comedycentral.py
@@ -130,7 +130,7 @@ class ComedyCentralShowsIE(InfoExtractor):
raise ExtractorError('Invalid redirected URL: ' + url)
if mobj.group('episode') == '':
raise ExtractorError('Redirected URL is still not specific: ' + url)
- epTitle = mobj.group('episode').rpartition('/')[-1]
+ epTitle = (mobj.group('episode') or mobj.group('videotitle')).rpartition('/')[-1]
mMovieParams = re.findall('(?:<param name="movie" value="|var url = ")(http://media.mtvnservices.com/([^"]*(?:episode|video).*?:.*?))"', webpage)
if len(mMovieParams) == 0:
@@ -188,7 +188,7 @@ class ComedyCentralShowsIE(InfoExtractor):
})
formats.append({
'format_id': 'rtmp-%s' % format,
- 'url': rtmp_video_url,
+ 'url': rtmp_video_url.replace('viacomccstrm', 'viacommtvstrm'),
'ext': self._video_extensions.get(format, 'mp4'),
'height': h,
'width': w,
diff --git a/youtube_dl/extractor/common.py b/youtube_dl/extractor/common.py
index 11b31db88..f1ed30704 100644
--- a/youtube_dl/extractor/common.py
+++ b/youtube_dl/extractor/common.py
@@ -1,11 +1,12 @@
import base64
import hashlib
import json
+import netrc
import os
import re
import socket
import sys
-import netrc
+import time
import xml.etree.ElementTree
from ..utils import (
@@ -92,8 +93,12 @@ class InfoExtractor(object):
unique, but available before title. Typically, id is
something like "4234987", title "Dancing naked mole rats",
and display_id "dancing-naked-mole-rats"
- thumbnails: A list of dictionaries (with the entries "resolution" and
- "url") for the varying thumbnails
+ thumbnails: A list of dictionaries, with the following entries:
+ * "url"
+ * "width" (optional, int)
+ * "height" (optional, int)
+ * "resolution" (optional, string "{width}x{height"},
+ deprecated)
thumbnail: Full URL to a video thumbnail image.
description: One-line video description.
uploader: Full name of the video uploader.
@@ -113,6 +118,8 @@ class InfoExtractor(object):
webpage_url: The url to the video webpage, if given to youtube-dl it
should allow to get the same result again. (It will be set
by YoutubeDL if it's missing)
+ categories: A list of categories that the video falls in, for example
+ ["Sports", "Berlin"]
Unless mentioned otherwise, the fields should be Unicode strings.
@@ -242,7 +249,7 @@ class InfoExtractor(object):
url = url_or_request.get_full_url()
except AttributeError:
url = url_or_request
- basen = video_id + '_' + url
+ basen = '%s_%s' % (video_id, url)
if len(basen) > 240:
h = u'___' + hashlib.md5(basen.encode('utf-8')).hexdigest()
basen = basen[:240 - len(h)] + h
@@ -453,6 +460,9 @@ class InfoExtractor(object):
if secure: regexes = self._og_regexes('video:secure_url') + regexes
return self._html_search_regex(regexes, html, name, **kargs)
+ def _og_search_url(self, html, **kargs):
+ return self._og_search_property('url', html, **kargs)
+
def _html_search_meta(self, name, html, display_name=None, fatal=False):
if display_name is None:
display_name = name
@@ -566,6 +576,13 @@ class InfoExtractor(object):
else:
return url
+ def _sleep(self, timeout, video_id, msg_template=None):
+ if msg_template is None:
+ msg_template = u'%(video_id)s: Waiting for %(timeout)s seconds'
+ msg = msg_template % {'video_id': video_id, 'timeout': timeout}
+ self.to_screen(msg)
+ time.sleep(timeout)
+
class SearchInfoExtractor(InfoExtractor):
"""
@@ -609,4 +626,3 @@ class SearchInfoExtractor(InfoExtractor):
@property
def SEARCH_KEY(self):
return self._SEARCH_KEY
-
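The common.py docstring change above turns 'thumbnails' into a list of dicts and introduces the 'categories' field. A sketch of the shape an extractor's info dict would now take under that contract (all values below are illustrative, not from the commit):

    def _example_info():
        return {
            'id': '12345',
            'title': 'Example clip',
            'thumbnails': [
                {'url': 'http://example.com/thumb_small.jpg', 'width': 120, 'height': 90},
                {'url': 'http://example.com/thumb_large.jpg', 'width': 1280, 'height': 720},
            ],
            'categories': ['Sports', 'Berlin'],
        }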
diff --git a/youtube_dl/extractor/dailymotion.py b/youtube_dl/extractor/dailymotion.py
index 55216201f..5d0bfe454 100644
--- a/youtube_dl/extractor/dailymotion.py
+++ b/youtube_dl/extractor/dailymotion.py
@@ -150,7 +150,7 @@ class DailymotionIE(DailymotionBaseInfoExtractor, SubtitlesInfoExtractor):
return {
'id': video_id,
'formats': formats,
- 'uploader': info['owner_screenname'],
+ 'uploader': info['owner.screenname'],
'upload_date': video_upload_date,
'title': self._og_search_title(webpage),
'subtitles': video_subtitles,
diff --git a/youtube_dl/extractor/discovery.py b/youtube_dl/extractor/discovery.py
index 2ae6ecc12..554df6735 100644
--- a/youtube_dl/extractor/discovery.py
+++ b/youtube_dl/extractor/discovery.py
@@ -7,9 +7,9 @@ from .common import InfoExtractor
class DiscoveryIE(InfoExtractor):
- _VALID_URL = r'http://dsc\.discovery\.com\/[a-zA-Z0-9\-]*/[a-zA-Z0-9\-]*/videos/(?P<id>[a-zA-Z0-9\-]*)(.htm)?'
+ _VALID_URL = r'http://www\.discovery\.com\/[a-zA-Z0-9\-]*/[a-zA-Z0-9\-]*/videos/(?P<id>[a-zA-Z0-9\-]*)(.htm)?'
_TEST = {
- 'url': 'http://dsc.discovery.com/tv-shows/mythbusters/videos/mission-impossible-outtakes.htm',
+ 'url': 'http://www.discovery.com/tv-shows/mythbusters/videos/mission-impossible-outtakes.htm',
'md5': 'e12614f9ee303a6ccef415cb0793eba2',
'info_dict': {
'id': '614784',
diff --git a/youtube_dl/extractor/dreisat.py b/youtube_dl/extractor/dreisat.py
index 0b11d1f10..69ca75423 100644
--- a/youtube_dl/extractor/dreisat.py
+++ b/youtube_dl/extractor/dreisat.py
@@ -1,39 +1,37 @@
-# coding: utf-8
+from __future__ import unicode_literals
import re
from .common import InfoExtractor
-from ..utils import (
- unified_strdate,
-)
+from ..utils import unified_strdate
class DreiSatIE(InfoExtractor):
IE_NAME = '3sat'
_VALID_URL = r'(?:http://)?(?:www\.)?3sat\.de/mediathek/(?:index\.php)?\?(?:(?:mode|display)=[^&]+&)*obj=(?P<id>[0-9]+)$'
_TEST = {
- u"url": u"http://www.3sat.de/mediathek/index.php?obj=36983",
- u'file': u'36983.mp4',
- u'md5': u'9dcfe344732808dbfcc901537973c922',
- u'info_dict': {
- u"title": u"Kaffeeland Schweiz",
- u"description": u"Über 80 Kaffeeröstereien liefern in der Schweiz das Getränk, in das das Land so vernarrt ist: Mehr als 1000 Tassen trinkt ein Schweizer pro Jahr. SCHWEIZWEIT nimmt die Kaffeekultur unter die...",
- u"uploader": u"3sat",
- u"upload_date": u"20130622"
+ 'url': 'http://www.3sat.de/mediathek/index.php?obj=36983',
+ 'md5': '9dcfe344732808dbfcc901537973c922',
+ 'info_dict': {
+ 'id': '36983',
+ 'ext': 'mp4',
+ 'title': 'Kaffeeland Schweiz',
+ 'description': 'md5:cc4424b18b75ae9948b13929a0814033',
+ 'uploader': '3sat',
+ 'upload_date': '20130622'
}
}
-
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
details_url = 'http://www.3sat.de/mediathek/xmlservice/web/beitragsDetails?ak=web&id=%s' % video_id
- details_doc = self._download_xml(details_url, video_id, note=u'Downloading video details')
+ details_doc = self._download_xml(details_url, video_id, 'Downloading video details')
thumbnail_els = details_doc.findall('.//teaserimage')
thumbnails = [{
- 'width': te.attrib['key'].partition('x')[0],
- 'height': te.attrib['key'].partition('x')[2],
+ 'width': int(te.attrib['key'].partition('x')[0]),
+ 'height': int(te.attrib['key'].partition('x')[2]),
'url': te.text,
} for te in thumbnail_els]
diff --git a/youtube_dl/extractor/drtv.py b/youtube_dl/extractor/drtv.py
new file mode 100644
index 000000000..cdccfd376
--- /dev/null
+++ b/youtube_dl/extractor/drtv.py
@@ -0,0 +1,91 @@
+from __future__ import unicode_literals
+
+import re
+
+from .subtitles import SubtitlesInfoExtractor
+from .common import ExtractorError
+from ..utils import parse_iso8601
+
+
+class DRTVIE(SubtitlesInfoExtractor):
+ _VALID_URL = r'http://(?:www\.)?dr\.dk/tv/se/[^/]+/(?P<id>[\da-z-]+)'
+
+ _TEST = {
+ 'url': 'http://www.dr.dk/tv/se/partiets-mand/partiets-mand-7-8',
+ 'md5': '4a7e1dd65cdb2643500a3f753c942f25',
+ 'info_dict': {
+ 'id': 'partiets-mand-7-8',
+ 'ext': 'mp4',
+ 'title': 'Partiets mand (7:8)',
+ 'description': 'md5:a684b90a8f9336cd4aab94b7647d7862',
+ 'timestamp': 1403047940,
+ 'upload_date': '20140617',
+ 'duration': 1299.040,
+ },
+ }
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ video_id = mobj.group('id')
+
+ programcard = self._download_json(
+ 'http://www.dr.dk/mu/programcard/expanded/%s' % video_id, video_id, 'Downloading video JSON')
+
+ data = programcard['Data'][0]
+
+ title = data['Title']
+ description = data['Description']
+ timestamp = parse_iso8601(data['CreatedTime'][:-5])
+
+ thumbnail = None
+ duration = None
+
+ restricted_to_denmark = False
+
+ formats = []
+ subtitles = {}
+
+ for asset in data['Assets']:
+ if asset['Kind'] == 'Image':
+ thumbnail = asset['Uri']
+ elif asset['Kind'] == 'VideoResource':
+ duration = asset['DurationInMilliseconds'] / 1000.0
+ restricted_to_denmark = asset['RestrictedToDenmark']
+ for link in asset['Links']:
+ target = link['Target']
+ uri = link['Uri']
+ formats.append({
+ 'url': uri + '?hdcore=3.3.0&plugin=aasp-3.3.0.99.43' if target == 'HDS' else uri,
+ 'format_id': target,
+ 'ext': link['FileFormat'],
+ 'preference': -1 if target == 'HDS' else -2,
+ })
+ subtitles_list = asset.get('SubtitlesList')
+ if isinstance(subtitles_list, list):
+ LANGS = {
+ 'Danish': 'dk',
+ }
+ for subs in subtitles_list:
+ lang = subs['Language']
+ subtitles[LANGS.get(lang, lang)] = subs['Uri']
+
+ if not formats and restricted_to_denmark:
+ raise ExtractorError(
+ 'Unfortunately, DR is not allowed to show this program outside Denmark.', expected=True)
+
+ self._sort_formats(formats)
+
+ if self._downloader.params.get('listsubtitles', False):
+ self._list_available_subtitles(video_id, subtitles)
+ return
+
+ return {
+ 'id': video_id,
+ 'title': title,
+ 'description': description,
+ 'thumbnail': thumbnail,
+ 'timestamp': timestamp,
+ 'duration': duration,
+ 'formats': formats,
+ 'subtitles': self.extract_subtitles(video_id, subtitles),
+ }
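The new DRTV extractor trims the last five characters of CreatedTime before handing it to parse_iso8601, which assumes timestamps of the form '2014-06-17T22:12:20+02:00' where the slice removes the UTC offset. A quick sketch of that assumption (the input string is illustrative):

    from youtube_dl.utils import parse_iso8601

    created = '2014-06-17T22:12:20+02:00'
    timestamp = parse_iso8601(created[:-5])
    # POSIX time for 2014-06-17T22:12:20 interpreted as UTC, since the offset was cut off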
diff --git a/youtube_dl/extractor/empflix.py b/youtube_dl/extractor/empflix.py
index eaeee5a51..e6952588f 100644
--- a/youtube_dl/extractor/empflix.py
+++ b/youtube_dl/extractor/empflix.py
@@ -3,20 +3,18 @@ from __future__ import unicode_literals
import re
from .common import InfoExtractor
-from ..utils import (
- ExtractorError,
-)
class EmpflixIE(InfoExtractor):
_VALID_URL = r'^https?://www\.empflix\.com/videos/.*?-(?P<id>[0-9]+)\.html'
_TEST = {
'url': 'http://www.empflix.com/videos/Amateur-Finger-Fuck-33051.html',
- 'md5': '5e5cc160f38ca9857f318eb97146e13e',
+ 'md5': 'b1bc15b6412d33902d6e5952035fcabc',
'info_dict': {
'id': '33051',
- 'ext': 'flv',
+ 'ext': 'mp4',
'title': 'Amateur Finger Fuck',
+ 'description': 'Amateur solo finger fucking.',
'age_limit': 18,
}
}
@@ -30,6 +28,8 @@ class EmpflixIE(InfoExtractor):
video_title = self._html_search_regex(
r'name="title" value="(?P<title>[^"]*)"', webpage, 'title')
+ video_description = self._html_search_regex(
+ r'name="description" value="([^"]*)"', webpage, 'description', fatal=False)
cfg_url = self._html_search_regex(
r'flashvars\.config = escape\("([^"]+)"',
@@ -37,12 +37,18 @@ class EmpflixIE(InfoExtractor):
cfg_xml = self._download_xml(
cfg_url, video_id, note='Downloading metadata')
- video_url = cfg_xml.find('videoLink').text
+
+ formats = [
+ {
+ 'url': item.find('videoLink').text,
+ 'format_id': item.find('res').text,
+ } for item in cfg_xml.findall('./quality/item')
+ ]
return {
'id': video_id,
- 'url': video_url,
- 'ext': 'flv',
'title': video_title,
+ 'description': video_description,
+ 'formats': formats,
'age_limit': age_limit,
}
diff --git a/youtube_dl/extractor/extremetube.py b/youtube_dl/extractor/extremetube.py
index ff7c0cd3e..14a196ffc 100644
--- a/youtube_dl/extractor/extremetube.py
+++ b/youtube_dl/extractor/extremetube.py
@@ -37,7 +37,7 @@ class ExtremeTubeIE(InfoExtractor):
webpage = self._download_webpage(req, video_id)
video_title = self._html_search_regex(
- r'<h1 [^>]*?title="([^"]+)"[^>]*>\1<', webpage, 'title')
+ r'<h1 [^>]*?title="([^"]+)"[^>]*>', webpage, 'title')
uploader = self._html_search_regex(
r'>Posted by:(?=<)(?:\s|<[^>]*>)*(.+?)\|', webpage, 'uploader',
fatal=False)
diff --git a/youtube_dl/extractor/fc2.py b/youtube_dl/extractor/fc2.py
index ca8993241..c663a0f81 100644
--- a/youtube_dl/extractor/fc2.py
+++ b/youtube_dl/extractor/fc2.py
@@ -13,7 +13,7 @@ from ..utils import (
class FC2IE(InfoExtractor):
- _VALID_URL = r'^http://video\.fc2\.com/(?P<lang>[^/]+)/content/(?P<id>[^/]+)'
+ _VALID_URL = r'^http://video\.fc2\.com/((?P<lang>[^/]+)/)?content/(?P<id>[^/]+)'
IE_NAME = 'fc2'
_TEST = {
'url': 'http://video.fc2.com/en/content/20121103kUan1KHs',
@@ -36,7 +36,7 @@ class FC2IE(InfoExtractor):
thumbnail = self._og_search_thumbnail(webpage)
refer = url.replace('/content/', '/a/content/')
- mimi = hashlib.md5(video_id + '_gGddgPfeaf_gzyr').hexdigest()
+ mimi = hashlib.md5((video_id + '_gGddgPfeaf_gzyr').encode('utf-8')).hexdigest()
info_url = (
"http://video.fc2.com/ginfo.php?mimi={1:s}&href={2:s}&v={0:s}&fversion=WIN%2011%2C6%2C602%2C180&from=2&otag=0&upid={0:s}&tk=null&".
@@ -50,10 +50,13 @@ class FC2IE(InfoExtractor):
raise ExtractorError('Error code: %s' % info['err_code'][0])
video_url = info['filepath'][0] + '?mid=' + info['mid'][0]
+ title_info = info.get('title')
+ if title_info:
+ title = title_info[0]
+ else:
+ title = self._og_search_title(webpage, fatal=False)
return {
'id': video_id,
- 'title': info['title'][0],
+ 'title': title,
'url': video_url,
'ext': 'flv',
'thumbnail': thumbnail,
diff --git a/youtube_dl/extractor/firstpost.py b/youtube_dl/extractor/firstpost.py
index eccd8dde9..0993af1c9 100644
--- a/youtube_dl/extractor/firstpost.py
+++ b/youtube_dl/extractor/firstpost.py
@@ -15,6 +15,7 @@ class FirstpostIE(InfoExtractor):
'id': '1025403',
'ext': 'mp4',
'title': 'India to launch indigenous aircraft carrier INS Vikrant today',
+ 'description': 'md5:feef3041cb09724e0bdc02843348f5f4',
}
}
@@ -22,13 +23,16 @@ class FirstpostIE(InfoExtractor):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
+ page = self._download_webpage(url, video_id)
+ title = self._html_search_meta('twitter:title', page, 'title')
+ description = self._html_search_meta('twitter:description', page, 'description')
+
data = self._download_xml(
'http://www.firstpost.com/getvideoxml-%s.xml' % video_id, video_id,
'Downloading video XML')
item = data.find('./playlist/item')
thumbnail = item.find('./image').text
- title = item.find('./title').text
formats = [
{
@@ -42,6 +46,7 @@ class FirstpostIE(InfoExtractor):
return {
'id': video_id,
'title': title,
+ 'description': description,
'thumbnail': thumbnail,
'formats': formats,
}
diff --git a/youtube_dl/extractor/francetv.py b/youtube_dl/extractor/francetv.py
index 51eb97b2f..f3e0f38b7 100644
--- a/youtube_dl/extractor/francetv.py
+++ b/youtube_dl/extractor/francetv.py
@@ -48,24 +48,36 @@ class PluzzIE(FranceTVBaseInfoExtractor):
class FranceTvInfoIE(FranceTVBaseInfoExtractor):
IE_NAME = 'francetvinfo.fr'
- _VALID_URL = r'https?://www\.francetvinfo\.fr/replay.*/(?P<title>.+)\.html'
+ _VALID_URL = r'https?://www\.francetvinfo\.fr/.*/(?P<title>.+)\.html'
- _TEST = {
+ _TESTS = [{
'url': 'http://www.francetvinfo.fr/replay-jt/france-3/soir-3/jt-grand-soir-3-lundi-26-aout-2013_393427.html',
- 'file': '84981923.mp4',
'info_dict': {
+ 'id': '84981923',
+ 'ext': 'mp4',
'title': 'Soir 3',
},
'params': {
'skip_download': True,
},
- }
+ }, {
+ 'url': 'http://www.francetvinfo.fr/elections/europeennes/direct-europeennes-regardez-le-debat-entre-les-candidats-a-la-presidence-de-la-commission_600639.html',
+ 'info_dict': {
+ 'id': 'EV_20019',
+ 'ext': 'mp4',
+ 'title': 'Débat des candidats à la Commission européenne',
+ 'description': 'Débat des candidats à la Commission européenne',
+ },
+ 'params': {
+ 'skip_download': 'HLS (requires ffmpeg)'
+ }
+ }]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
page_title = mobj.group('title')
webpage = self._download_webpage(url, page_title)
- video_id = self._search_regex(r'id-video=(\d+?)[@"]', webpage, 'video id')
+ video_id = self._search_regex(r'id-video=((?:[^0-9]*?_)?[0-9]+)[@"]', webpage, 'video id')
return self._extract_video(video_id)
diff --git a/youtube_dl/extractor/gamekings.py b/youtube_dl/extractor/gamekings.py
index 233398966..11fee3d31 100644
--- a/youtube_dl/extractor/gamekings.py
+++ b/youtube_dl/extractor/gamekings.py
@@ -15,7 +15,7 @@ class GamekingsIE(InfoExtractor):
'id': '20130811',
'ext': 'mp4',
'title': 'Phoenix Wright: Ace Attorney \u2013 Dual Destinies Review',
- 'description': 'md5:632e61a9f97d700e83f43d77ddafb6a4',
+ 'description': 'md5:36fd701e57e8c15ac8682a2374c99731',
}
}
diff --git a/youtube_dl/extractor/generic.py b/youtube_dl/extractor/generic.py
index 1ae55dc5a..f97b59845 100644
--- a/youtube_dl/extractor/generic.py
+++ b/youtube_dl/extractor/generic.py
@@ -260,7 +260,35 @@ class GenericIE(InfoExtractor):
'uploader': 'Spi0n',
},
'add_ie': ['Dailymotion'],
- }
+ },
+ # YouTube embed
+ {
+ 'url': 'http://www.badzine.de/ansicht/datum/2014/06/09/so-funktioniert-die-neue-englische-badminton-liga.html',
+ 'info_dict': {
+ 'id': 'FXRb4ykk4S0',
+ 'ext': 'mp4',
+ 'title': 'The NBL Auction 2014',
+ 'uploader': 'BADMINTON England',
+ 'uploader_id': 'BADMINTONEvents',
+ 'upload_date': '20140603',
+ 'description': 'md5:9ef128a69f1e262a700ed83edb163a73',
+ },
+ 'add_ie': ['Youtube'],
+ 'params': {
+ 'skip_download': True,
+ }
+ },
+ # MTVServices embed
+ {
+ 'url': 'http://www.gametrailers.com/news-post/76093/north-america-europe-is-getting-that-mario-kart-8-mercedes-dlc-too',
+ 'md5': '35727f82f58c76d996fc188f9755b0d5',
+ 'info_dict': {
+ 'id': '0306a69b-8adf-4fb5-aace-75f8e8cbfca9',
+ 'ext': 'mp4',
+ 'title': 'Review',
+ 'description': 'Mario\'s life in the fast lane has never looked so good.',
+ },
+ },
]
def report_download_webpage(self, video_id):
@@ -355,7 +383,7 @@ class GenericIE(InfoExtractor):
if not parsed_url.scheme:
default_search = self._downloader.params.get('default_search')
if default_search is None:
- default_search = 'auto_warning'
+ default_search = 'error'
if default_search in ('auto', 'auto_warning'):
if '/' in url:
@@ -363,9 +391,19 @@ class GenericIE(InfoExtractor):
return self.url_result('http://' + url)
else:
if default_search == 'auto_warning':
- self._downloader.report_warning(
- 'Falling back to youtube search for %s . Set --default-search to "auto" to suppress this warning.' % url)
+ if re.match(r'^(?:url|URL)$', url):
+ raise ExtractorError(
+ 'Invalid URL: %r . Call youtube-dl like this: youtube-dl -v "https://www.youtube.com/watch?v=BaW_jenozKc" ' % url,
+ expected=True)
+ else:
+ self._downloader.report_warning(
+ 'Falling back to youtube search for %s . Set --default-search "auto" to suppress this warning.' % url)
return self.url_result('ytsearch:' + url)
+ elif default_search == 'error':
+ raise ExtractorError(
+ ('%r is not a valid URL. '
+ 'Set --default-search "ytseach" (or run youtube-dl "ytsearch:%s" ) to search YouTube'
+ ) % (url, url), expected=True)
else:
assert ':' in default_search
return self.url_result(default_search + url)
@@ -473,8 +511,13 @@ class GenericIE(InfoExtractor):
# Look for embedded YouTube player
matches = re.findall(r'''(?x)
- (?:<iframe[^>]+?src=|embedSWF\(\s*)
- (["\'])(?P<url>(?:https?:)?//(?:www\.)?youtube\.com/
+ (?:
+ <iframe[^>]+?src=|
+ <embed[^>]+?src=|
+ embedSWF\(\s*
+ )
+ (["\'])
+ (?P<url>(?:https?:)?//(?:www\.)?youtube\.com/
(?:embed|v)/.+?)
\1''', webpage)
if matches:
@@ -560,7 +603,7 @@ class GenericIE(InfoExtractor):
# Look for embedded NovaMov-based player
mobj = re.search(
- r'''(?x)<iframe[^>]+?src=(["\'])
+ r'''(?x)<(?:pagespeed_)?iframe[^>]+?src=(["\'])
(?P<url>http://(?:(?:embed|www)\.)?
(?:novamov\.com|
nowvideo\.(?:ch|sx|eu|at|ag|co)|
@@ -582,6 +625,11 @@ class GenericIE(InfoExtractor):
if mobj is not None:
return self.url_result(mobj.group('url'), 'VK')
+ # Look for embedded ivi player
+ mobj = re.search(r'<embed[^>]+?src=(["\'])(?P<url>https?://(?:www\.)?ivi\.ru/video/player.+?)\1', webpage)
+ if mobj is not None:
+ return self.url_result(mobj.group('url'), 'Ivi')
+
# Look for embedded Huffington Post player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>https?://embed\.live\.huffingtonpost\.com/.+?)\1', webpage)
@@ -641,6 +689,22 @@ class GenericIE(InfoExtractor):
url = unescapeHTML(mobj.group('url'))
return self.url_result(url)
+ # Look for embedded vulture.com player
+ mobj = re.search(
+ r'<iframe src="(?P<url>https?://video\.vulture\.com/[^"]+)"',
+ webpage)
+ if mobj is not None:
+ url = unescapeHTML(mobj.group('url'))
+ return self.url_result(url, ie='Vulture')
+
+ # Look for embedded mtvservices player
+ mobj = re.search(
+ r'<iframe src="(?P<url>https?://media\.mtvnservices\.com/embed/[^"]+)"',
+ webpage)
+ if mobj is not None:
+ url = unescapeHTML(mobj.group('url'))
+ return self.url_result(url, ie='MTVServicesEmbedded')
+
# Start with something easy: JW Player in SWFObject
found = re.findall(r'flashvars: [\'"](?:.*&)?file=(http[^\'"&]*)', webpage)
if not found:
@@ -672,7 +736,7 @@ class GenericIE(InfoExtractor):
# HTML5 video
found = re.findall(r'(?s)<video[^<]*(?:>.*?<source.*?)? src="([^"]+)"', webpage)
if not found:
- found = re.findall(
+ found = re.search(
r'(?i)<meta\s+(?=(?:[a-z-]+="[^"]+"\s+)*http-equiv="refresh")'
r'(?:[a-z-]+="[^"]+"\s+)*?content="[0-9]{,2};url=\'([^\']+)\'"',
webpage)
diff --git a/youtube_dl/extractor/googleplus.py b/youtube_dl/extractor/googleplus.py
index cc29a7e5d..07d994b44 100644
--- a/youtube_dl/extractor/googleplus.py
+++ b/youtube_dl/extractor/googleplus.py
@@ -52,8 +52,7 @@ class GooglePlusIE(InfoExtractor):
# Extract title
# Get the first line for title
- video_title = self._html_search_regex(r'<meta name\=\"Description\" content\=\"(.*?)[\n<"]',
- webpage, 'title', default='NA')
+ video_title = self._og_search_description(webpage).splitlines()[0]
# Step 2, Simulate clicking the image box to launch video
DOMAIN = 'https://plus.google.com/'
diff --git a/youtube_dl/extractor/gorillavid.py b/youtube_dl/extractor/gorillavid.py
new file mode 100644
index 000000000..50ef54cce
--- /dev/null
+++ b/youtube_dl/extractor/gorillavid.py
@@ -0,0 +1,87 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+ determine_ext,
+ compat_urllib_parse,
+ compat_urllib_request,
+)
+
+
+class GorillaVidIE(InfoExtractor):
+ IE_DESC = 'GorillaVid.in and daclips.in'
+ _VALID_URL = r'''(?x)
+ https?://(?:www\.)?
+ (?:daclips\.in|gorillavid\.in)/
+ (?:embed-)?(?P<id>[0-9a-zA-Z]+)(?:-[0-9]+x[0-9]+\.html)?
+ '''
+
+ _TESTS = [{
+ 'url': 'http://gorillavid.in/06y9juieqpmi',
+ 'md5': '5ae4a3580620380619678ee4875893ba',
+ 'info_dict': {
+ 'id': '06y9juieqpmi',
+ 'ext': 'flv',
+ 'title': 'Rebecca Black My Moment Official Music Video Reaction',
+ 'thumbnail': 're:http://.*\.jpg',
+ },
+ }, {
+ 'url': 'http://gorillavid.in/embed-z08zf8le23c6-960x480.html',
+ 'md5': 'c9e293ca74d46cad638e199c3f3fe604',
+ 'info_dict': {
+ 'id': 'z08zf8le23c6',
+ 'ext': 'mp4',
+ 'title': 'Say something nice',
+ 'thumbnail': 're:http://.*\.jpg',
+ },
+ }, {
+ 'url': 'http://daclips.in/3rso4kdn6f9m',
+ 'info_dict': {
+ 'id': '3rso4kdn6f9m',
+ 'ext': 'mp4',
+ 'title': 'Micro Pig piglets ready on 16th July 2009',
+ 'thumbnail': 're:http://.*\.jpg',
+ },
+ }]
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ video_id = mobj.group('id')
+
+ webpage = self._download_webpage(url, video_id)
+
+ fields = dict(re.findall(r'''(?x)<input\s+
+ type="hidden"\s+
+ name="([^"]+)"\s+
+ (?:id="[^"]+"\s+)?
+ value="([^"]*)"
+ ''', webpage))
+
+ if fields['op'] == 'download1':
+ post = compat_urllib_parse.urlencode(fields)
+
+ req = compat_urllib_request.Request(url, post)
+ req.add_header('Content-type', 'application/x-www-form-urlencoded')
+
+ webpage = self._download_webpage(req, video_id, 'Downloading video page')
+
+ title = self._search_regex(r'style="z-index: [0-9]+;">([0-9a-zA-Z ]+)(?:-.+)?</span>', webpage, 'title')
+ thumbnail = self._search_regex(r'image:\'(http[^\']+)\',', webpage, 'thumbnail')
+ url = self._search_regex(r'file: \'(http[^\']+)\',', webpage, 'file url')
+
+ formats = [{
+ 'format_id': 'sd',
+ 'url': url,
+ 'ext': determine_ext(url),
+ 'quality': 1,
+ }]
+
+ return {
+ 'id': video_id,
+ 'title': title,
+ 'thumbnail': thumbnail,
+ 'formats': formats,
+ }
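The GorillaVid extractor above re-posts the page's hidden form fields when it finds op=download1 before the real video page is served. A standalone sketch of that pattern (the field names and regex mirror the extractor; the explicit encode() is an extra safety step for Python 3 and is not part of the commit):

    import re

    from youtube_dl.utils import compat_urllib_parse, compat_urllib_request

    def build_confirm_request(webpage, url):
        # Collect the <input type="hidden"> name/value pairs, as the extractor does.
        fields = dict(re.findall(
            r'<input\s+type="hidden"\s+name="([^"]+)"\s+(?:id="[^"]+"\s+)?value="([^"]*)"',
            webpage))
        if fields.get('op') != 'download1':
            return None
        data = compat_urllib_parse.urlencode(fields).encode('utf-8')
        req = compat_urllib_request.Request(url, data)
        req.add_header('Content-type', 'application/x-www-form-urlencoded')
        return req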
diff --git a/youtube_dl/extractor/goshgay.py b/youtube_dl/extractor/goshgay.py
new file mode 100644
index 000000000..7bca21ad0
--- /dev/null
+++ b/youtube_dl/extractor/goshgay.py
@@ -0,0 +1,73 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+ compat_urlparse,
+ str_to_int,
+ ExtractorError,
+)
+import json
+
+
+class GoshgayIE(InfoExtractor):
+ _VALID_URL = r'^(?:https?://)www\.goshgay\.com/video(?P<id>\d+?)($|/)'
+ _TEST = {
+ 'url': 'http://www.goshgay.com/video4116282',
+ 'md5': '268b9f3c3229105c57859e166dd72b03',
+ 'info_dict': {
+ 'id': '4116282',
+ 'ext': 'flv',
+ 'title': 'md5:089833a4790b5e103285a07337f245bf',
+ 'thumbnail': 're:http://.*\.jpg',
+ 'age_limit': 18,
+ }
+ }
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ video_id = mobj.group('id')
+
+ webpage = self._download_webpage(url, video_id)
+ title = self._search_regex(r'class="video-title"><h1>(.+?)<', webpage, 'title')
+
+ player_config = self._search_regex(
+ r'(?s)jwplayer\("player"\)\.setup\(({.+?})\)', webpage, 'config settings')
+ player_vars = json.loads(player_config.replace("'", '"'))
+ width = str_to_int(player_vars.get('width'))
+ height = str_to_int(player_vars.get('height'))
+ config_uri = player_vars.get('config')
+
+ if config_uri is None:
+ raise ExtractorError('Missing config URI')
+ node = self._download_xml(config_uri, video_id, 'Downloading player config XML',
+ errnote='Unable to download XML')
+ if node is None:
+ raise ExtractorError('Missing config XML')
+ if node.tag != 'config':
+ raise ExtractorError('Missing config attribute')
+ fns = node.findall('file')
+ imgs = node.findall('image')
+ if len(fns) != 1:
+ raise ExtractorError('Missing media URI')
+ video_url = fns[0].text
+ if len(imgs) < 1:
+ thumbnail = None
+ else:
+ thumbnail = imgs[0].text
+
+ url_comp = compat_urlparse.urlparse(url)
+ ref = "%s://%s%s" % (url_comp[0], url_comp[1], url_comp[2])
+
+ return {
+ 'id': video_id,
+ 'url': video_url,
+ 'title': title,
+ 'width': width,
+ 'height': height,
+ 'thumbnail': thumbnail,
+ 'http_referer': ref,
+ 'age_limit': 18,
+ }
diff --git a/youtube_dl/extractor/hypem.py b/youtube_dl/extractor/hypem.py
index 9bd06e7c7..6d0d847c6 100644
--- a/youtube_dl/extractor/hypem.py
+++ b/youtube_dl/extractor/hypem.py
@@ -1,10 +1,11 @@
+from __future__ import unicode_literals
+
import json
import re
import time
from .common import InfoExtractor
from ..utils import (
- compat_str,
compat_urllib_parse,
compat_urllib_request,
@@ -13,59 +14,55 @@ from ..utils import (
class HypemIE(InfoExtractor):
- """Information Extractor for hypem"""
- _VALID_URL = r'(?:http://)?(?:www\.)?hypem\.com/track/([^/]+)/([^/]+)'
+ _VALID_URL = r'http://(?:www\.)?hypem\.com/track/([^/]+)/([^/]+)'
_TEST = {
- u'url': u'http://hypem.com/track/1v6ga/BODYWORK+-+TAME',
- u'file': u'1v6ga.mp3',
- u'md5': u'b9cc91b5af8995e9f0c1cee04c575828',
- u'info_dict': {
- u"title": u"Tame"
+ 'url': 'http://hypem.com/track/1v6ga/BODYWORK+-+TAME',
+ 'md5': 'b9cc91b5af8995e9f0c1cee04c575828',
+ 'info_dict': {
+ 'id': '1v6ga',
+ 'ext': 'mp3',
+ 'title': 'Tame',
+ 'uploader': 'BODYWORK',
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
- if mobj is None:
- raise ExtractorError(u'Invalid URL: %s' % url)
track_id = mobj.group(1)
data = {'ax': 1, 'ts': time.time()}
data_encoded = compat_urllib_parse.urlencode(data)
complete_url = url + "?" + data_encoded
request = compat_urllib_request.Request(complete_url)
- response, urlh = self._download_webpage_handle(request, track_id, u'Downloading webpage with the url')
+ response, urlh = self._download_webpage_handle(
+ request, track_id, 'Downloading webpage with the url')
cookie = urlh.headers.get('Set-Cookie', '')
- self.report_extraction(track_id)
-
- html_tracks = self._html_search_regex(r'<script type="application/json" id="displayList-data">(.*?)</script>',
- response, u'tracks', flags=re.MULTILINE|re.DOTALL).strip()
+ html_tracks = self._html_search_regex(
+ r'(?ms)<script type="application/json" id="displayList-data">\s*(.*?)\s*</script>',
+ response, 'tracks')
try:
track_list = json.loads(html_tracks)
- track = track_list[u'tracks'][0]
+ track = track_list['tracks'][0]
except ValueError:
- raise ExtractorError(u'Hypemachine contained invalid JSON.')
+ raise ExtractorError('Hypemachine contained invalid JSON.')
- key = track[u"key"]
- track_id = track[u"id"]
- artist = track[u"artist"]
- title = track[u"song"]
+ key = track['key']
+ track_id = track['id']
+ artist = track['artist']
+ title = track['song']
- serve_url = "http://hypem.com/serve/source/%s/%s" % (compat_str(track_id), compat_str(key))
- request = compat_urllib_request.Request(serve_url, "" , {'Content-Type': 'application/json'})
+ serve_url = "http://hypem.com/serve/source/%s/%s" % (track_id, key)
+ request = compat_urllib_request.Request(
+ serve_url, '', {'Content-Type': 'application/json'})
request.add_header('cookie', cookie)
- song_data_json = self._download_webpage(request, track_id, u'Downloading metadata')
- try:
- song_data = json.loads(song_data_json)
- except ValueError:
- raise ExtractorError(u'Hypemachine contained invalid JSON.')
- final_url = song_data[u"url"]
+ song_data = self._download_json(request, track_id, 'Downloading metadata')
+ final_url = song_data["url"]
- return [{
- 'id': track_id,
- 'url': final_url,
- 'ext': "mp3",
- 'title': title,
- 'artist': artist,
- }]
+ return {
+ 'id': track_id,
+ 'url': final_url,
+ 'ext': 'mp3',
+ 'title': title,
+ 'uploader': artist,
+ }
diff --git a/youtube_dl/extractor/ivi.py b/youtube_dl/extractor/ivi.py
index 1ba4966c7..4027deb70 100644
--- a/youtube_dl/extractor/ivi.py
+++ b/youtube_dl/extractor/ivi.py
@@ -14,7 +14,7 @@ from ..utils import (
class IviIE(InfoExtractor):
IE_DESC = 'ivi.ru'
IE_NAME = 'ivi'
- _VALID_URL = r'https?://(?:www\.)?ivi\.ru/watch(?:/(?P<compilationid>[^/]+))?/(?P<videoid>\d+)'
+ _VALID_URL = r'https?://(?:www\.)?ivi\.ru/(?:watch/(?:[^/]+/)?|video/player\?.*?videoId=)(?P<videoid>\d+)'
_TESTS = [
# Single movie
@@ -33,14 +33,14 @@ class IviIE(InfoExtractor):
},
# Serial's serie
{
- 'url': 'http://www.ivi.ru/watch/dezhurnyi_angel/74791',
- 'md5': '3e6cc9a848c1d2ebcc6476444967baa9',
+ 'url': 'http://www.ivi.ru/watch/dvoe_iz_lartsa/9549',
+ 'md5': '221f56b35e3ed815fde2df71032f4b3e',
'info_dict': {
- 'id': '74791',
+ 'id': '9549',
'ext': 'mp4',
- 'title': 'Дежурный ангел - 1 серия',
- 'duration': 2490,
- 'thumbnail': 'http://thumbs.ivi.ru/f7.vcp.digitalaccess.ru/contents/8/e/bc2f6c2b6e5d291152fdd32c059141.jpg',
+ 'title': 'Двое из ларца - Серия 1',
+ 'duration': 2655,
+ 'thumbnail': 'http://thumbs.ivi.ru/f15.vcp.digitalaccess.ru/contents/8/4/0068dc0677041f3336b7c2baad8fc0.jpg',
},
'skip': 'Only works from Russia',
}
diff --git a/youtube_dl/extractor/ku6.py b/youtube_dl/extractor/ku6.py
new file mode 100644
index 000000000..484239b19
--- /dev/null
+++ b/youtube_dl/extractor/ku6.py
@@ -0,0 +1,35 @@
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+
+
+class Ku6IE(InfoExtractor):
+ _VALID_URL = r'http://v\.ku6\.com/show/(?P<id>[a-zA-Z0-9\-\_]+)(?:\.)*html'
+ _TEST = {
+ 'url': 'http://v.ku6.com/show/JG-8yS14xzBr4bCn1pu0xw...html',
+ 'md5': '01203549b9efbb45f4b87d55bdea1ed1',
+ 'info_dict': {
+ 'id': 'JG-8yS14xzBr4bCn1pu0xw',
+ 'ext': 'f4v',
+ 'title': 'techniques test',
+ }
+ }
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ video_id = mobj.group('id')
+
+ webpage = self._download_webpage(url, video_id)
+ title = self._search_regex(r'<h1 title=.*>(.*?)</h1>', webpage, 'title')
+ dataUrl = 'http://v.ku6.com/fetchVideo4Player/%s.html' % video_id
+ jsonData = self._download_json(dataUrl, video_id)
+ downloadUrl = jsonData['data']['f']
+
+ return {
+ 'id': video_id,
+ 'title': title,
+ 'url': downloadUrl
+ }
+
diff --git a/youtube_dl/extractor/lifenews.py b/youtube_dl/extractor/lifenews.py
index 7a431a274..8d9491f23 100644
--- a/youtube_dl/extractor/lifenews.py
+++ b/youtube_dl/extractor/lifenews.py
@@ -24,7 +24,7 @@ class LifeNewsIE(InfoExtractor):
'ext': 'mp4',
'title': 'МВД разыскивает мужчин, оставивших в IKEA сумку с автоматом',
'description': 'Камеры наблюдения гипермаркета зафиксировали троих мужчин, спрятавших оружейный арсенал в камере хранения.',
- 'thumbnail': 'http://lifenews.ru/static/posts/2014/1/126342/.video.jpg',
+ 'thumbnail': 're:http://.*\.jpg',
'upload_date': '20140130',
}
}
diff --git a/youtube_dl/extractor/livestream.py b/youtube_dl/extractor/livestream.py
index 1dcd1fb2d..2c100d424 100644
--- a/youtube_dl/extractor/livestream.py
+++ b/youtube_dl/extractor/livestream.py
@@ -1,3 +1,5 @@
+from __future__ import unicode_literals
+
import re
import json
@@ -6,31 +8,35 @@ from ..utils import (
compat_urllib_parse_urlparse,
compat_urlparse,
xpath_with_ns,
+ compat_str,
+ orderedSet,
)
class LivestreamIE(InfoExtractor):
- IE_NAME = u'livestream'
+ IE_NAME = 'livestream'
_VALID_URL = r'http://new\.livestream\.com/.*?/(?P<event_name>.*?)(/videos/(?P<id>\d+))?/?$'
_TEST = {
- u'url': u'http://new.livestream.com/CoheedandCambria/WebsterHall/videos/4719370',
- u'file': u'4719370.mp4',
- u'md5': u'0d2186e3187d185a04b3cdd02b828836',
- u'info_dict': {
- u'title': u'Live from Webster Hall NYC',
- u'upload_date': u'20121012',
+ 'url': 'http://new.livestream.com/CoheedandCambria/WebsterHall/videos/4719370',
+ 'md5': '53274c76ba7754fb0e8d072716f2292b',
+ 'info_dict': {
+ 'id': '4719370',
+ 'ext': 'mp4',
+ 'title': 'Live from Webster Hall NYC',
+ 'upload_date': '20121012',
}
}
def _extract_video_info(self, video_data):
video_url = video_data.get('progressive_url_hd') or video_data.get('progressive_url')
- return {'id': video_data['id'],
- 'url': video_url,
- 'ext': 'mp4',
- 'title': video_data['caption'],
- 'thumbnail': video_data['thumbnail_url'],
- 'upload_date': video_data['updated_at'].replace('-','')[:8],
- }
+ return {
+ 'id': compat_str(video_data['id']),
+ 'url': video_url,
+ 'ext': 'mp4',
+ 'title': video_data['caption'],
+ 'thumbnail': video_data['thumbnail_url'],
+ 'upload_date': video_data['updated_at'].replace('-', '')[:8],
+ }
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
@@ -40,43 +46,43 @@ class LivestreamIE(InfoExtractor):
if video_id is None:
# This is an event page:
- config_json = self._search_regex(r'window.config = ({.*?});',
- webpage, u'window config')
+ config_json = self._search_regex(
+ r'window.config = ({.*?});', webpage, 'window config')
info = json.loads(config_json)['event']
videos = [self._extract_video_info(video_data['data'])
- for video_data in info['feed']['data'] if video_data['type'] == u'video']
+ for video_data in info['feed']['data'] if video_data['type'] == 'video']
return self.playlist_result(videos, info['id'], info['full_name'])
else:
- og_video = self._og_search_video_url(webpage, name=u'player url')
+ og_video = self._og_search_video_url(webpage, 'player url')
query_str = compat_urllib_parse_urlparse(og_video).query
query = compat_urlparse.parse_qs(query_str)
api_url = query['play_url'][0].replace('.smil', '')
- info = json.loads(self._download_webpage(api_url, video_id,
- u'Downloading video info'))
+ info = json.loads(self._download_webpage(
+ api_url, video_id, 'Downloading video info'))
return self._extract_video_info(info)
# The original version of Livestream uses a different system
class LivestreamOriginalIE(InfoExtractor):
- IE_NAME = u'livestream:original'
- _VALID_URL = r'https?://www\.livestream\.com/(?P<user>[^/]+)/video\?.*?clipId=(?P<id>.*?)(&|$)'
+ IE_NAME = 'livestream:original'
+ _VALID_URL = r'''(?x)https?://www\.livestream\.com/
+ (?P<user>[^/]+)/(?P<type>video|folder)
+ (?:\?.*?Id=|/)(?P<id>.*?)(&|$)
+ '''
_TEST = {
- u'url': u'http://www.livestream.com/dealbook/video?clipId=pla_8aa4a3f1-ba15-46a4-893b-902210e138fb',
- u'info_dict': {
- u'id': u'pla_8aa4a3f1-ba15-46a4-893b-902210e138fb',
- u'ext': u'flv',
- u'title': u'Spark 1 (BitCoin) with Cameron Winklevoss & Tyler Winklevoss of Winklevoss Capital',
+ 'url': 'http://www.livestream.com/dealbook/video?clipId=pla_8aa4a3f1-ba15-46a4-893b-902210e138fb',
+ 'info_dict': {
+ 'id': 'pla_8aa4a3f1-ba15-46a4-893b-902210e138fb',
+ 'ext': 'flv',
+ 'title': 'Spark 1 (BitCoin) with Cameron Winklevoss & Tyler Winklevoss of Winklevoss Capital',
},
- u'params': {
+ 'params': {
# rtmp
- u'skip_download': True,
+ 'skip_download': True,
},
}
- def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('id')
- user = mobj.group('user')
+ def _extract_video(self, user, video_id):
api_url = 'http://x{0}x.api.channel.livestream.com/2.0/clipdetails?extendedInfo=true&id={1}'.format(user, video_id)
info = self._download_xml(api_url, video_id)
@@ -84,7 +90,7 @@ class LivestreamOriginalIE(InfoExtractor):
ns = {'media': 'http://search.yahoo.com/mrss'}
thumbnail_url = item.find(xpath_with_ns('media:thumbnail', ns)).attrib['url']
# Remove the extension and number from the path (like 1.jpg)
- path = self._search_regex(r'(user-files/.+)_.*?\.jpg$', thumbnail_url, u'path')
+ path = self._search_regex(r'(user-files/.+)_.*?\.jpg$', thumbnail_url, 'path')
return {
'id': video_id,
@@ -94,3 +100,44 @@ class LivestreamOriginalIE(InfoExtractor):
'ext': 'flv',
'thumbnail': thumbnail_url,
}
+
+ def _extract_folder(self, url, folder_id):
+ webpage = self._download_webpage(url, folder_id)
+ urls = orderedSet(re.findall(r'<a href="(https?://livestre\.am/.*?)"', webpage))
+
+ return {
+ '_type': 'playlist',
+ 'id': folder_id,
+ 'entries': [{
+ '_type': 'url',
+ 'url': video_url,
+ } for video_url in urls],
+ }
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ id = mobj.group('id')
+ user = mobj.group('user')
+ url_type = mobj.group('type')
+ if url_type == 'folder':
+ return self._extract_folder(url, id)
+ else:
+ return self._extract_video(user, id)
+
+
+# The server doesn't support HEAD request, the generic extractor can't detect
+# the redirection
+class LivestreamShortenerIE(InfoExtractor):
+ IE_NAME = 'livestream:shortener'
+ IE_DESC = False # Do not list
+ _VALID_URL = r'https?://livestre\.am/(?P<id>.+)'
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ id = mobj.group('id')
+ webpage = self._download_webpage(url, id)
+
+ return {
+ '_type': 'url',
+ 'url': self._og_search_url(webpage),
+ }
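The new folder handling above collects every livestre.am link on the page and returns them as playlist entries, deduplicated but in page order. A compact sketch of the same idea against a fabricated page snippet:

    import re

    from youtube_dl.utils import orderedSet

    PAGE = ('<a href="http://livestre.am/1abc"></a>'
            '<a href="http://livestre.am/1abc"></a>'
            '<a href="http://livestre.am/2def"></a>')

    urls = orderedSet(re.findall(r'<a href="(https?://livestre\.am/.*?)"', PAGE))
    playlist = {
        '_type': 'playlist',
        'id': 'example-folder',
        'entries': [{'_type': 'url', 'url': u} for u in urls],
    }
    # -> two entries: the duplicate livestre.am/1abc link is collapsed, order is kept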
diff --git a/youtube_dl/extractor/mailru.py b/youtube_dl/extractor/mailru.py
index f819c09b3..7460d81cd 100644
--- a/youtube_dl/extractor/mailru.py
+++ b/youtube_dl/extractor/mailru.py
@@ -2,7 +2,6 @@
from __future__ import unicode_literals
import re
-import datetime
from .common import InfoExtractor
@@ -10,28 +9,48 @@ from .common import InfoExtractor
class MailRuIE(InfoExtractor):
IE_NAME = 'mailru'
IE_DESC = 'Видео@Mail.Ru'
- _VALID_URL = r'http://(?:www\.)?my\.mail\.ru/video/.*#video=/?(?P<id>[^/]+/[^/]+/[^/]+/\d+)'
+ _VALID_URL = r'http://(?:www\.)?my\.mail\.ru/(?:video/.*#video=/?(?P<idv1>(?:[^/]+/){3}\d+)|(?:(?P<idv2prefix>(?:[^/]+/){2})video/(?P<idv2suffix>[^/]+/\d+))\.html)'
- _TEST = {
- 'url': 'http://my.mail.ru/video/top#video=/mail/sonypicturesrus/75/76',
- 'md5': 'dea205f03120046894db4ebb6159879a',
- 'info_dict': {
- 'id': '46301138',
- 'ext': 'mp4',
- 'title': 'Новый Человек-Паук. Высокое напряжение. Восстание Электро',
- 'upload_date': '20140224',
- 'uploader': 'sonypicturesrus',
- 'uploader_id': 'sonypicturesrus@mail.ru',
- 'duration': 184,
- }
- }
+ _TESTS = [
+ {
+ 'url': 'http://my.mail.ru/video/top#video=/mail/sonypicturesrus/75/76',
+ 'md5': 'dea205f03120046894db4ebb6159879a',
+ 'info_dict': {
+ 'id': '46301138',
+ 'ext': 'mp4',
+ 'title': 'Новый Человек-Паук. Высокое напряжение. Восстание Электро',
+ 'timestamp': 1393232740,
+ 'upload_date': '20140224',
+ 'uploader': 'sonypicturesrus',
+ 'uploader_id': 'sonypicturesrus@mail.ru',
+ 'duration': 184,
+ },
+ },
+ {
+ 'url': 'http://my.mail.ru/corp/hitech/video/news_hi-tech_mail_ru/1263.html',
+ 'md5': '00a91a58c3402204dcced523777b475f',
+ 'info_dict': {
+ 'id': '46843144',
+ 'ext': 'mp4',
+ 'title': 'Samsung Galaxy S5 Hammer Smash Fail Battery Explosion',
+ 'timestamp': 1397217632,
+ 'upload_date': '20140411',
+ 'uploader': 'hitech',
+ 'uploader_id': 'hitech@corp.mail.ru',
+ 'duration': 245,
+ },
+ },
+ ]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('id')
+ video_id = mobj.group('idv1')
+
+ if not video_id:
+ video_id = mobj.group('idv2prefix') + mobj.group('idv2suffix')
video_data = self._download_json(
- 'http://videoapi.my.mail.ru/videos/%s.json?new=1' % video_id, video_id, 'Downloading video JSON')
+ 'http://api.video.mail.ru/videos/%s.json?new=1' % video_id, video_id, 'Downloading video JSON')
author = video_data['author']
uploader = author['name']
@@ -40,10 +59,11 @@ class MailRuIE(InfoExtractor):
movie = video_data['movie']
content_id = str(movie['contentId'])
title = movie['title']
+ if title.endswith('.mp4'):
+ title = title[:-4]
thumbnail = movie['poster']
duration = movie['duration']
- upload_date = datetime.datetime.fromtimestamp(video_data['timestamp']).strftime('%Y%m%d')
view_count = video_data['views_count']
formats = [
@@ -57,7 +77,7 @@ class MailRuIE(InfoExtractor):
'id': content_id,
'title': title,
'thumbnail': thumbnail,
- 'upload_date': upload_date,
+ 'timestamp': video_data['timestamp'],
'uploader': uploader,
'uploader_id': uploader_id,
'duration': duration,
diff --git a/youtube_dl/extractor/motherless.py b/youtube_dl/extractor/motherless.py
new file mode 100644
index 000000000..6229b2173
--- /dev/null
+++ b/youtube_dl/extractor/motherless.py
@@ -0,0 +1,87 @@
+from __future__ import unicode_literals
+
+import datetime
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+ int_or_none,
+ unified_strdate,
+)
+
+
+class MotherlessIE(InfoExtractor):
+ _VALID_URL = r'http://(?:www\.)?motherless\.com/(?P<id>[A-Z0-9]+)'
+ _TESTS = [
+ {
+ 'url': 'http://motherless.com/AC3FFE1',
+ 'md5': '5527fef81d2e529215dad3c2d744a7d9',
+ 'info_dict': {
+ 'id': 'AC3FFE1',
+ 'ext': 'flv',
+ 'title': 'Fucked in the ass while playing PS3',
+ 'categories': ['Gaming', 'anal', 'reluctant', 'rough', 'Wife'],
+ 'upload_date': '20100913',
+ 'uploader_id': 'famouslyfuckedup',
+ 'thumbnail': 're:http://.*\.jpg',
+ 'age_limit': 18,
+ }
+ },
+ {
+ 'url': 'http://motherless.com/532291B',
+ 'md5': 'bc59a6b47d1f958e61fbd38a4d31b131',
+ 'info_dict': {
+ 'id': '532291B',
+ 'ext': 'mp4',
+ 'title': 'Amazing girl playing the omegle game, PERFECT!',
+ 'categories': ['Amateur', 'webcam', 'omegle', 'pink', 'young', 'masturbate', 'teen', 'game', 'hairy'],
+ 'upload_date': '20140622',
+ 'uploader_id': 'Sulivana7x',
+ 'thumbnail': 're:http://.*\.jpg',
+ 'age_limit': 18,
+ }
+ }
+ ]
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ video_id = mobj.group('id')
+
+ webpage = self._download_webpage(url, video_id)
+
+ title = self._html_search_regex(r'id="view-upload-title">\s+([^<]+)<', webpage, 'title')
+
+ video_url = self._html_search_regex(r'setup\(\{\s+"file".+: "([^"]+)",', webpage, 'video_url')
+ age_limit = self._rta_search(webpage)
+
+ view_count = self._html_search_regex(r'<strong>Views</strong>\s+([^<]+)<', webpage, 'view_count')
+
+ upload_date = self._html_search_regex(r'<strong>Uploaded</strong>\s+([^<]+)<', webpage, 'upload_date')
+ if 'Ago' in upload_date:
+ days = int(re.search(r'([0-9]+)', upload_date).group(1))
+ upload_date = (datetime.datetime.now() - datetime.timedelta(days=days)).strftime('%Y%m%d')
+ else:
+ upload_date = unified_strdate(upload_date)
+
+ like_count = self._html_search_regex(r'<strong>Favorited</strong>\s+([^<]+)<', webpage, 'like_count')
+
+ comment_count = webpage.count('class="media-comment-contents"')
+ uploader_id = self._html_search_regex(r'"thumb-member-username">\s+<a href="/m/([^"]+)"', webpage, 'uploader_id')
+
+ categories = self._html_search_meta('keywords', webpage)
+ if categories:
+ categories = [cat.strip() for cat in categories.split(',')]
+
+ return {
+ 'id': video_id,
+ 'title': title,
+ 'upload_date': upload_date,
+ 'uploader_id': uploader_id,
+ 'thumbnail': self._og_search_thumbnail(webpage),
+ 'categories': categories,
+ 'view_count': int_or_none(view_count.replace(',', '')),
+ 'like_count': int_or_none(like_count.replace(',', '')),
+ 'comment_count': comment_count,
+ 'age_limit': age_limit,
+ 'url': video_url,
+ }
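The Motherless extractor above handles relative upload dates ('3 days Ago') by subtracting from today's date and everything else via unified_strdate. A small sketch of that branch on its own (regex and date format mirror the code above):

    import datetime
    import re

    from youtube_dl.utils import unified_strdate

    def normalize_upload_date(raw):
        # Relative dates become an absolute YYYYMMDD stamp counted back from "now";
        # absolute dates go through unified_strdate.
        if 'Ago' in raw:
            days = int(re.search(r'([0-9]+)', raw).group(1))
            return (datetime.datetime.now() - datetime.timedelta(days=days)).strftime('%Y%m%d')
        return unified_strdate(raw)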
diff --git a/youtube_dl/extractor/mpora.py b/youtube_dl/extractor/mpora.py
index 39d6feb98..387935d4d 100644
--- a/youtube_dl/extractor/mpora.py
+++ b/youtube_dl/extractor/mpora.py
@@ -28,7 +28,7 @@ class MporaIE(InfoExtractor):
webpage = self._download_webpage(url, video_id)
data_json = self._search_regex(
- r"new FM\.Player\('[^']+',\s*(\{.*?)\);\n", webpage, 'json')
+ r"new FM\.Player\('[^']+',\s*(\{.*?)\).player;", webpage, 'json')
data = json.loads(data_json)
diff --git a/youtube_dl/extractor/mtv.py b/youtube_dl/extractor/mtv.py
index d75241d3f..af9490ccc 100644
--- a/youtube_dl/extractor/mtv.py
+++ b/youtube_dl/extractor/mtv.py
@@ -22,6 +22,7 @@ def _media_xml_tag(tag):
class MTVServicesInfoExtractor(InfoExtractor):
_MOBILE_TEMPLATE = None
+
@staticmethod
def _id_from_uri(uri):
return uri.split(':')[-1]
@@ -35,6 +36,9 @@ class MTVServicesInfoExtractor(InfoExtractor):
base = 'http://mtvnmobile.vo.llnwd.net/kip0/_pxn=1+_pxI0=Ripod-h264+_pxL0=undefined+_pxM0=+_pxK=18639+_pxE=mp4/44620/mtvnorigin/'
return base + m.group('finalid')
+ def _get_feed_url(self, uri):
+ return self._FEED_URL
+
def _get_thumbnail_url(self, uri, itemdoc):
search_path = '%s/%s' % (_media_xml_tag('group'), _media_xml_tag('thumbnail'))
thumb_node = itemdoc.find(search_path)
@@ -80,6 +84,7 @@ class MTVServicesInfoExtractor(InfoExtractor):
})
except (KeyError, TypeError):
raise ExtractorError('Invalid rendition field.')
+ self._sort_formats(formats)
return formats
def _get_video_info(self, itemdoc):
@@ -135,10 +140,10 @@ class MTVServicesInfoExtractor(InfoExtractor):
def _get_videos_info(self, uri):
video_id = self._id_from_uri(uri)
+ feed_url = self._get_feed_url(uri)
data = compat_urllib_parse.urlencode({'uri': uri})
-
idoc = self._download_xml(
- self._FEED_URL + '?' + data, video_id,
+ feed_url + '?' + data, video_id,
'Downloading info', transform_source=fix_xml_ampersands)
return [self._get_video_info(item) for item in idoc.findall('.//item')]
@@ -159,6 +164,37 @@ class MTVServicesInfoExtractor(InfoExtractor):
return self._get_videos_info(mgid)
+class MTVServicesEmbeddedIE(MTVServicesInfoExtractor):
+ IE_NAME = 'mtvservices:embedded'
+ _VALID_URL = r'https?://media\.mtvnservices\.com/embed/(?P<mgid>.+?)(\?|/|$)'
+
+ _TEST = {
+ # From http://www.thewrap.com/peter-dinklage-sums-up-game-of-thrones-in-45-seconds-video/
+ 'url': 'http://media.mtvnservices.com/embed/mgid:uma:video:mtv.com:1043906/cp~vid%3D1043906%26uri%3Dmgid%3Auma%3Avideo%3Amtv.com%3A1043906',
+ 'md5': 'cb349b21a7897164cede95bd7bf3fbb9',
+ 'info_dict': {
+ 'id': '1043906',
+ 'ext': 'mp4',
+ 'title': 'Peter Dinklage Sums Up \'Game Of Thrones\' In 45 Seconds',
+ 'description': '"Sexy sexy sexy, stabby stabby stabby, beautiful language," says Peter Dinklage as he tries summarizing "Game of Thrones" in under a minute.',
+ },
+ }
+
+ def _get_feed_url(self, uri):
+ video_id = self._id_from_uri(uri)
+ site_id = uri.replace(video_id, '')
+ config_url = 'http://media.mtvnservices.com/pmt/e1/players/{0}/config.xml'.format(site_id)
+ config_doc = self._download_xml(config_url, video_id)
+ feed_node = config_doc.find('.//feed')
+ feed_url = feed_node.text.strip().split('?')[0]
+ return feed_url
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ mgid = mobj.group('mgid')
+ return self._get_videos_info(mgid)
+
+
class MTVIE(MTVServicesInfoExtractor):
_VALID_URL = r'''(?x)^https?://
(?:(?:www\.)?mtv\.com/videos/.+?/(?P<videoid>[0-9]+)/[^/]+$|
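The new embedded-player support derives a per-site feed URL from the mgid rather than relying on a fixed _FEED_URL. A rough sketch of that lookup with an illustrative mgid (the config URL pattern is the one used in the hunk above):

    mgid = 'mgid:uma:video:mtv.com:1043906'
    video_id = mgid.split(':')[-1]        # '1043906', as _id_from_uri does
    site_id = mgid.replace(video_id, '')  # 'mgid:uma:video:mtv.com:'
    config_url = 'http://media.mtvnservices.com/pmt/e1/players/{0}/config.xml'.format(site_id)
    # The <feed> element of that config XML, with its query string stripped, becomes the feed URL.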
diff --git a/youtube_dl/extractor/naver.py b/youtube_dl/extractor/naver.py
index 4cab30631..c0231c197 100644
--- a/youtube_dl/extractor/naver.py
+++ b/youtube_dl/extractor/naver.py
@@ -1,4 +1,6 @@
# encoding: utf-8
+from __future__ import unicode_literals
+
import re
from .common import InfoExtractor
@@ -12,12 +14,13 @@ class NaverIE(InfoExtractor):
_VALID_URL = r'https?://(?:m\.)?tvcast\.naver\.com/v/(?P<id>\d+)'
_TEST = {
- u'url': u'http://tvcast.naver.com/v/81652',
- u'file': u'81652.mp4',
- u'info_dict': {
- u'title': u'[9월 모의고사 해설강의][수학_김상희] 수학 A형 16~20번',
- u'description': u'합격불변의 법칙 메가스터디 | 메가스터디 수학 김상희 선생님이 9월 모의고사 수학A형 16번에서 20번까지 해설강의를 공개합니다.',
- u'upload_date': u'20130903',
+ 'url': 'http://tvcast.naver.com/v/81652',
+ 'info_dict': {
+ 'id': '81652',
+ 'ext': 'mp4',
+ 'title': '[9월 모의고사 해설강의][수학_김상희] 수학 A형 16~20번',
+ 'description': '합격불변의 법칙 메가스터디 | 메가스터디 수학 김상희 선생님이 9월 모의고사 수학A형 16번에서 20번까지 해설강의를 공개합니다.',
+ 'upload_date': '20130903',
},
}
@@ -28,7 +31,7 @@ class NaverIE(InfoExtractor):
m_id = re.search(r'var rmcPlayer = new nhn.rmcnmv.RMCVideoPlayer\("(.+?)", "(.+?)"',
webpage)
if m_id is None:
- raise ExtractorError(u'couldn\'t extract vid and key')
+ raise ExtractorError('couldn\'t extract vid and key')
vid = m_id.group(1)
key = m_id.group(2)
query = compat_urllib_parse.urlencode({'vid': vid, 'inKey': key,})
@@ -39,22 +42,27 @@ class NaverIE(InfoExtractor):
})
info = self._download_xml(
'http://serviceapi.rmcnmv.naver.com/flash/videoInfo.nhn?' + query,
- video_id, u'Downloading video info')
+ video_id, 'Downloading video info')
urls = self._download_xml(
'http://serviceapi.rmcnmv.naver.com/flash/playableEncodingOption.nhn?' + query_urls,
- video_id, u'Downloading video formats info')
+ video_id, 'Downloading video formats info')
formats = []
for format_el in urls.findall('EncodingOptions/EncodingOption'):
domain = format_el.find('Domain').text
- if domain.startswith('rtmp'):
- continue
- formats.append({
+ f = {
'url': domain + format_el.find('uri').text,
'ext': 'mp4',
'width': int(format_el.find('width').text),
'height': int(format_el.find('height').text),
- })
+ }
+ if domain.startswith('rtmp'):
+ f.update({
+ 'ext': 'flv',
+ 'rtmp_protocol': '1', # rtmpt
+ })
+ formats.append(f)
+ self._sort_formats(formats)
return {
'id': video_id,
diff --git a/youtube_dl/extractor/nbc.py b/youtube_dl/extractor/nbc.py
index 1a63ab56a..aa34665d1 100644
--- a/youtube_dl/extractor/nbc.py
+++ b/youtube_dl/extractor/nbc.py
@@ -1,6 +1,7 @@
from __future__ import unicode_literals
import re
+import json
from .common import InfoExtractor
from ..utils import find_xpath_attr, compat_str
@@ -31,30 +32,68 @@ class NBCIE(InfoExtractor):
class NBCNewsIE(InfoExtractor):
- _VALID_URL = r'https?://www\.nbcnews\.com/video/.+?/(?P<id>\d+)'
+ _VALID_URL = r'''(?x)https?://www\.nbcnews\.com/
+ ((video/.+?/(?P<id>\d+))|
+ (feature/[^/]+/(?P<title>.+)))
+ '''
- _TEST = {
- 'url': 'http://www.nbcnews.com/video/nbc-news/52753292',
- 'md5': '47abaac93c6eaf9ad37ee6c4463a5179',
- 'info_dict': {
- 'id': '52753292',
- 'ext': 'flv',
- 'title': 'Crew emerges after four-month Mars food study',
- 'description': 'md5:24e632ffac72b35f8b67a12d1b6ddfc1',
+ _TESTS = [
+ {
+ 'url': 'http://www.nbcnews.com/video/nbc-news/52753292',
+ 'md5': '47abaac93c6eaf9ad37ee6c4463a5179',
+ 'info_dict': {
+ 'id': '52753292',
+ 'ext': 'flv',
+ 'title': 'Crew emerges after four-month Mars food study',
+ 'description': 'md5:24e632ffac72b35f8b67a12d1b6ddfc1',
+ },
},
- }
+ {
+ 'url': 'http://www.nbcnews.com/feature/edward-snowden-interview/how-twitter-reacted-snowden-interview-n117236',
+ 'md5': 'b2421750c9f260783721d898f4c42063',
+ 'info_dict': {
+ 'id': 'I1wpAI_zmhsQ',
+ 'ext': 'flv',
+ 'title': 'How Twitter Reacted To The Snowden Interview',
+ 'description': 'md5:65a0bd5d76fe114f3c2727aa3a81fe64',
+ },
+ 'add_ie': ['ThePlatform'],
+ },
+ ]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
- all_info = self._download_xml('http://www.nbcnews.com/id/%s/displaymode/1219' % video_id, video_id)
- info = all_info.find('video')
+ if video_id is not None:
+ all_info = self._download_xml('http://www.nbcnews.com/id/%s/displaymode/1219' % video_id, video_id)
+ info = all_info.find('video')
- return {
- 'id': video_id,
- 'title': info.find('headline').text,
- 'ext': 'flv',
- 'url': find_xpath_attr(info, 'media', 'type', 'flashVideo').text,
- 'description': compat_str(info.find('caption').text),
- 'thumbnail': find_xpath_attr(info, 'media', 'type', 'thumbnail').text,
- }
+ return {
+ 'id': video_id,
+ 'title': info.find('headline').text,
+ 'ext': 'flv',
+ 'url': find_xpath_attr(info, 'media', 'type', 'flashVideo').text,
+ 'description': compat_str(info.find('caption').text),
+ 'thumbnail': find_xpath_attr(info, 'media', 'type', 'thumbnail').text,
+ }
+ else:
+ # "feature" pages use theplatform.com
+ title = mobj.group('title')
+ webpage = self._download_webpage(url, title)
+ bootstrap_json = self._search_regex(
+ r'var bootstrapJson = ({.+})\s*$', webpage, 'bootstrap json',
+ flags=re.MULTILINE)
+ bootstrap = json.loads(bootstrap_json)
+ info = bootstrap['results'][0]['video']
+ playlist_url = info['fallbackPlaylistUrl'] + '?form=MPXNBCNewsAPI'
+ mpxid = info['mpxId']
+ all_videos = self._download_json(playlist_url, title)['videos']
+ # The response contains additional videos
+ info = next(v for v in all_videos if v['mpxId'] == mpxid)
+
+ return {
+ '_type': 'url',
+ # We get the best quality video
+ 'url': info['videoAssets'][-1]['publicUrl'],
+ 'ie_key': 'ThePlatform',
+ }
diff --git a/youtube_dl/extractor/ndr.py b/youtube_dl/extractor/ndr.py
index 0650f9564..3d6096e46 100644
--- a/youtube_dl/extractor/ndr.py
+++ b/youtube_dl/extractor/ndr.py
@@ -4,7 +4,11 @@ from __future__ import unicode_literals
import re
from .common import InfoExtractor
-from ..utils import ExtractorError
+from ..utils import (
+ ExtractorError,
+ int_or_none,
+ qualities,
+)
class NDRIE(InfoExtractor):
@@ -45,17 +49,16 @@ class NDRIE(InfoExtractor):
page = self._download_webpage(url, video_id, 'Downloading page')
- title = self._og_search_title(page)
+ title = self._og_search_title(page).strip()
description = self._og_search_description(page)
+ if description:
+ description = description.strip()
- mobj = re.search(
- r'<div class="duration"><span class="min">(?P<minutes>\d+)</span>:<span class="sec">(?P<seconds>\d+)</span></div>',
- page)
- duration = int(mobj.group('minutes')) * 60 + int(mobj.group('seconds')) if mobj else None
+ duration = int_or_none(self._html_search_regex(r'duration: (\d+),\n', page, 'duration', fatal=False))
formats = []
- mp3_url = re.search(r'''{src:'(?P<audio>[^']+)', type:"audio/mp3"},''', page)
+ mp3_url = re.search(r'''\{src:'(?P<audio>[^']+)', type:"audio/mp3"},''', page)
if mp3_url:
formats.append({
'url': mp3_url.group('audio'),
@@ -64,13 +67,15 @@ class NDRIE(InfoExtractor):
thumbnail = None
- video_url = re.search(r'''3: {src:'(?P<video>.+?)\.hi\.mp4', type:"video/mp4"},''', page)
+ video_url = re.search(r'''3: \{src:'(?P<video>.+?)\.hi\.mp4', type:"video/mp4"},''', page)
if video_url:
- thumbnail = self._html_search_regex(r'(?m)title: "NDR PLAYER",\s*poster: "([^"]+)",',
- page, 'thumbnail', fatal=False)
- if thumbnail:
- thumbnail = 'http://www.ndr.de' + thumbnail
- for format_id in ['lo', 'hi', 'hq']:
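+ # The player config lists several poster images; keep the largest one (quality tags xs..xl)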
+ thumbnails = re.findall(r'''\d+: \{src: "([^"]+)"(?: \|\| '[^']+')?, quality: '([^']+)'}''', page)
+ if thumbnails:
+ quality_key = qualities(['xs', 's', 'm', 'l', 'xl'])
+ largest = max(thumbnails, key=lambda thumb: quality_key(thumb[1]))
+ thumbnail = 'http://www.ndr.de' + largest[0]
+
+ for format_id in 'lo', 'hi', 'hq':
formats.append({
'url': '%s.%s.mp4' % (video_url.group('video'), format_id),
'format_id': format_id,
diff --git a/youtube_dl/extractor/ndtv.py b/youtube_dl/extractor/ndtv.py
index d81df3c10..95e7d63aa 100644
--- a/youtube_dl/extractor/ndtv.py
+++ b/youtube_dl/extractor/ndtv.py
@@ -1,22 +1,28 @@
+from __future__ import unicode_literals
+
import re
from .common import InfoExtractor
-from ..utils import month_by_name
+from ..utils import (
+ month_by_name,
+ int_or_none,
+)
class NDTVIE(InfoExtractor):
_VALID_URL = r'^https?://(?:www\.)?ndtv\.com/video/player/[^/]*/[^/]*/(?P<id>[a-z0-9]+)'
_TEST = {
- u"url": u"http://www.ndtv.com/video/player/news/ndtv-exclusive-don-t-need-character-certificate-from-rahul-gandhi-says-arvind-kejriwal/300710",
- u"file": u"300710.mp4",
- u"md5": u"39f992dbe5fb531c395d8bbedb1e5e88",
- u"info_dict": {
- u"title": u"NDTV exclusive: Don't need character certificate from Rahul Gandhi, says Arvind Kejriwal",
- u"description": u"In an exclusive interview to NDTV, Aam Aadmi Party's Arvind Kejriwal says it makes no difference to him that Rahul Gandhi said the Congress needs to learn from his party.",
- u"upload_date": u"20131208",
- u"duration": 1327,
- u"thumbnail": u"http://i.ndtvimg.com/video/images/vod/medium/2013-12/big_300710_1386518307.jpg",
+ 'url': 'http://www.ndtv.com/video/player/news/ndtv-exclusive-don-t-need-character-certificate-from-rahul-gandhi-says-arvind-kejriwal/300710',
+ 'md5': '39f992dbe5fb531c395d8bbedb1e5e88',
+ 'info_dict': {
+ 'id': '300710',
+ 'ext': 'mp4',
+ 'title': "NDTV exclusive: Don't need character certificate from Rahul Gandhi, says Arvind Kejriwal",
+ 'description': 'md5:ab2d4b4a6056c5cb4caa6d729deabf02',
+ 'upload_date': '20131208',
+ 'duration': 1327,
+ 'thumbnail': 'http://i.ndtvimg.com/video/images/vod/medium/2013-12/big_300710_1386518307.jpg',
},
}
@@ -27,13 +33,12 @@ class NDTVIE(InfoExtractor):
webpage = self._download_webpage(url, video_id)
filename = self._search_regex(
- r"__filename='([^']+)'", webpage, u'video filename')
- video_url = (u'http://bitcast-b.bitgravity.com/ndtvod/23372/ndtv/%s' %
+ r"__filename='([^']+)'", webpage, 'video filename')
+ video_url = ('http://bitcast-b.bitgravity.com/ndtvod/23372/ndtv/%s' %
filename)
- duration_str = filename = self._search_regex(
- r"__duration='([^']+)'", webpage, u'duration', fatal=False)
- duration = None if duration_str is None else int(duration_str)
+ duration = int_or_none(self._search_regex(
+ r"__duration='([^']+)'", webpage, 'duration', fatal=False))
date_m = re.search(r'''(?x)
<p\s+class="vod_dateline">\s*
@@ -41,7 +46,7 @@ class NDTVIE(InfoExtractor):
(?P<monthname>[A-Za-z]+)\s+(?P<day>[0-9]+),\s*(?P<year>[0-9]+)
''', webpage)
upload_date = None
- assert date_m
+
if date_m is not None:
month = month_by_name(date_m.group('monthname'))
if month is not None:
@@ -49,14 +54,19 @@ class NDTVIE(InfoExtractor):
date_m.group('year'), month, int(date_m.group('day')))
description = self._og_search_description(webpage)
- READ_MORE = u' (Read more)'
+ READ_MORE = ' (Read more)'
if description.endswith(READ_MORE):
description = description[:-len(READ_MORE)]
+ title = self._og_search_title(webpage)
+ TITLE_SUFFIX = ' - NDTV'
+ if title.endswith(TITLE_SUFFIX):
+ title = title[:-len(TITLE_SUFFIX)]
+
return {
'id': video_id,
'url': video_url,
- 'title': self._og_search_title(webpage),
+ 'title': title,
'description': description,
'thumbnail': self._og_search_thumbnail(webpage),
'duration': duration,
diff --git a/youtube_dl/extractor/newstube.py b/youtube_dl/extractor/newstube.py
index 2fd5b8f04..551bd4d7a 100644
--- a/youtube_dl/extractor/newstube.py
+++ b/youtube_dl/extractor/newstube.py
@@ -4,18 +4,19 @@ from __future__ import unicode_literals
import re
from .common import InfoExtractor
+from ..utils import ExtractorError
class NewstubeIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?newstube\.ru/media/(?P<id>.+)'
_TEST = {
- 'url': 'http://newstube.ru/media/na-korable-progress-prodolzhaetsya-testirovanie-sistemy-kurs',
+ 'url': 'http://www.newstube.ru/media/telekanal-cnn-peremestil-gorod-slavyansk-v-krym',
'info_dict': {
- 'id': 'd156a237-a6e9-4111-a682-039995f721f1',
+ 'id': '728e0ef2-e187-4012-bac0-5a081fdcb1f6',
'ext': 'flv',
- 'title': 'На корабле «Прогресс» продолжается тестирование системы «Курс»',
- 'description': 'md5:d0cbe7b4a6f600552617e48548d5dc77',
- 'duration': 20.04,
+ 'title': 'Телеканал CNN переместил город Славянск в Крым',
+ 'description': 'md5:419a8c9f03442bc0b0a794d689360335',
+ 'duration': 31.05,
},
'params': {
# rtmp download
@@ -40,6 +41,10 @@ class NewstubeIE(InfoExtractor):
def ns(s):
return s.replace('/', '/%(ns)s') % {'ns': '{http://app1.newstube.ru/N2SiteWS/player.asmx}'}
+ error_message = player.find(ns('./ErrorMessage'))
+ if error_message is not None:
+ raise ExtractorError('%s returned error: %s' % (self.IE_NAME, error_message.text), expected=True)
+
session_id = player.find(ns('./SessionId')).text
media_info = player.find(ns('./Medias/MediaInfo'))
title = media_info.find(ns('./Name')).text
diff --git a/youtube_dl/extractor/niconico.py b/youtube_dl/extractor/niconico.py
index 517a72561..c0c139b5d 100644
--- a/youtube_dl/extractor/niconico.py
+++ b/youtube_dl/extractor/niconico.py
@@ -8,10 +8,9 @@ from ..utils import (
compat_urllib_parse,
compat_urllib_request,
compat_urlparse,
- compat_str,
-
- ExtractorError,
unified_strdate,
+ parse_duration,
+ int_or_none,
)
@@ -30,6 +29,7 @@ class NiconicoIE(InfoExtractor):
'uploader_id': '2698420',
'upload_date': '20131123',
'description': '(c) copyright 2008, Blender Foundation / www.bigbuckbunny.org',
+ 'duration': 33,
},
'params': {
'username': 'ydl.niconico@gmail.com',
@@ -37,17 +37,20 @@ class NiconicoIE(InfoExtractor):
},
}
- _VALID_URL = r'^https?://(?:www\.|secure\.)?nicovideo\.jp/watch/([a-z][a-z][0-9]+)(?:.*)$'
+ _VALID_URL = r'https?://(?:www\.|secure\.)?nicovideo\.jp/watch/((?:[a-z]{2})?[0-9]+)'
_NETRC_MACHINE = 'niconico'
+ # Whether to authenticate before downloading; enabled in _real_initialize when a username is supplied
+ _AUTHENTICATE = False
def _real_initialize(self):
- self._login()
+ if self._downloader.params.get('username', None) is not None:
+ self._AUTHENTICATE = True
+
+ if self._AUTHENTICATE:
+ self._login()
def _login(self):
(username, password) = self._get_login_info()
- if username is None:
- # Login is required
- raise ExtractorError('No login info available, needed for using %s.' % self.IE_NAME, expected=True)
# Log in
login_form_strs = {
@@ -79,44 +82,66 @@ class NiconicoIE(InfoExtractor):
'http://ext.nicovideo.jp/api/getthumbinfo/' + video_id, video_id,
note='Downloading video info page')
- # Get flv info
- flv_info_webpage = self._download_webpage(
- 'http://flapi.nicovideo.jp/api/getflv?v=' + video_id,
- video_id, 'Downloading flv info')
+ if self._AUTHENTICATE:
+ # Get flv info
+ flv_info_webpage = self._download_webpage(
+ 'http://flapi.nicovideo.jp/api/getflv?v=' + video_id,
+ video_id, 'Downloading flv info')
+ else:
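+ # Without credentials, go through the external (thumb_watch) player API, which hands out a thumbPlayKey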
+ # Get external player info
+ ext_player_info = self._download_webpage(
+ 'http://ext.nicovideo.jp/thumb_watch/' + video_id, video_id)
+ thumb_play_key = self._search_regex(
+ r'\'thumbPlayKey\'\s*:\s*\'(.*?)\'', ext_player_info, 'thumbPlayKey')
+
+ # Get flv info
+ flv_info_data = compat_urllib_parse.urlencode({
+ 'k': thumb_play_key,
+ 'v': video_id
+ })
+ flv_info_request = compat_urllib_request.Request(
+ 'http://ext.nicovideo.jp/thumb_watch', flv_info_data,
+ {'Content-Type': 'application/x-www-form-urlencoded'})
+ flv_info_webpage = self._download_webpage(
+ flv_info_request, video_id,
+ note='Downloading flv info', errnote='Unable to download flv info')
+
video_real_url = compat_urlparse.parse_qs(flv_info_webpage)['url'][0]
# Start extracting information
- video_title = video_info.find('.//title').text
- video_extension = video_info.find('.//movie_type').text
- video_format = video_extension.upper()
- video_thumbnail = video_info.find('.//thumbnail_url').text
- video_description = video_info.find('.//description').text
- video_uploader_id = video_info.find('.//user_id').text
- video_upload_date = unified_strdate(video_info.find('.//first_retrieve').text.split('+')[0])
- video_view_count = video_info.find('.//view_counter').text
- video_webpage_url = video_info.find('.//watch_url').text
-
- # uploader
- video_uploader = video_uploader_id
- url = 'http://seiga.nicovideo.jp/api/user/info?id=' + video_uploader_id
- try:
- user_info = self._download_xml(
- url, video_id, note='Downloading user information')
- video_uploader = user_info.find('.//nickname').text
- except ExtractorError as err:
- self._downloader.report_warning('Unable to download user info webpage: %s' % compat_str(err))
+ title = video_info.find('.//title').text
+ extension = video_info.find('.//movie_type').text
+ video_format = extension.upper()
+ thumbnail = video_info.find('.//thumbnail_url').text
+ description = video_info.find('.//description').text
+ upload_date = unified_strdate(video_info.find('.//first_retrieve').text.split('+')[0])
+ view_count = int_or_none(video_info.find('.//view_counter').text)
+ comment_count = int_or_none(video_info.find('.//comment_num').text)
+ duration = parse_duration(video_info.find('.//length').text)
+ webpage_url = video_info.find('.//watch_url').text
+
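+ # Channel uploads expose ch_id/ch_name, regular user uploads expose user_id/user_nickname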
+ if video_info.find('.//ch_id') is not None:
+ uploader_id = video_info.find('.//ch_id').text
+ uploader = video_info.find('.//ch_name').text
+ elif video_info.find('.//user_id') is not None:
+ uploader_id = video_info.find('.//user_id').text
+ uploader = video_info.find('.//user_nickname').text
+ else:
+ uploader_id = uploader = None
return {
'id': video_id,
'url': video_real_url,
- 'title': video_title,
- 'ext': video_extension,
+ 'title': title,
+ 'ext': extension,
'format': video_format,
- 'thumbnail': video_thumbnail,
- 'description': video_description,
- 'uploader': video_uploader,
- 'upload_date': video_upload_date,
- 'uploader_id': video_uploader_id,
- 'view_count': video_view_count,
- 'webpage_url': video_webpage_url,
+ 'thumbnail': thumbnail,
+ 'description': description,
+ 'uploader': uploader,
+ 'upload_date': upload_date,
+ 'uploader_id': uploader_id,
+ 'view_count': view_count,
+ 'comment_count': comment_count,
+ 'duration': duration,
+ 'webpage_url': webpage_url,
}
diff --git a/youtube_dl/extractor/ninegag.py b/youtube_dl/extractor/ninegag.py
index c2e7b67c7..33daa0dec 100644
--- a/youtube_dl/extractor/ninegag.py
+++ b/youtube_dl/extractor/ninegag.py
@@ -47,7 +47,7 @@ class NineGagIE(InfoExtractor):
webpage = self._download_webpage(url, display_id)
post_view = json.loads(self._html_search_regex(
- r'var postView = new app\.PostView\({\s*post:\s*({.+?}),', webpage, 'post view'))
+ r'var postView = new app\.PostView\({\s*post:\s*({.+?}),\s*posts:\s*prefetchedCurrentPost', webpage, 'post view'))
youtube_id = post_view['videoExternalId']
title = post_view['title']
diff --git a/youtube_dl/extractor/noco.py b/youtube_dl/extractor/noco.py
index d451cd1bf..da203538d 100644
--- a/youtube_dl/extractor/noco.py
+++ b/youtube_dl/extractor/noco.py
@@ -35,7 +35,7 @@ class NocoIE(InfoExtractor):
video_id = mobj.group('id')
medias = self._download_json(
- 'http://api.noco.tv/1.0/video/medias/%s' % video_id, video_id, 'Downloading video JSON')
+ 'https://api.noco.tv/1.0/video/medias/%s' % video_id, video_id, 'Downloading video JSON')
formats = []
@@ -43,7 +43,7 @@ class NocoIE(InfoExtractor):
format_id = fmt['quality_key']
file = self._download_json(
- 'http://api.noco.tv/1.0/video/file/%s/fr/%s' % (format_id.lower(), video_id),
+ 'https://api.noco.tv/1.0/video/file/%s/fr/%s' % (format_id.lower(), video_id),
video_id, 'Downloading %s video JSON' % format_id)
file_url = file['file']
@@ -71,7 +71,7 @@ class NocoIE(InfoExtractor):
self._sort_formats(formats)
show = self._download_json(
- 'http://api.noco.tv/1.0/shows/show/%s' % video_id, video_id, 'Downloading show JSON')[0]
+ 'https://api.noco.tv/1.0/shows/show/%s' % video_id, video_id, 'Downloading show JSON')[0]
upload_date = unified_strdate(show['indexed'])
uploader = show['partner_name']
diff --git a/youtube_dl/extractor/nowness.py b/youtube_dl/extractor/nowness.py
index b1bcb7e54..1c5e9401f 100644
--- a/youtube_dl/extractor/nowness.py
+++ b/youtube_dl/extractor/nowness.py
@@ -4,9 +4,7 @@ import re
from .brightcove import BrightcoveIE
from .common import InfoExtractor
-from ..utils import (
- ExtractorError,
-)
+from ..utils import ExtractorError
class NownessIE(InfoExtractor):
@@ -14,9 +12,10 @@ class NownessIE(InfoExtractor):
_TEST = {
'url': 'http://www.nowness.com/day/2013/6/27/3131/candor--the-art-of-gesticulation',
- 'file': '2520295746001.mp4',
- 'md5': '0ece2f70a7bd252c7b00f3070182d418',
+ 'md5': '068bc0202558c2e391924cb8cc470676',
'info_dict': {
+ 'id': '2520295746001',
+ 'ext': 'mp4',
'description': 'Candor: The Art of Gesticulation',
'uploader': 'Nowness',
'title': 'Candor: The Art of Gesticulation',
diff --git a/youtube_dl/extractor/npo.py b/youtube_dl/extractor/npo.py
new file mode 100644
index 000000000..fbcbe1f40
--- /dev/null
+++ b/youtube_dl/extractor/npo.py
@@ -0,0 +1,62 @@
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+ unified_strdate,
+)
+
+
+class NPOIE(InfoExtractor):
+ IE_NAME = 'npo.nl'
+ _VALID_URL = r'https?://www\.npo\.nl/[^/]+/[^/]+/(?P<id>[^/?]+)'
+
+ _TEST = {
+ 'url': 'http://www.npo.nl/nieuwsuur/22-06-2014/VPWON_1220719',
+ 'md5': '4b3f9c429157ec4775f2c9cb7b911016',
+ 'info_dict': {
+ 'id': 'VPWON_1220719',
+ 'ext': 'mp4',
+ 'title': 'Nieuwsuur',
+ 'description': 'Dagelijks tussen tien en elf: nieuws, sport en achtergronden.',
+ 'upload_date': '20140622',
+ },
+ }
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ video_id = mobj.group('id')
+
+ metadata = self._download_json(
+ 'http://e.omroep.nl/metadata/aflevering/%s' % video_id,
+ video_id,
+ # Strip the JSONP callback wrapper (parseMetadata(...)) around the metadata
+ transform_source=lambda j: re.sub(r'parseMetadata\((.*?)\);\n//epc', r'\1', j)
+ )
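+ # A player token from ida.omroep.nl is needed before the stream locations can be requested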
+ token_page = self._download_webpage(
+ 'http://ida.omroep.nl/npoplayer/i.js',
+ video_id,
+ note='Downloading token'
+ )
+ token = self._search_regex(r'npoplayer.token = "(.+?)"', token_page, 'token')
+ streams_info = self._download_json(
+ 'http://ida.omroep.nl/odi/?prid=%s&puboptions=h264_std&adaptive=yes&token=%s' % (video_id, token),
+ video_id
+ )
+
+ stream_info = self._download_json(
+ streams_info['streams'][0] + '&type=json',
+ video_id,
+ 'Downloading stream info'
+ )
+
+ return {
+ 'id': video_id,
+ 'title': metadata['titel'],
+ 'ext': 'mp4',
+ 'url': stream_info['url'],
+ 'description': metadata['info'],
+ 'thumbnail': metadata['images'][-1]['url'],
+ 'upload_date': unified_strdate(metadata['gidsdatum']),
+ }
diff --git a/youtube_dl/extractor/nrk.py b/youtube_dl/extractor/nrk.py
index e6d68b836..96f0ae1eb 100644
--- a/youtube_dl/extractor/nrk.py
+++ b/youtube_dl/extractor/nrk.py
@@ -4,7 +4,11 @@ from __future__ import unicode_literals
import re
from .common import InfoExtractor
-from ..utils import ExtractorError
+from ..utils import (
+ ExtractorError,
+ float_or_none,
+ unified_strdate,
+)
class NRKIE(InfoExtractor):
@@ -64,4 +68,77 @@ class NRKIE(InfoExtractor):
'title': data['title'],
'description': data['description'],
'thumbnail': thumbnail,
- } \ No newline at end of file
+ }
+
+
+class NRKTVIE(InfoExtractor):
+ _VALID_URL = r'http://tv\.nrk(?:super)?\.no/(?:serie/[^/]+|program)/(?P<id>[a-zA-Z]{4}\d{8})'
+
+ _TESTS = [
+ {
+ 'url': 'http://tv.nrk.no/serie/20-spoersmaal-tv/MUHH48000314/23-05-2014',
+ 'md5': '7b96112fbae1faf09a6f9ae1aff6cb84',
+ 'info_dict': {
+ 'id': 'MUHH48000314',
+ 'ext': 'flv',
+ 'title': '20 spørsmål',
+ 'description': 'md5:bdea103bc35494c143c6a9acdd84887a',
+ 'upload_date': '20140523',
+ 'duration': 1741.52,
+ }
+ },
+ {
+ 'url': 'http://tv.nrk.no/program/mdfp15000514',
+ 'md5': 'af01795a31f1cf7265c8657534d8077b',
+ 'info_dict': {
+ 'id': 'mdfp15000514',
+ 'ext': 'flv',
+ 'title': 'Kunnskapskanalen: Grunnlovsjubiléet - Stor ståhei for ingenting',
+ 'description': 'md5:654c12511f035aed1e42bdf5db3b206a',
+ 'upload_date': '20140524',
+ 'duration': 4605.0,
+ }
+ },
+ ]
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ video_id = mobj.group('id')
+
+ page = self._download_webpage(url, video_id)
+
+ title = self._html_search_meta('title', page, 'title')
+ description = self._html_search_meta('description', page, 'description')
+ thumbnail = self._html_search_regex(r'data-posterimage="([^"]+)"', page, 'thumbnail', fatal=False)
+ upload_date = unified_strdate(self._html_search_meta('rightsfrom', page, 'upload date', fatal=False))
+ duration = float_or_none(
+ self._html_search_regex(r'data-duration="([^"]+)"', page, 'duration', fatal=False))
+
+ formats = []
+
+ f4m_url = re.search(r'data-media="([^"]+)"', page)
+ if f4m_url:
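+ # Akamai HDS manifests commonly require the hdcore/plugin query parameters, hence the suffix below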
+ formats.append({
+ 'url': f4m_url.group(1) + '?hdcore=3.1.1&plugin=aasp-3.1.1.69.124',
+ 'format_id': 'f4m',
+ 'ext': 'flv',
+ })
+
+ m3u8_url = re.search(r'data-hls-media="([^"]+)"', page)
+ if m3u8_url:
+ formats.append({
+ 'url': m3u8_url.group(1),
+ 'format_id': 'm3u8',
+ })
+
+ self._sort_formats(formats)
+
+ return {
+ 'id': video_id,
+ 'title': title,
+ 'description': description,
+ 'thumbnail': thumbnail,
+ 'upload_date': upload_date,
+ 'duration': duration,
+ 'formats': formats,
+ }
diff --git a/youtube_dl/extractor/ntv.py b/youtube_dl/extractor/ntv.py
index 733ed6c26..ed60314ec 100644
--- a/youtube_dl/extractor/ntv.py
+++ b/youtube_dl/extractor/ntv.py
@@ -5,7 +5,6 @@ import re
from .common import InfoExtractor
from ..utils import (
- ExtractorError,
unescapeHTML
)
diff --git a/youtube_dl/extractor/nuvid.py b/youtube_dl/extractor/nuvid.py
index f0befa116..280328b78 100644
--- a/youtube_dl/extractor/nuvid.py
+++ b/youtube_dl/extractor/nuvid.py
@@ -3,6 +3,11 @@ from __future__ import unicode_literals
import re
from .common import InfoExtractor
+from ..utils import (
+ parse_duration,
+ unified_strdate,
+ compat_urllib_request,
+)
class NuvidIE(InfoExtractor):
@@ -13,8 +18,10 @@ class NuvidIE(InfoExtractor):
'info_dict': {
'id': '1310741',
'ext': 'mp4',
- "title": "Horny babes show their awesome bodeis and",
- "age_limit": 18,
+ 'title': 'Horny babes show their awesome bodeis and',
+ 'duration': 129,
+ 'upload_date': '20140508',
+ 'age_limit': 18,
}
}
@@ -22,27 +29,41 @@ class NuvidIE(InfoExtractor):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
- murl = url.replace('://www.', '://m.')
- webpage = self._download_webpage(murl, video_id)
-
- title = self._html_search_regex(
- r'<div class="title">\s+<h2[^>]*>([^<]+)</h2>',
- webpage, 'title').strip()
+ formats = []
- url_end = self._html_search_regex(
- r'href="(/mp4/[^"]+)"[^>]*data-link_type="mp4"',
- webpage, 'video_url')
- video_url = 'http://m.nuvid.com' + url_end
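+ # The mobile site serves a 3gp or an mp4 link depending on the dwnld_speed cookie, so try both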
+ for dwnld_speed, format_id in [(0, '3gp'), (5, 'mp4')]:
+ request = compat_urllib_request.Request(
+ 'http://m.nuvid.com/play/%s' % video_id)
+ request.add_header('Cookie', 'skip_download_page=1; dwnld_speed=%d; adv_show=1' % dwnld_speed)
+ webpage = self._download_webpage(
+ request, video_id, 'Downloading %s page' % format_id)
+ video_url = self._html_search_regex(
+ r'<a href="([^"]+)"\s*>Continue to watch video', webpage, '%s video URL' % format_id, fatal=False)
+ if not video_url:
+ continue
+ formats.append({
+ 'url': video_url,
+ 'format_id': format_id,
+ })
+ webpage = self._download_webpage(
+ 'http://m.nuvid.com/video/%s' % video_id, video_id, 'Downloading video page')
+ title = self._html_search_regex(
+ r'<div class="title">\s+<h2[^>]*>([^<]+)</h2>', webpage, 'title').strip()
thumbnail = self._html_search_regex(
r'href="(/thumbs/[^"]+)"[^>]*data-link_type="thumbs"',
webpage, 'thumbnail URL', fatal=False)
+ duration = parse_duration(self._html_search_regex(
+ r'Length:\s*<span>(\d{2}:\d{2})</span>', webpage, 'duration', fatal=False))
+ upload_date = unified_strdate(self._html_search_regex(
+ r'Added:\s*<span>(\d{4}-\d{2}-\d{2})</span>', webpage, 'upload date', fatal=False))
return {
'id': video_id,
- 'url': video_url,
- 'ext': 'mp4',
'title': title,
- 'thumbnail': thumbnail,
+ 'thumbnail': 'http://m.nuvid.com%s' % thumbnail if thumbnail else None,
+ 'duration': duration,
+ 'upload_date': upload_date,
'age_limit': 18,
- }
+ 'formats': formats,
+ } \ No newline at end of file
diff --git a/youtube_dl/extractor/photobucket.py b/youtube_dl/extractor/photobucket.py
index a59953497..8aa69c46e 100644
--- a/youtube_dl/extractor/photobucket.py
+++ b/youtube_dl/extractor/photobucket.py
@@ -1,10 +1,10 @@
from __future__ import unicode_literals
-import datetime
import json
import re
from .common import InfoExtractor
+from ..utils import compat_urllib_parse
class PhotobucketIE(InfoExtractor):
@@ -14,6 +14,7 @@ class PhotobucketIE(InfoExtractor):
'file': 'zpsc0c3b9fa.mp4',
'md5': '7dabfb92b0a31f6c16cebc0f8e60ff99',
'info_dict': {
+ 'timestamp': 1367669341,
'upload_date': '20130504',
'uploader': 'rachaneronas',
'title': 'Tired of Link Building? Try BacklinkMyDomain.com!',
@@ -32,11 +33,12 @@ class PhotobucketIE(InfoExtractor):
info_json = self._search_regex(r'Pb\.Data\.Shared\.put\(Pb\.Data\.Shared\.MEDIA, (.*?)\);',
webpage, 'info json')
info = json.loads(info_json)
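+ # The direct mp4 URL is percent-encoded inside the HTML embed code of the share links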
+ url = compat_urllib_parse.unquote(self._html_search_regex(r'file=(.+\.mp4)', info['linkcodes']['html'], 'url'))
return {
'id': video_id,
- 'url': info['downloadUrl'],
+ 'url': url,
'uploader': info['username'],
- 'upload_date': datetime.date.fromtimestamp(info['creationDate']).strftime('%Y%m%d'),
+ 'timestamp': info['creationDate'],
'title': info['title'],
'ext': video_extension,
'thumbnail': info['thumbUrl'],
diff --git a/youtube_dl/extractor/pornhub.py b/youtube_dl/extractor/pornhub.py
index 7dd3dca0d..4118ee956 100644
--- a/youtube_dl/extractor/pornhub.py
+++ b/youtube_dl/extractor/pornhub.py
@@ -45,7 +45,7 @@ class PornHubIE(InfoExtractor):
video_title = self._html_search_regex(r'<h1 [^>]+>([^<]+)', webpage, 'title')
video_uploader = self._html_search_regex(
- r'(?s)<div class="video-info-row">\s*From:&nbsp;.+?<(?:a href="/users/|<span class="username)[^>]+>(.+?)<',
+ r'(?s)From:&nbsp;.+?<(?:a href="/users/|<span class="username)[^>]+>(.+?)<',
webpage, 'uploader', fatal=False)
thumbnail = self._html_search_regex(r'"image_url":"([^"]+)', webpage, 'thumbnail', fatal=False)
if thumbnail:
diff --git a/youtube_dl/extractor/prosiebensat1.py b/youtube_dl/extractor/prosiebensat1.py
index e4c4ad714..da64a1a7b 100644
--- a/youtube_dl/extractor/prosiebensat1.py
+++ b/youtube_dl/extractor/prosiebensat1.py
@@ -158,19 +158,19 @@ class ProSiebenSat1IE(InfoExtractor):
_CLIPID_REGEXES = [
r'"clip_id"\s*:\s+"(\d+)"',
r'clipid: "(\d+)"',
- r'clipId=(\d+)',
+ r'clip[iI]d=(\d+)',
]
_TITLE_REGEXES = [
r'<h2 class="subtitle" itemprop="name">\s*(.+?)</h2>',
r'<header class="clearfix">\s*<h3>(.+?)</h3>',
r'<!-- start video -->\s*<h1>(.+?)</h1>',
- r'<div class="ep-femvideos-pi4-video-txt">\s*<h2>(.+?)</h2>',
+ r'<h1 class="att-name">\s*(.+?)</h1>',
]
_DESCRIPTION_REGEXES = [
r'<p itemprop="description">\s*(.+?)</p>',
r'<div class="videoDecription">\s*<p><strong>Beschreibung</strong>: (.+?)</p>',
r'<div class="g-plusone" data-size="medium"></div>\s*</div>\s*</header>\s*(.+?)\s*<footer>',
- r'<p>(.+?)</p>\s*<div class="ep-femvideos-pi4-video-footer">',
+ r'<p class="att-description">\s*(.+?)\s*</p>',
]
_UPLOAD_DATE_REGEXES = [
r'<meta property="og:published_time" content="(.+?)">',
diff --git a/youtube_dl/extractor/rai.py b/youtube_dl/extractor/rai.py
new file mode 100644
index 000000000..ba3dd707f
--- /dev/null
+++ b/youtube_dl/extractor/rai.py
@@ -0,0 +1,122 @@
+from __future__ import unicode_literals
+
+import re
+
+from .subtitles import SubtitlesInfoExtractor
+from ..utils import (
+ parse_duration,
+ unified_strdate,
+ compat_urllib_parse,
+)
+
+
+class RaiIE(SubtitlesInfoExtractor):
+ _VALID_URL = r'(?P<url>http://(?:.+?\.)?(?:rai\.it|rai\.tv|rainews\.it)/dl/.+?-(?P<id>[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})(?:-.+?)?\.html)'
+ _TESTS = [
+ {
+ 'url': 'http://www.rai.tv/dl/RaiTV/programmi/media/ContentItem-cb27157f-9dd0-4aee-b788-b1f67643a391.html',
+ 'md5': 'c064c0b2d09c278fb293116ef5d0a32d',
+ 'info_dict': {
+ 'id': 'cb27157f-9dd0-4aee-b788-b1f67643a391',
+ 'ext': 'mp4',
+ 'title': 'Report del 07/04/2014',
+ 'description': 'md5:f27c544694cacb46a078db84ec35d2d9',
+ 'upload_date': '20140407',
+ 'duration': 6160,
+ }
+ },
+ {
+ 'url': 'http://www.raisport.rai.it/dl/raiSport/media/rassegna-stampa-04a9f4bd-b563-40cf-82a6-aad3529cb4a9.html',
+ 'md5': '8bb9c151924ce241b74dd52ef29ceafa',
+ 'info_dict': {
+ 'id': '04a9f4bd-b563-40cf-82a6-aad3529cb4a9',
+ 'ext': 'mp4',
+ 'title': 'TG PRIMO TEMPO',
+ 'description': '',
+ 'upload_date': '20140612',
+ 'duration': 1758,
+ },
+ 'skip': 'Error 404',
+ },
+ {
+ 'url': 'http://www.rainews.it/dl/rainews/media/state-of-the-net-Antonella-La-Carpia-regole-virali-7aafdea9-0e5d-49d5-88a6-7e65da67ae13.html',
+ 'md5': '35cf7c229f22eeef43e48b5cf923bef0',
+ 'info_dict': {
+ 'id': '7aafdea9-0e5d-49d5-88a6-7e65da67ae13',
+ 'ext': 'mp4',
+ 'title': 'State of the Net, Antonella La Carpia: regole virali',
+ 'description': 'md5:b0ba04a324126903e3da7763272ae63c',
+ 'upload_date': '20140613',
+ },
+ 'skip': 'Error 404',
+ },
+ {
+ 'url': 'http://www.rai.tv/dl/RaiTV/programmi/media/ContentItem-b4a49761-e0cc-4b14-8736-2729f6f73132-tg2.html',
+ 'md5': '35694f062977fe6619943f08ed935730',
+ 'info_dict': {
+ 'id': 'b4a49761-e0cc-4b14-8736-2729f6f73132',
+ 'ext': 'mp4',
+ 'title': 'Alluvione in Sardegna e dissesto idrogeologico',
+ 'description': 'Edizione delle ore 20:30 ',
+ }
+ },
+ ]
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ video_id = mobj.group('id')
+
+ media = self._download_json('%s?json' % mobj.group('url'), video_id, 'Downloading video JSON')
+
+ title = media.get('name')
+ description = media.get('desc')
+ thumbnail = media.get('image_300') or media.get('image_medium') or media.get('image')
+ duration = parse_duration(media.get('length'))
+ uploader = media.get('author')
+ upload_date = unified_strdate(media.get('date'))
+
+ formats = []
+
+ for format_id in ['wmv', 'm3u8', 'mediaUri', 'h264']:
+ media_url = media.get(format_id)
+ if not media_url:
+ continue
+ formats.append({
+ 'url': media_url,
+ 'format_id': format_id,
+ 'ext': 'mp4',
+ })
+
+ if self._downloader.params.get('listsubtitles', False):
+ page = self._download_webpage(url, video_id)
+ self._list_available_subtitles(video_id, page)
+ return
+
+ subtitles = {}
+ if self._have_to_download_any_subtitles:
+ page = self._download_webpage(url, video_id)
+ subtitles = self.extract_subtitles(video_id, page)
+
+ return {
+ 'id': video_id,
+ 'title': title,
+ 'description': description,
+ 'thumbnail': thumbnail,
+ 'uploader': uploader,
+ 'upload_date': upload_date,
+ 'duration': duration,
+ 'formats': formats,
+ 'subtitles': subtitles,
+ }
+
+ def _get_available_subtitles(self, video_id, webpage):
+ subtitles = {}
+ m = re.search(r'<meta name="closedcaption" content="(?P<captions>[^"]+)"', webpage)
+ if m:
+ captions = m.group('captions')
+ STL_EXT = '.stl'
+ SRT_EXT = '.srt'
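+ # The page advertises .stl captions; request the .srt variant instead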
+ if captions.endswith(STL_EXT):
+ captions = captions[:-len(STL_EXT)] + SRT_EXT
+ subtitles['it'] = 'http://www.rai.tv%s' % compat_urllib_parse.quote(captions)
+ return subtitles \ No newline at end of file
diff --git a/youtube_dl/extractor/slutload.py b/youtube_dl/extractor/slutload.py
index ecc0abfda..e6e7d0865 100644
--- a/youtube_dl/extractor/slutload.py
+++ b/youtube_dl/extractor/slutload.py
@@ -3,9 +3,6 @@ from __future__ import unicode_literals
import re
from .common import InfoExtractor
-from ..utils import (
- ExtractorError,
-)
class SlutloadIE(InfoExtractor):
diff --git a/youtube_dl/extractor/soundcloud.py b/youtube_dl/extractor/soundcloud.py
index d6f453fb9..14ec9452d 100644
--- a/youtube_dl/extractor/soundcloud.py
+++ b/youtube_dl/extractor/soundcloud.py
@@ -1,7 +1,6 @@
# encoding: utf-8
from __future__ import unicode_literals
-import json
import re
import itertools
@@ -12,6 +11,7 @@ from ..utils import (
compat_urllib_parse,
ExtractorError,
+ int_or_none,
unified_strdate,
)
@@ -44,7 +44,8 @@ class SoundcloudIE(InfoExtractor):
"upload_date": "20121011",
"description": "No Downloads untill we record the finished version this weekend, i was too pumped n i had to post it , earl is prolly gonna b hella p.o'd",
"uploader": "E.T. ExTerrestrial Music",
- "title": "Lostin Powers - She so Heavy (SneakPreview) Adrian Ackers Blueprint 1"
+ "title": "Lostin Powers - She so Heavy (SneakPreview) Adrian Ackers Blueprint 1",
+ "duration": 143,
}
},
# not streamable song
@@ -57,6 +58,7 @@ class SoundcloudIE(InfoExtractor):
'description': 'From Stockholm Sweden\r\nPovel / Magnus / Filip / David\r\nwww.theroyalconcept.com',
'uploader': 'The Royal Concept',
'upload_date': '20120521',
+ 'duration': 227,
},
'params': {
# rtmp
@@ -74,6 +76,7 @@ class SoundcloudIE(InfoExtractor):
'uploader': 'jaimeMF',
'description': 'test chars: \"\'/\\ä↭',
'upload_date': '20131209',
+ 'duration': 9,
},
},
# downloadable song
@@ -87,6 +90,7 @@ class SoundcloudIE(InfoExtractor):
'description': 'Vocals',
'uploader': 'Sim Gretina',
'upload_date': '20130815',
+ #'duration': 42,
},
},
]
@@ -119,6 +123,7 @@ class SoundcloudIE(InfoExtractor):
'title': info['title'],
'description': info['description'],
'thumbnail': thumbnail,
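+ # The API reports duration in milliseconds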
+ 'duration': int_or_none(info.get('duration'), 1000),
}
formats = []
if info.get('downloadable', False):
@@ -250,7 +255,7 @@ class SoundcloudSetIE(SoundcloudIE):
class SoundcloudUserIE(SoundcloudIE):
- _VALID_URL = r'https?://(www\.)?soundcloud\.com/(?P<user>[^/]+)(/?(tracks/)?)?(\?.*)?$'
+ _VALID_URL = r'https?://(www\.)?soundcloud\.com/(?P<user>[^/]+)/?((?P<rsrc>tracks|likes)/?)?(\?.*)?$'
IE_NAME = 'soundcloud:user'
# it's in tests/test_playlists.py
@@ -259,24 +264,31 @@ class SoundcloudUserIE(SoundcloudIE):
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
uploader = mobj.group('user')
+ resource = mobj.group('rsrc')
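+ # The site's "likes" tab maps to the "favorites" resource of the API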
+ if resource is None:
+ resource = 'tracks'
+ elif resource == 'likes':
+ resource = 'favorites'
url = 'http://soundcloud.com/%s/' % uploader
resolv_url = self._resolv_url(url)
user = self._download_json(
resolv_url, uploader, 'Downloading user info')
- base_url = 'http://api.soundcloud.com/users/%s/tracks.json?' % uploader
+ base_url = 'http://api.soundcloud.com/users/%s/%s.json?' % (uploader, resource)
entries = []
for i in itertools.count():
data = compat_urllib_parse.urlencode({
'offset': i * 50,
+ 'limit': 50,
'client_id': self._CLIENT_ID,
})
new_entries = self._download_json(
base_url + data, uploader, 'Downloading track page %s' % (i + 1))
- entries.extend(self._extract_info_dict(e, quiet=True) for e in new_entries)
- if len(new_entries) < 50:
+ if len(new_entries) == 0:
+ self.to_screen('%s: End page received' % uploader)
break
+ entries.extend(self._extract_info_dict(e, quiet=True) for e in new_entries)
return {
'_type': 'playlist',
diff --git a/youtube_dl/extractor/soundgasm.py b/youtube_dl/extractor/soundgasm.py
new file mode 100644
index 000000000..a4f8ce6c3
--- /dev/null
+++ b/youtube_dl/extractor/soundgasm.py
@@ -0,0 +1,40 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+
+
+class SoundgasmIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:www\.)?soundgasm\.net/u/(?P<user>[0-9a-zA-Z_\-]+)/(?P<title>[0-9a-zA-Z_\-]+)'
+ _TEST = {
+ 'url': 'http://soundgasm.net/u/ytdl/Piano-sample',
+ 'md5': '010082a2c802c5275bb00030743e75ad',
+ 'info_dict': {
+ 'id': '88abd86ea000cafe98f96321b23cc1206cbcbcc9',
+ 'ext': 'm4a',
+ 'title': 'ytdl_Piano-sample',
+ 'description': 'Royalty Free Sample Music'
+ }
+ }
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ display_id = mobj.group('title')
+ audio_title = mobj.group('user') + '_' + mobj.group('title')
+ webpage = self._download_webpage(url, display_id)
+ audio_url = self._html_search_regex(
+ r'(?s)m4a\:\s"([^"]+)"', webpage, 'audio URL')
+ audio_id = re.split(r'[/.]', audio_url)[-2]
+ description = self._html_search_regex(
+ r'(?s)<li>Description:\s(.*?)<\/li>', webpage, 'description',
+ fatal=False)
+
+ return {
+ 'id': audio_id,
+ 'display_id': display_id,
+ 'url': audio_url,
+ 'title': audio_title,
+ 'description': description
+ }
diff --git a/youtube_dl/extractor/spiegel.py b/youtube_dl/extractor/spiegel.py
index 9156d7faf..340a38440 100644
--- a/youtube_dl/extractor/spiegel.py
+++ b/youtube_dl/extractor/spiegel.py
@@ -1,3 +1,4 @@
+# encoding: utf-8
from __future__ import unicode_literals
import re
@@ -9,18 +10,33 @@ class SpiegelIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?spiegel\.de/video/[^/]*-(?P<videoID>[0-9]+)(?:\.html)?(?:#.*)?$'
_TESTS = [{
'url': 'http://www.spiegel.de/video/vulkan-tungurahua-in-ecuador-ist-wieder-aktiv-video-1259285.html',
- 'file': '1259285.mp4',
'md5': '2c2754212136f35fb4b19767d242f66e',
'info_dict': {
+ 'id': '1259285',
+ 'ext': 'mp4',
'title': 'Vulkanausbruch in Ecuador: Der "Feuerschlund" ist wieder aktiv',
+ 'description': 'md5:8029d8310232196eb235d27575a8b9f4',
+ 'duration': 49,
},
- },
- {
+ }, {
'url': 'http://www.spiegel.de/video/schach-wm-videoanalyse-des-fuenften-spiels-video-1309159.html',
- 'file': '1309159.mp4',
'md5': 'f2cdf638d7aa47654e251e1aee360af1',
'info_dict': {
+ 'id': '1309159',
+ 'ext': 'mp4',
'title': 'Schach-WM in der Videoanalyse: Carlsen nutzt die Fehlgriffe des Titelverteidigers',
+ 'description': 'md5:c2322b65e58f385a820c10fa03b2d088',
+ 'duration': 983,
+ },
+ }, {
+ 'url': 'http://www.spiegel.de/video/johann-westhauser-videobotschaft-des-hoehlenforschers-video-1502367.html',
+ 'md5': '54f58ba0e752e3c07bc2a26222dd0acf',
+ 'info_dict': {
+ 'id': '1502367',
+ 'ext': 'mp4',
+ 'title': 'Videobotschaft: Höhlenforscher Westhauser dankt seinen Rettern',
+ 'description': 'md5:c6f1ec11413ebd1088b6813943e5fc91',
+ 'duration': 42,
},
}]
@@ -30,18 +46,20 @@ class SpiegelIE(InfoExtractor):
webpage = self._download_webpage(url, video_id)
- video_title = self._html_search_regex(
+ title = self._html_search_regex(
r'<div class="module-title">(.*?)</div>', webpage, 'title')
+ description = self._html_search_meta('description', webpage, 'description')
+
+ base_url = self._search_regex(
+ r'var\s+server\s*=\s*"([^"]+)\"', webpage, 'server URL')
- xml_url = 'http://video2.spiegel.de/flash/' + video_id + '.xml'
- idoc = self._download_xml(
- xml_url, video_id,
- note='Downloading XML', errnote='Failed to download XML')
+ xml_url = base_url + video_id + '.xml'
+ idoc = self._download_xml(xml_url, video_id)
formats = [
{
'format_id': n.tag.rpartition('type')[2],
- 'url': 'http://video2.spiegel.de/flash/' + n.find('./filename').text,
+ 'url': base_url + n.find('./filename').text,
'width': int(n.find('./width').text),
'height': int(n.find('./height').text),
'abr': int(n.find('./audiobitrate').text),
@@ -59,7 +77,8 @@ class SpiegelIE(InfoExtractor):
return {
'id': video_id,
- 'title': video_title,
+ 'title': title,
+ 'description': description,
'duration': duration,
'formats': formats,
}
diff --git a/youtube_dl/extractor/spiegeltv.py b/youtube_dl/extractor/spiegeltv.py
new file mode 100644
index 000000000..7f388aced
--- /dev/null
+++ b/youtube_dl/extractor/spiegeltv.py
@@ -0,0 +1,81 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+from .common import InfoExtractor
+
+
+class SpiegeltvIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:www\.)?spiegel\.tv/filme/(?P<id>[\-a-z0-9]+)'
+ _TEST = {
+ 'url': 'http://www.spiegel.tv/filme/flug-mh370/',
+ 'info_dict': {
+ 'id': 'flug-mh370',
+ 'ext': 'm4v',
+ 'title': 'Flug MH370',
+ 'description': 'Das Rätsel um die Boeing 777 der Malaysia-Airlines',
+ 'thumbnail': 're:http://.*\.jpg$',
+ },
+ 'params': {
+ # rtmp download
+ 'skip_download': True,
+ }
+ }
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ video_id = mobj.group('id')
+
+ webpage = self._download_webpage(url, video_id)
+ title = self._html_search_regex(r'<h1.*?>(.*?)</h1>', webpage, 'title')
+
+ apihost = 'http://spiegeltv-ivms2-restapi.s3.amazonaws.com'
+ version_json = self._download_json(
+ '%s/version.json' % apihost, video_id,
+ note='Downloading version information')
+ version_name = version_json['version_name']
+
+ slug_json = self._download_json(
+ '%s/%s/restapi/slugs/%s.json' % (apihost, version_name, video_id),
+ video_id,
+ note='Downloading object information')
+ oid = slug_json['object_id']
+
+ media_json = self._download_json(
+ '%s/%s/restapi/media/%s.json' % (apihost, version_name, oid),
+ video_id, note='Downloading media information')
+ uuid = media_json['uuid']
+ is_wide = media_json['is_wide']
+
+ server_json = self._download_json(
+ 'http://www.spiegel.tv/streaming_servers/', video_id,
+ note='Downloading server information')
+ server = server_json[0]['endpoint']
+
+ thumbnails = []
+ for image in media_json['images']:
+ thumbnails.append({
+ 'url': image['url'],
+ 'width': image['width'],
+ 'height': image['height'],
+ })
+
+ description = media_json['subtitle']
+ duration = media_json['duration_in_ms'] / 1000.
+
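+ # The rendition filename encodes the aspect ratio ('16x9' or '4x3')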
+ if is_wide:
+ format = '16x9'
+ else:
+ format = '4x3'
+
+ url = server + 'mp4:' + uuid + '_spiegeltv_0500_' + format + '.m4v'
+
+ return {
+ 'id': video_id,
+ 'title': title,
+ 'url': url,
+ 'ext': 'm4v',
+ 'description': description,
+ 'duration': duration,
+ 'thumbnails': thumbnails
+ } \ No newline at end of file
diff --git a/youtube_dl/extractor/steam.py b/youtube_dl/extractor/steam.py
index 1d8d57224..af689e2c2 100644
--- a/youtube_dl/extractor/steam.py
+++ b/youtube_dl/extractor/steam.py
@@ -53,7 +53,7 @@ class SteamIE(InfoExtractor):
'ext': 'mp4',
'upload_date': '20140329',
'title': 'FRONTIERS - Final Greenlight Trailer',
- 'description': "The final trailer for the Steam Greenlight launch. Hooray, progress! Here's the official Greenlight page: http://steamcommunity.com/sharedfiles/filedetails/?id=242472205",
+ 'description': 'md5:6df4fe8dd494ae811869672b0767e025',
'uploader': 'AAD Productions',
'uploader_id': 'AtomicAgeDogGames',
}
diff --git a/youtube_dl/extractor/streamcz.py b/youtube_dl/extractor/streamcz.py
index 7362904db..73efe9542 100644
--- a/youtube_dl/extractor/streamcz.py
+++ b/youtube_dl/extractor/streamcz.py
@@ -5,13 +5,16 @@ import re
import json
from .common import InfoExtractor
-from ..utils import int_or_none
+from ..utils import (
+ int_or_none,
+ compat_str,
+)
class StreamCZIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?stream\.cz/.+/(?P<videoid>.+)'
- _TEST = {
+ _TESTS = [{
'url': 'http://www.stream.cz/peklonataliri/765767-ecka-pro-deti',
'md5': '6d3ca61a8d0633c9c542b92fcb936b0c',
'info_dict': {
@@ -22,7 +25,18 @@ class StreamCZIE(InfoExtractor):
'thumbnail': 'http://im.stream.cz/episode/52961d7e19d423f8f06f0100',
'duration': 256,
},
- }
+ }, {
+ 'url': 'http://www.stream.cz/blanik/10002447-tri-roky-pro-mazanka',
+ 'md5': '246272e753e26bbace7fcd9deca0650c',
+ 'info_dict': {
+ 'id': '10002447',
+ 'ext': 'mp4',
+ 'title': 'Kancelář Blaník: Tři roky pro Mazánka',
+ 'description': 'md5:9177695a8b756a0a8ab160de4043b392',
+ 'thumbnail': 'http://im.stream.cz/episode/537f838c50c11f8d21320000',
+ 'duration': 368,
+ },
+ }]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
@@ -57,7 +71,7 @@ class StreamCZIE(InfoExtractor):
self._sort_formats(formats)
return {
- 'id': str(jsonData['id']),
+ 'id': compat_str(jsonData['episode_id']),
'title': self._og_search_title(webpage),
'thumbnail': jsonData['episode_image_original_url'].replace('//', 'http://'),
'formats': formats,
diff --git a/youtube_dl/extractor/swrmediathek.py b/youtube_dl/extractor/swrmediathek.py
new file mode 100644
index 000000000..6c688c520
--- /dev/null
+++ b/youtube_dl/extractor/swrmediathek.py
@@ -0,0 +1,104 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import parse_duration
+
+
+class SWRMediathekIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:www\.)?swrmediathek\.de/player\.htm\?show=(?P<id>[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})'
+
+ _TESTS = [{
+ 'url': 'http://swrmediathek.de/player.htm?show=849790d0-dab8-11e3-a953-0026b975f2e6',
+ 'md5': '8c5f6f0172753368547ca8413a7768ac',
+ 'info_dict': {
+ 'id': '849790d0-dab8-11e3-a953-0026b975f2e6',
+ 'ext': 'mp4',
+ 'title': 'SWR odysso',
+ 'description': 'md5:2012e31baad36162e97ce9eb3f157b8a',
+ 'thumbnail': 're:^http:.*\.jpg$',
+ 'duration': 2602,
+ 'upload_date': '20140515',
+ 'uploader': 'SWR Fernsehen',
+ 'uploader_id': '990030',
+ },
+ }, {
+ 'url': 'http://swrmediathek.de/player.htm?show=0e1a8510-ddf2-11e3-9be3-0026b975f2e6',
+ 'md5': 'b10ab854f912eecc5a6b55cd6fc1f545',
+ 'info_dict': {
+ 'id': '0e1a8510-ddf2-11e3-9be3-0026b975f2e6',
+ 'ext': 'mp4',
+ 'title': 'Nachtcafé - Alltagsdroge Alkohol - zwischen Sektempfang und Komasaufen',
+ 'description': 'md5:e0a3adc17e47db2c23aab9ebc36dbee2',
+ 'thumbnail': 're:http://.*\.jpg',
+ 'duration': 5305,
+ 'upload_date': '20140516',
+ 'uploader': 'SWR Fernsehen',
+ 'uploader_id': '990030',
+ },
+ }, {
+ 'url': 'http://swrmediathek.de/player.htm?show=bba23e10-cb93-11e3-bf7f-0026b975f2e6',
+ 'md5': '4382e4ef2c9d7ce6852535fa867a0dd3',
+ 'info_dict': {
+ 'id': 'bba23e10-cb93-11e3-bf7f-0026b975f2e6',
+ 'ext': 'mp3',
+ 'title': 'Saša Stanišic: Vor dem Fest',
+ 'description': 'md5:5b792387dc3fbb171eb709060654e8c9',
+ 'thumbnail': 're:http://.*\.jpg',
+ 'duration': 3366,
+ 'upload_date': '20140520',
+ 'uploader': 'SWR 2',
+ 'uploader_id': '284670',
+ }
+ }]
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ video_id = mobj.group('id')
+
+ video = self._download_json(
+ 'http://swrmediathek.de/AjaxEntry?ekey=%s' % video_id, video_id, 'Downloading video JSON')
+
+ attr = video['attr']
+ media_type = attr['entry_etype']
+
+ formats = []
+ for entry in video['sub']:
+ if entry['name'] != 'entry_media':
+ continue
+
+ entry_attr = entry['attr']
+ codec = entry_attr['val0']
+ quality = int(entry_attr['val1'])
+
+ fmt = {
+ 'url': entry_attr['val2'],
+ 'quality': quality,
+ }
+
+ if media_type == 'Video':
+ fmt.update({
+ 'format_note': ['144p', '288p', '544p'][quality-1],
+ 'vcodec': codec,
+ })
+ elif media_type == 'Audio':
+ fmt.update({
+ 'acodec': codec,
+ })
+ formats.append(fmt)
+
+ self._sort_formats(formats)
+
+ return {
+ 'id': video_id,
+ 'title': attr['entry_title'],
+ 'description': attr['entry_descl'],
+ 'thumbnail': attr['entry_image_16_9'],
+ 'duration': parse_duration(attr['entry_durat']),
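+ # entry_pdatet appears to be YYYYMMDDHHMM; drop the trailing time to get the upload date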
+ 'upload_date': attr['entry_pdatet'][:-4],
+ 'uploader': attr['channel_title'],
+ 'uploader_id': attr['channel_idkey'],
+ 'formats': formats,
+ } \ No newline at end of file
diff --git a/youtube_dl/extractor/tagesschau.py b/youtube_dl/extractor/tagesschau.py
new file mode 100644
index 000000000..25b9864ad
--- /dev/null
+++ b/youtube_dl/extractor/tagesschau.py
@@ -0,0 +1,79 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+
+
+class TagesschauIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:www\.)?tagesschau\.de/multimedia/video/video(?P<id>-?[0-9]+)\.html'
+
+ _TESTS = [{
+ 'url': 'http://www.tagesschau.de/multimedia/video/video1399128.html',
+ 'md5': 'bcdeac2194fb296d599ce7929dfa4009',
+ 'info_dict': {
+ 'id': '1399128',
+ 'ext': 'mp4',
+ 'title': 'Harald Range, Generalbundesanwalt, zu den Ermittlungen',
+ 'description': 'md5:69da3c61275b426426d711bde96463ab',
+ 'thumbnail': 're:^http:.*\.jpg$',
+ },
+ }, {
+ 'url': 'http://www.tagesschau.de/multimedia/video/video-5964.html',
+ 'md5': '66652566900963a3f962333579eeffcf',
+ 'info_dict': {
+ 'id': '5964',
+ 'ext': 'mp4',
+ 'title': 'Nahost-Konflikt: Israel bombadiert Ziele im Gazastreifen und Westjordanland',
+ 'description': 'md5:07bfc78c48eec3145ed4805299a1900a',
+ 'thumbnail': 're:http://.*\.jpg',
+ },
+ }]
+
+ _FORMATS = {
+ 's': {'width': 256, 'height': 144, 'quality': 1},
+ 'm': {'width': 512, 'height': 288, 'quality': 2},
+ 'l': {'width': 960, 'height': 544, 'quality': 3},
+ }
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ video_id = mobj.group('id')
+
+ if video_id.startswith('-'):
+ display_id = video_id.strip('-')
+ else:
+ display_id = video_id
+
+ webpage = self._download_webpage(url, display_id)
+
+ playerpage = self._download_webpage(
+ 'http://www.tagesschau.de/multimedia/video/video%s~player_autoplay-true.html' % video_id,
+ display_id, 'Downloading player page')
+
+ medias = re.findall(
+ r'"(http://media.+?)", type:"video/(.+?)", quality:"(.+?)"',
+ playerpage)
+
+ formats = []
+ for url, ext, res in medias:
+ f = {
+ 'format_id': res + '_' + ext,
+ 'url': url,
+ 'ext': ext,
+ }
+ f.update(self._FORMATS.get(res, {}))
+ formats.append(f)
+
+ self._sort_formats(formats)
+
+ thumbnail = re.findall(r'"(/multimedia/.+?\.jpg)"', playerpage)[-1]
+
+ return {
+ 'id': display_id,
+ 'title': self._og_search_title(webpage).strip(),
+ 'thumbnail': 'http://www.tagesschau.de' + thumbnail,
+ 'formats': formats,
+ 'description': self._og_search_description(webpage).strip(),
+ }
diff --git a/youtube_dl/extractor/teachertube.py b/youtube_dl/extractor/teachertube.py
new file mode 100644
index 000000000..2c2113b14
--- /dev/null
+++ b/youtube_dl/extractor/teachertube.py
@@ -0,0 +1,124 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+ qualities,
+ determine_ext,
+)
+
+
+class TeacherTubeIE(InfoExtractor):
+ IE_NAME = 'teachertube'
+ IE_DESC = 'teachertube.com videos'
+
+ _VALID_URL = r'https?://(?:www\.)?teachertube\.com/(viewVideo\.php\?video_id=|music\.php\?music_id=|video/(?:[\da-z-]+-)?|audio/)(?P<id>\d+)'
+
+ _TESTS = [{
+ 'url': 'http://www.teachertube.com/viewVideo.php?video_id=339997',
+ 'md5': 'f9434ef992fd65936d72999951ee254c',
+ 'info_dict': {
+ 'id': '339997',
+ 'ext': 'mp4',
+ 'title': 'Measures of dispersion from a frequency table',
+ 'description': 'Measures of dispersion from a frequency table',
+ 'thumbnail': 're:http://.*\.jpg',
+ },
+ }, {
+ 'url': 'http://www.teachertube.com/viewVideo.php?video_id=340064',
+ 'md5': '0d625ec6bc9bf50f70170942ad580676',
+ 'info_dict': {
+ 'id': '340064',
+ 'ext': 'mp4',
+ 'title': 'How to Make Paper Dolls _ Paper Art Projects',
+ 'description': 'Learn how to make paper dolls in this simple',
+ 'thumbnail': 're:http://.*\.jpg',
+ },
+ }, {
+ 'url': 'http://www.teachertube.com/music.php?music_id=8805',
+ 'md5': '01e8352006c65757caf7b961f6050e21',
+ 'info_dict': {
+ 'id': '8805',
+ 'ext': 'mp3',
+ 'title': 'PER ASPERA AD ASTRA',
+ 'description': 'RADIJSKA EMISIJA ZRAKOPLOVNE TEHNI?KE ?KOLE P',
+ },
+ }, {
+ 'url': 'http://www.teachertube.com/video/intro-video-schleicher-297790',
+ 'md5': '9c79fbb2dd7154823996fc28d4a26998',
+ 'info_dict': {
+ 'id': '297790',
+ 'ext': 'mp4',
+ 'title': 'Intro Video - Schleicher',
+ 'description': 'Intro Video - Why to flip, how flipping will',
+ },
+ }]
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ video_id = mobj.group('id')
+
+ webpage = self._download_webpage(url, video_id)
+
+ title = self._html_search_meta('title', webpage, 'title')
+ TITLE_SUFFIX = ' - TeacherTube'
+ if title.endswith(TITLE_SUFFIX):
+ title = title[:-len(TITLE_SUFFIX)].strip()
+
+ description = self._html_search_meta('description', webpage, 'description')
+ if description:
+ description = description.strip()
+
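+ # When several media URLs are found, prefer mp4 over flv over mp3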
+ quality = qualities(['mp3', 'flv', 'mp4'])
+
+ media_urls = re.findall(r'data-contenturl="([^"]+)"', webpage)
+ media_urls.extend(re.findall(r'var\s+filePath\s*=\s*"([^"]+)"', webpage))
+ media_urls.extend(re.findall(r'\'file\'\s*:\s*["\']([^"\']+)["\'],', webpage))
+
+ formats = [
+ {
+ 'url': media_url,
+ 'quality': quality(determine_ext(media_url))
+ } for media_url in set(media_urls)
+ ]
+
+ self._sort_formats(formats)
+
+ return {
+ 'id': video_id,
+ 'title': title,
+ 'thumbnail': self._html_search_regex(r'\'image\'\s*:\s*["\']([^"\']+)["\']', webpage, 'thumbnail'),
+ 'formats': formats,
+ 'description': description,
+ }
+
+
+class TeacherTubeUserIE(InfoExtractor):
+ IE_NAME = 'teachertube:user:collection'
+ IE_DESC = 'teachertube.com user and collection videos'
+
+ _VALID_URL = r'https?://(?:www\.)?teachertube\.com/(user/profile|collection)/(?P<user>[0-9a-zA-Z]+)/?'
+
+ _MEDIA_RE = r'(?s)"sidebar_thumb_time">[0-9:]+</div>.+?<a href="(https?://(?:www\.)?teachertube\.com/(?:video|audio)/[^"]+)">'
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ user_id = mobj.group('user')
+
+ urls = []
+ webpage = self._download_webpage(url, user_id)
+ urls.extend(re.findall(self._MEDIA_RE, webpage))
+
+ pages = re.findall(r'/ajax-user/user-videos/%s\?page=([0-9]+)' % user_id, webpage)[1:-1]
+ for p in pages:
+ more = 'http://www.teachertube.com/ajax-user/user-videos/%s?page=%s' % (user_id, p)
+ webpage = self._download_webpage(more, user_id, 'Downloading page %s/%s' % (p, len(pages) + 1))
+ urls.extend(re.findall(self._MEDIA_RE, webpage))
+
+ entries = []
+ for url in urls:
+ entries.append(self.url_result(url, 'TeacherTube'))
+
+ return self.playlist_result(entries, user_id)
diff --git a/youtube_dl/extractor/teachingchannel.py b/youtube_dl/extractor/teachingchannel.py
new file mode 100644
index 000000000..117afa9bf
--- /dev/null
+++ b/youtube_dl/extractor/teachingchannel.py
@@ -0,0 +1,33 @@
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from .ooyala import OoyalaIE
+
+
+class TeachingChannelIE(InfoExtractor):
+ _VALID_URL = r'https?://www\.teachingchannel\.org/videos/(?P<title>.+)'
+
+ _TEST = {
+ 'url': 'https://www.teachingchannel.org/videos/teacher-teaming-evolution',
+ 'info_dict': {
+ 'id': 'F3bnlzbToeI6pLEfRyrlfooIILUjz4nM',
+ 'ext': 'mp4',
+ 'title': 'A History of Teaming',
+ 'description': 'md5:2a9033db8da81f2edffa4c99888140b3',
+ },
+ 'params': {
+ # m3u8 download
+ 'skip_download': True,
+ },
+ }
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ title = mobj.group('title')
+ webpage = self._download_webpage(url, title)
+ ooyala_code = self._search_regex(
+ r'data-embed-code=\'(.+?)\'', webpage, 'ooyala code')
+
+ return OoyalaIE._build_url_result(ooyala_code)
diff --git a/youtube_dl/extractor/ted.py b/youtube_dl/extractor/ted.py
index d260c91c2..bce32a873 100644
--- a/youtube_dl/extractor/ted.py
+++ b/youtube_dl/extractor/ted.py
@@ -27,7 +27,7 @@ class TEDIE(SubtitlesInfoExtractor):
'''
_TESTS = [{
'url': 'http://www.ted.com/talks/dan_dennett_on_our_consciousness.html',
- 'md5': '4ea1dada91e4174b53dac2bb8ace429d',
+ 'md5': 'fc94ac279feebbce69f21c0c6ee82810',
'info_dict': {
'id': '102',
'ext': 'mp4',
diff --git a/youtube_dl/extractor/tenplay.py b/youtube_dl/extractor/tenplay.py
new file mode 100644
index 000000000..8477840fc
--- /dev/null
+++ b/youtube_dl/extractor/tenplay.py
@@ -0,0 +1,84 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+
+
+class TenPlayIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:www\.)?ten(play)?\.com\.au/.+'
+ _TEST = {
+ 'url': 'http://tenplay.com.au/ten-insider/extra/season-2013/tenplay-tv-your-way',
+ #'md5': 'd68703d9f73dc8fccf3320ab34202590',
+ 'info_dict': {
+ 'id': '2695695426001',
+ 'ext': 'flv',
+ 'title': 'TENplay: TV your way',
+ 'description': 'Welcome to a new TV experience. Enjoy a taste of the TENplay benefits.',
+ 'timestamp': 1380150606.889,
+ 'upload_date': '20130925',
+ 'uploader': 'TENplay',
+ },
+ 'params': {
+ 'skip_download': True, # Requires rtmpdump
+ }
+ }
+
+ _video_fields = [
+ "id", "name", "shortDescription", "longDescription", "creationDate",
+ "publishedDate", "lastModifiedDate", "customFields", "videoStillURL",
+ "thumbnailURL", "referenceId", "length", "playsTotal",
+ "playsTrailingWeek", "renditions", "captioning", "startDate", "endDate"]
+
+ def _real_extract(self, url):
+ webpage = self._download_webpage(url, url)
+ video_id = self._html_search_regex(
+ r'videoID: "(\d+?)"', webpage, 'video_id')
+ api_token = self._html_search_regex(
+ r'apiToken: "([a-zA-Z0-9-_\.]+?)"', webpage, 'api_token')
+ title = self._html_search_regex(
+ r'<meta property="og:title" content="\s*(.*?)\s*"\s*/?\s*>',
+ webpage, 'title')
+
+ json = self._download_json('https://api.brightcove.com/services/library?command=find_video_by_id&video_id=%s&token=%s&video_fields=%s' % (video_id, api_token, ','.join(self._video_fields)), title)
+
+ formats = []
+ for rendition in json['renditions']:
+ url = rendition['remoteUrl'] or rendition['url']
+ protocol = 'rtmp' if url.startswith('rtmp') else 'http'
+ ext = 'flv' if protocol == 'rtmp' else rendition['videoContainer'].lower()
+
+ if protocol == 'rtmp':
+ url = url.replace('&mp4:', '')
+
+ formats.append({
+ 'format_id': '_'.join(['rtmp', rendition['videoContainer'].lower(), rendition['videoCodec'].lower()]),
+ 'width': rendition['frameWidth'],
+ 'height': rendition['frameHeight'],
+ 'tbr': rendition['encodingRate'] / 1024,
+ 'filesize': rendition['size'],
+ 'protocol': protocol,
+ 'ext': ext,
+ 'vcodec': rendition['videoCodec'].lower(),
+ 'container': rendition['videoContainer'].lower(),
+ 'url': url,
+ })
+
+ return {
+ 'id': video_id,
+ 'display_id': json['referenceId'],
+ 'title': json['name'],
+ 'description': json['shortDescription'] or json['longDescription'],
+ 'formats': formats,
+ 'thumbnails': [{
+ 'url': json['videoStillURL']
+ }, {
+ 'url': json['thumbnailURL']
+ }],
+ 'thumbnail': json['videoStillURL'],
+ 'duration': json['length'] / 1000,
+ 'timestamp': float(json['creationDate']) / 1000,
+ 'uploader': json['customFields'].get('production_company_distributor', 'TENplay'),
+ 'view_count': json['playsTotal']
+ }
diff --git a/youtube_dl/extractor/theplatform.py b/youtube_dl/extractor/theplatform.py
index f15780ef5..b6b2dba9c 100644
--- a/youtube_dl/extractor/theplatform.py
+++ b/youtube_dl/extractor/theplatform.py
@@ -1,3 +1,5 @@
+from __future__ import unicode_literals
+
import re
import json
@@ -18,17 +20,17 @@ class ThePlatformIE(InfoExtractor):
_TEST = {
# from http://www.metacafe.com/watch/cb-e9I_cZgTgIPd/blackberrys_big_bold_z30/
- u'url': u'http://link.theplatform.com/s/dJ5BDC/e9I_cZgTgIPd/meta.smil?format=smil&Tracking=true&mbr=true',
- u'info_dict': {
- u'id': u'e9I_cZgTgIPd',
- u'ext': u'flv',
- u'title': u'Blackberry\'s big, bold Z30',
- u'description': u'The Z30 is Blackberry\'s biggest, baddest mobile messaging device yet.',
- u'duration': 247,
+ 'url': 'http://link.theplatform.com/s/dJ5BDC/e9I_cZgTgIPd/meta.smil?format=smil&Tracking=true&mbr=true',
+ 'info_dict': {
+ 'id': 'e9I_cZgTgIPd',
+ 'ext': 'flv',
+ 'title': 'Blackberry\'s big, bold Z30',
+ 'description': 'The Z30 is Blackberry\'s biggest, baddest mobile messaging device yet.',
+ 'duration': 247,
},
- u'params': {
+ 'params': {
# rtmp download
- u'skip_download': True,
+ 'skip_download': True,
},
}
@@ -39,7 +41,7 @@ class ThePlatformIE(InfoExtractor):
error_msg = next(
n.attrib['abstract']
for n in meta.findall(_x('.//smil:ref'))
- if n.attrib.get('title') == u'Geographic Restriction')
+ if n.attrib.get('title') == 'Geographic Restriction')
except StopIteration:
pass
else:
@@ -101,8 +103,7 @@ class ThePlatformIE(InfoExtractor):
config_url = url+ '&form=json'
config_url = config_url.replace('swf/', 'config/')
config_url = config_url.replace('onsite/', 'onsite/config/')
- config_json = self._download_webpage(config_url, video_id, u'Downloading config')
- config = json.loads(config_json)
+ config = self._download_json(config_url, video_id, 'Downloading config')
smil_url = config['releaseUrl'] + '&format=SMIL&formats=MPEG4&manifest=f4m'
else:
smil_url = ('http://link.theplatform.com/s/dJ5BDC/{0}/meta.smil?'
diff --git a/youtube_dl/extractor/toypics.py b/youtube_dl/extractor/toypics.py
index 34008afc6..0f389bd93 100644
--- a/youtube_dl/extractor/toypics.py
+++ b/youtube_dl/extractor/toypics.py
@@ -1,10 +1,13 @@
+# -*- coding:utf-8 -*-
+from __future__ import unicode_literals
+
from .common import InfoExtractor
import re
class ToypicsIE(InfoExtractor):
IE_DESC = 'Toypics user profile'
- _VALID_URL = r'http://videos\.toypics\.net/view/(?P<id>[0-9]+)/.*'
+ _VALID_URL = r'https?://videos\.toypics\.net/view/(?P<id>[0-9]+)/.*'
_TEST = {
'url': 'http://videos.toypics.net/view/514/chancebulged,-2-1/',
'md5': '16e806ad6d6f58079d210fe30985e08b',
@@ -61,7 +64,7 @@ class ToypicsUserIE(InfoExtractor):
note='Downloading page %d/%d' % (n, page_count))
urls.extend(
re.findall(
- r'<p class="video-entry-title">\n\s*<a href="(http://videos.toypics.net/view/[^"]+)">',
+ r'<p class="video-entry-title">\s+<a href="(https?://videos.toypics.net/view/[^"]+)">',
lpage))
return {
diff --git a/youtube_dl/extractor/tube8.py b/youtube_dl/extractor/tube8.py
index 36bc36ad8..08a48c05a 100644
--- a/youtube_dl/extractor/tube8.py
+++ b/youtube_dl/extractor/tube8.py
@@ -17,9 +17,10 @@ class Tube8IE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?tube8\.com/(?:[^/]+/){2}(?P<id>\d+)'
_TEST = {
'url': 'http://www.tube8.com/teen/kasia-music-video/229795/',
- 'file': '229795.mp4',
- 'md5': 'e9e0b0c86734e5e3766e653509475db0',
+ 'md5': '44bf12b98313827dd52d35b8706a4ea0',
'info_dict': {
+ 'id': '229795',
+ 'ext': 'mp4',
'description': 'hot teen Kasia grinding',
'uploader': 'unknown',
'title': 'Kasia music video',
diff --git a/youtube_dl/extractor/tumblr.py b/youtube_dl/extractor/tumblr.py
index 544369068..2882c1809 100644
--- a/youtube_dl/extractor/tumblr.py
+++ b/youtube_dl/extractor/tumblr.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
@@ -10,14 +11,27 @@ from ..utils import (
class TumblrIE(InfoExtractor):
_VALID_URL = r'http://(?P<blog_name>.*?)\.tumblr\.com/((post)|(video))/(?P<id>\d*)($|/)'
- _TEST = {
+ _TESTS = [{
'url': 'http://tatianamaslanydaily.tumblr.com/post/54196191430/orphan-black-dvd-extra-behind-the-scenes',
- 'file': '54196191430.mp4',
'md5': '479bb068e5b16462f5176a6828829767',
'info_dict': {
- "title": "tatiana maslany news"
+ 'id': '54196191430',
+ 'ext': 'mp4',
+ 'title': 'tatiana maslany news, Orphan Black || DVD extra - behind the scenes ↳...',
+ 'description': 'md5:dfac39636969fe6bf1caa2d50405f069',
+ 'thumbnail': 're:http://.*\.jpg',
}
- }
+ }, {
+ 'url': 'http://5sostrum.tumblr.com/post/90208453769/yall-forgetting-the-greatest-keek-of-them-all',
+ 'md5': 'bf348ef8c0ef84fbf1cbd6fa6e000359',
+ 'info_dict': {
+ 'id': '90208453769',
+ 'ext': 'mp4',
+ 'title': '5SOS STRUM ;)',
+ 'description': 'md5:dba62ac8639482759c8eb10ce474586a',
+ 'thumbnail': 're:http://.*\.jpg',
+ }
+ }]
def _real_extract(self, url):
m_url = re.match(self._VALID_URL, url)
@@ -48,6 +62,7 @@ class TumblrIE(InfoExtractor):
return [{'id': video_id,
'url': video_url,
'title': video_title,
+ 'description': self._html_search_meta('description', webpage),
'thumbnail': video_thumbnail,
'ext': ext
}]
diff --git a/youtube_dl/extractor/ustream.py b/youtube_dl/extractor/ustream.py
index e4bb3b949..488b10df9 100644
--- a/youtube_dl/extractor/ustream.py
+++ b/youtube_dl/extractor/ustream.py
@@ -11,29 +11,36 @@ from ..utils import (
class UstreamIE(InfoExtractor):
- _VALID_URL = r'https?://www\.ustream\.tv/(?P<type>recorded|embed)/(?P<videoID>\d+)'
+ _VALID_URL = r'https?://www\.ustream\.tv/(?P<type>recorded|embed|embed/recorded)/(?P<videoID>\d+)'
IE_NAME = 'ustream'
_TEST = {
'url': 'http://www.ustream.tv/recorded/20274954',
- 'file': '20274954.flv',
'md5': '088f151799e8f572f84eb62f17d73e5c',
'info_dict': {
- "uploader": "Young Americans for Liberty",
- "title": "Young Americans for Liberty February 7, 2012 2:28 AM",
+ 'id': '20274954',
+ 'ext': 'flv',
+ 'uploader': 'Young Americans for Liberty',
+ 'title': 'Young Americans for Liberty February 7, 2012 2:28 AM',
},
}
def _real_extract(self, url):
m = re.match(self._VALID_URL, url)
+ video_id = m.group('videoID')
+
+ # some sites use this embed format (see: http://github.com/rg3/youtube-dl/issues/2990)
+ if m.group('type') == 'embed/recorded':
+ video_id = m.group('videoID')
+ desktop_url = 'http://www.ustream.tv/recorded/' + video_id
+ return self.url_result(desktop_url, 'Ustream')
if m.group('type') == 'embed':
video_id = m.group('videoID')
webpage = self._download_webpage(url, video_id)
- desktop_video_id = self._html_search_regex(r'ContentVideoIds=\["([^"]*?)"\]', webpage, 'desktop_video_id')
+ desktop_video_id = self._html_search_regex(
+ r'ContentVideoIds=\["([^"]*?)"\]', webpage, 'desktop_video_id')
desktop_url = 'http://www.ustream.tv/recorded/' + desktop_video_id
return self.url_result(desktop_url, 'Ustream')
- video_id = m.group('videoID')
-
video_url = 'http://tcdn.ustream.tv/video/%s' % video_id
webpage = self._download_webpage(url, video_id)
diff --git a/youtube_dl/extractor/veoh.py b/youtube_dl/extractor/veoh.py
index d16993daf..a7953a7e7 100644
--- a/youtube_dl/extractor/veoh.py
+++ b/youtube_dl/extractor/veoh.py
@@ -7,6 +7,7 @@ from .common import InfoExtractor
from ..utils import (
compat_urllib_request,
int_or_none,
+ ExtractorError,
)
@@ -48,6 +49,7 @@ class VeohIE(InfoExtractor):
'description': 'md5:f5a11c51f8fb51d2315bca0937526891',
'uploader': 'newsy-videos',
},
+ 'skip': 'This video has been deleted.',
},
]
@@ -94,8 +96,12 @@ class VeohIE(InfoExtractor):
if video_id.startswith('v'):
rsp = self._download_xml(
r'http://www.veoh.com/api/findByPermalink?permalink=%s' % video_id, video_id, 'Downloading video XML')
- if rsp.get('stat') == 'ok':
+ stat = rsp.get('stat')
+ if stat == 'ok':
return self._extract_video(rsp.find('./videoList/video'))
+ elif stat == 'fail':
+ raise ExtractorError(
+ '%s said: %s' % (self.IE_NAME, rsp.find('./errorList/error').get('errorMessage')), expected=True)
webpage = self._download_webpage(url, video_id)
age_limit = 0
diff --git a/youtube_dl/extractor/vevo.py b/youtube_dl/extractor/vevo.py
index ea34a8f16..eada13ce9 100644
--- a/youtube_dl/extractor/vevo.py
+++ b/youtube_dl/extractor/vevo.py
@@ -16,7 +16,7 @@ class VevoIE(InfoExtractor):
(currently used by MTVIE)
"""
_VALID_URL = r'''(?x)
- (?:https?://www\.vevo\.com/watch/(?:[^/]+/[^/]+/)?|
+ (?:https?://www\.vevo\.com/watch/(?:[^/]+/(?:[^/]+/)?)?|
https?://cache\.vevo\.com/m/html/embed\.html\?video=|
https?://videoplayer\.vevo\.com/embed/embedded\?videoId=|
vevo:)
diff --git a/youtube_dl/extractor/vh1.py b/youtube_dl/extractor/vh1.py
new file mode 100644
index 000000000..2f77e3898
--- /dev/null
+++ b/youtube_dl/extractor/vh1.py
@@ -0,0 +1,124 @@
+from __future__ import unicode_literals
+
+from .mtv import MTVIE
+
+import re
+from ..utils import fix_xml_ampersands
+
+
+class VH1IE(MTVIE):
+ IE_NAME = 'vh1.com'
+ _FEED_URL = 'http://www.vh1.com/player/embed/AS3/fullepisode/rss/'
+ _TESTS = [{
+ 'url': 'http://www.vh1.com/video/metal-evolution/full-episodes/progressive-metal/1678612/playlist.jhtml',
+ 'playlist': [
+ {
+ 'md5': '7827a7505f59633983165bbd2c119b52',
+ 'info_dict': {
+ 'id': '731565',
+ 'ext': 'mp4',
+ 'title': 'Metal Evolution: Ep. 11 Act 1',
+ 'description': 'Many rock academics have proclaimed that the truly progressive musicianship of the last 20 years has been found right here in the world of heavy metal, rather than obvious locales such as jazz, fusion or progressive rock. It stands to reason then, that much of this jaw-dropping virtuosity occurs within what\'s known as progressive metal, a genre that takes root with the likes of Rush in the \'70s, Queensryche and Fates Warning in the \'80s, and Dream Theater in the \'90s. Since then, the genre has exploded with creativity, spawning mind-bending, genre-defying acts such as Tool, Mastodon, Coheed And Cambria, Porcupine Tree, Meshuggah, A Perfect Circle and Opeth. Episode 12 looks at the extreme musicianship of these bands, as well as their often extreme literary prowess and conceptual strength, the end result being a rich level of respect and attention such challenging acts have brought upon the world of heavy metal, from a critical community usually dismissive of the form.'
+ }
+ },
+ {
+ 'md5': '34fb4b7321c546b54deda2102a61821f',
+ 'info_dict': {
+ 'id': '731567',
+ 'ext': 'mp4',
+ 'title': 'Metal Evolution: Ep. 11 Act 2',
+ 'description': 'Many rock academics have proclaimed that the truly progressive musicianship of the last 20 years has been found right here in the world of heavy metal, rather than obvious locales such as jazz, fusion or progressive rock. It stands to reason then, that much of this jaw-dropping virtuosity occurs within what\'s known as progressive metal, a genre that takes root with the likes of Rush in the \'70s, Queensryche and Fates Warning in the \'80s, and Dream Theater in the \'90s. Since then, the genre has exploded with creativity, spawning mind-bending, genre-defying acts such as Tool, Mastodon, Coheed And Cambria, Porcupine Tree, Meshuggah, A Perfect Circle and Opeth. Episode 11 looks at the extreme musicianship of these bands, as well as their often extreme literary prowess and conceptual strength, the end result being a rich level of respect and attention such challenging acts have brought upon the world of heavy metal, from a critical community usually dismissive of the form.'
+ }
+ },
+ {
+ 'md5': '813f38dba4c1b8647196135ebbf7e048',
+ 'info_dict': {
+ 'id': '731568',
+ 'ext': 'mp4',
+ 'title': 'Metal Evolution: Ep. 11 Act 3',
+ 'description': 'Many rock academics have proclaimed that the truly progressive musicianship of the last 20 years has been found right here in the world of heavy metal, rather than obvious locales such as jazz, fusion or progressive rock. It stands to reason then, that much of this jaw-dropping virtuosity occurs within what\'s known as progressive metal, a genre that takes root with the likes of Rush in the \'70s, Queensryche and Fates Warning in the \'80s, and Dream Theater in the \'90s. Since then, the genre has exploded with creativity, spawning mind-bending, genre-defying acts such as Tool, Mastodon, Coheed And Cambria, Porcupine Tree, Meshuggah, A Perfect Circle and Opeth. Episode 11 looks at the extreme musicianship of these bands, as well as their often extreme literary prowess and conceptual strength, the end result being a rich level of respect and attention such challenging acts have brought upon the world of heavy metal, from a critical community usually dismissive of the form.'
+ }
+ },
+ {
+ 'md5': '51adb72439dfaed11c799115d76e497f',
+ 'info_dict': {
+ 'id': '731569',
+ 'ext': 'mp4',
+ 'title': 'Metal Evolution: Ep. 11 Act 4',
+ 'description': 'Many rock academics have proclaimed that the truly progressive musicianship of the last 20 years has been found right here in the world of heavy metal, rather than obvious locales such as jazz, fusion or progressive rock. It stands to reason then, that much of this jaw-dropping virtuosity occurs within what\'s known as progressive metal, a genre that takes root with the likes of Rush in the \'70s, Queensryche and Fates Warning in the \'80s, and Dream Theater in the \'90s. Since then, the genre has exploded with creativity, spawning mind-bending, genre-defying acts such as Tool, Mastodon, Coheed And Cambria, Porcupine Tree, Meshuggah, A Perfect Circle and Opeth. Episode 11 looks at the extreme musicianship of these bands, as well as their often extreme literary prowess and conceptual strength, the end result being a rich level of respect and attention such challenging acts have brought upon the world of heavy metal, from a critical community usually dismissive of the form.'
+ }
+ },
+ {
+ 'md5': '93d554aaf79320703b73a95288c76a6e',
+ 'info_dict': {
+ 'id': '731570',
+ 'ext': 'mp4',
+ 'title': 'Metal Evolution: Ep. 11 Act 5',
+ 'description': 'Many rock academics have proclaimed that the truly progressive musicianship of the last 20 years has been found right here in the world of heavy metal, rather than obvious locales such as jazz, fusion or progressive rock. It stands to reason then, that much of this jaw-dropping virtuosity occurs within what\'s known as progressive metal, a genre that takes root with the likes of Rush in the \'70s, Queensryche and Fates Warning in the \'80s, and Dream Theater in the \'90s. Since then, the genre has exploded with creativity, spawning mind-bending, genre-defying acts such as Tool, Mastodon, Coheed And Cambria, Porcupine Tree, Meshuggah, A Perfect Circle and Opeth. Episode 11 looks at the extreme musicianship of these bands, as well as their often extreme literary prowess and conceptual strength, the end result being a rich level of respect and attention such challenging acts have brought upon the world of heavy metal, from a critical community usually dismissive of the form.'
+ }
+ }
+ ],
+ 'skip': 'Blocked outside the US',
+ }, {
+ # Clip
+ 'url': 'http://www.vh1.com/video/misc/706675/metal-evolution-episode-1-pre-metal-show-clip.jhtml#id=1674118',
+ 'md5': '7d67cf6d9cdc6b4f3d3ac97a55403844',
+ 'info_dict': {
+ 'id': '706675',
+ 'ext': 'mp4',
+ 'title': 'Metal Evolution: Episode 1 Pre-Metal Show Clip',
+ 'description': 'The greatest documentary ever made about Heavy Metal begins as our host Sam Dunn travels the globe to seek out the origins and influences that helped create Heavy Metal. Sam speaks to legends like Kirk Hammett, Alice Cooper, Slash, Bill Ward, Geezer Butler, Tom Morello, Ace Frehley, Lemmy Kilmister, Dave Davies, and many many more. This episode is the prologue for the 11 hour series, and Sam goes back to the very beginning to reveal how Heavy Metal was created.'
+ },
+ 'skip': 'Blocked outside the US',
+ }, {
+ # Short link
+ 'url': 'http://www.vh1.com/video/play.jhtml?id=1678353',
+ 'md5': '853192b87ad978732b67dd8e549b266a',
+ 'info_dict': {
+ 'id': '730355',
+ 'ext': 'mp4',
+ 'title': 'Metal Evolution: Episode 11 Progressive Metal Sneak',
+ 'description': 'In Metal Evolution\'s finale sneak, Sam sits with Michael Giles of King Crimson and gets feedback from Metallica guitarist Kirk Hammett on why the group was influential.'
+ },
+ 'skip': 'Blocked outside the US',
+ }, {
+ 'url': 'http://www.vh1.com/video/macklemore-ryan-lewis/900535/cant-hold-us-ft-ray-dalton.jhtml',
+ 'md5': 'b1bcb5b4380c9d7f544065589432dee7',
+ 'info_dict': {
+ 'id': '900535',
+ 'ext': 'mp4',
+ 'title': 'Macklemore & Ryan Lewis - "Can\'t Hold Us ft. Ray Dalton"',
+ 'description': 'The Heist'
+ },
+ 'skip': 'Blocked outside the US',
+ }]
+
+ _VALID_URL = r'''(?x)
+ https?://www\.vh1\.com/video/
+ (?:
+ .+?/full-episodes/.+?/(?P<playlist_id>[^/]+)/playlist\.jhtml
+ |
+ (?:
+ play.jhtml\?id=|
+ misc/.+?/.+?\.jhtml\#id=
+ )
+ (?P<video_id>[0-9]+)$
+ |
+ [^/]+/(?P<music_id>[0-9]+)/[^/]+?
+ )
+ '''
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ if mobj.group('music_id'):
+ id_field = 'vid'
+ video_id = mobj.group('music_id')
+ else:
+ video_id = mobj.group('playlist_id') or mobj.group('video_id')
+ id_field = 'id'
+ doc_url = '%s?%s=%s' % (self._FEED_URL, id_field, video_id)
+
+ idoc = self._download_xml(
+ doc_url, video_id,
+ 'Downloading info', transform_source=fix_xml_ampersands)
+ return [self._get_video_info(item) for item in idoc.findall('.//item')]
diff --git a/youtube_dl/extractor/videott.py b/youtube_dl/extractor/videott.py
index b5034b02f..a647807d0 100644
--- a/youtube_dl/extractor/videott.py
+++ b/youtube_dl/extractor/videott.py
@@ -4,7 +4,10 @@ import re
import base64
from .common import InfoExtractor
-from ..utils import unified_strdate
+from ..utils import (
+ unified_strdate,
+ int_or_none,
+)
class VideoTtIE(InfoExtractor):
@@ -50,9 +53,9 @@ class VideoTtIE(InfoExtractor):
'thumbnail': settings['config']['thumbnail'],
'upload_date': unified_strdate(video['added']),
'uploader': video['owner'],
- 'view_count': int(video['view_count']),
- 'comment_count': int(video['comment_count']),
- 'like_count': int(video['liked']),
- 'dislike_count': int(video['disliked']),
+ 'view_count': int_or_none(video['view_count']),
+ 'comment_count': None if video.get('comment_count') == '--' else int_or_none(video['comment_count']),
+ 'like_count': int_or_none(video['liked']),
+ 'dislike_count': int_or_none(video['disliked']),
'formats': formats,
} \ No newline at end of file
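
The videott change replaces bare int() casts with int_or_none so that missing counters no longer abort extraction; the explicit '--' guard on comment_count is still needed because the helper only passes None through and does not swallow non-numeric strings. A rough sketch of the helper's core behaviour, with a simplified signature:

    def int_or_none(v, scale=1):
        # None stays None; anything else is converted, so a placeholder string
        # such as '--' would still raise ValueError -- hence the separate guard above.
        return v if v is None else int(v) // scale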
diff --git a/youtube_dl/extractor/vimple.py b/youtube_dl/extractor/vimple.py
new file mode 100644
index 000000000..33d370e1c
--- /dev/null
+++ b/youtube_dl/extractor/vimple.py
@@ -0,0 +1,86 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import base64
+import re
+import xml.etree.ElementTree
+import zlib
+
+from .common import InfoExtractor
+from ..utils import int_or_none
+
+
+class VimpleIE(InfoExtractor):
+ IE_DESC = 'Vimple.ru'
+ _VALID_URL = r'https?://(player.vimple.ru/iframe|vimple.ru)/(?P<id>[a-f0-9]{10,})'
+ _TESTS = [
+ # Quality: Large, from iframe
+ {
+ 'url': 'http://player.vimple.ru/iframe/b132bdfd71b546d3972f9ab9a25f201c',
+ 'info_dict': {
+ 'id': 'b132bdfd71b546d3972f9ab9a25f201c',
+ 'title': 'great-escape-minecraft.flv',
+ 'ext': 'mp4',
+ 'duration': 352,
+ 'webpage_url': 'http://vimple.ru/b132bdfd71b546d3972f9ab9a25f201c',
+ },
+ },
+ # Quality: Medium, from mainpage
+ {
+ 'url': 'http://vimple.ru/a15950562888453b8e6f9572dc8600cd',
+ 'info_dict': {
+ 'id': 'a15950562888453b8e6f9572dc8600cd',
+ 'title': 'DB 01',
+ 'ext': 'flv',
+ 'duration': 1484,
+ 'webpage_url': 'http://vimple.ru/a15950562888453b8e6f9572dc8600cd',
+ }
+ },
+ ]
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ video_id = mobj.group('id')
+
+ iframe_url = 'http://player.vimple.ru/iframe/%s' % video_id
+
+ iframe = self._download_webpage(
+ iframe_url, video_id,
+ note='Downloading iframe', errnote='unable to fetch iframe')
+ player_url = self._html_search_regex(
+ r'"(http://player.vimple.ru/flash/.+?)"', iframe, 'player url')
+
+ player = self._request_webpage(
+ player_url, video_id, note='Downloading swf player').read()
+
+ player = zlib.decompress(player[8:])
+
+ xml_pieces = re.findall(b'([a-zA-Z0-9 =+/]{500})', player)
+ xml_pieces = [piece[1:-1] for piece in xml_pieces]
+
+ xml_data = b''.join(xml_pieces)
+ xml_data = base64.b64decode(xml_data)
+
+ xml_data = xml.etree.ElementTree.fromstring(xml_data)
+
+ video = xml_data.find('Video')
+ quality = video.get('quality')
+ q_tag = video.find(quality.capitalize())
+
+ formats = [
+ {
+ 'url': q_tag.get('url'),
+ 'tbr': int(q_tag.get('bitrate')),
+ 'filesize': int(q_tag.get('filesize')),
+ 'format_id': quality,
+ },
+ ]
+
+ return {
+ 'id': video_id,
+ 'title': video.find('Title').text,
+ 'formats': formats,
+ 'thumbnail': video.find('Poster').get('url'),
+ 'duration': int_or_none(video.get('duration')),
+ 'webpage_url': video.find('Share').get('videoPageUrl'),
+ }
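
The Vimple extractor above pulls its metadata out of the Flash player itself: it strips the 8-byte SWF header, inflates the body, and reassembles a base64-encoded XML blob from the decompressed bytes. The header handling rests on the SWF file layout, sketched here for reference:

    import zlib

    def decompress_swf_body(swf_bytes):
        # A zlib-compressed SWF starts with the signature b'CWS'; the first 8 bytes
        # (3-byte signature, 1-byte version, 4-byte uncompressed length) are stored
        # uncompressed, so only the remainder is inflated -- mirroring
        # zlib.decompress(player[8:]) above.
        if swf_bytes[:3] != b'CWS':
            raise ValueError('not a zlib-compressed SWF')
        return zlib.decompress(swf_bytes[8:])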
diff --git a/youtube_dl/extractor/vk.py b/youtube_dl/extractor/vk.py
index fb082f364..918bd1098 100644
--- a/youtube_dl/extractor/vk.py
+++ b/youtube_dl/extractor/vk.py
@@ -16,7 +16,7 @@ from ..utils import (
class VKIE(InfoExtractor):
IE_NAME = 'vk.com'
- _VALID_URL = r'https?://vk\.com/(?:video_ext\.php\?.*?\boid=(?P<oid>-?\d+).*?\bid=(?P<id>\d+)|(?:videos.*?\?.*?z=)?video(?P<videoid>.*?)(?:\?|%2F|$))'
+ _VALID_URL = r'https?://(?:m\.)?vk\.com/(?:video_ext\.php\?.*?\boid=(?P<oid>-?\d+).*?\bid=(?P<id>\d+)|(?:.+?\?.*?z=)?video(?P<videoid>.*?)(?:\?|%2F|$))'
_NETRC_MACHINE = 'vk'
_TESTS = [
@@ -27,7 +27,7 @@ class VKIE(InfoExtractor):
'id': '162222515',
'ext': 'flv',
'title': 'ProtivoGunz - Хуёвая песня',
- 'uploader': 'Noize MC',
+ 'uploader': 're:Noize MC.*',
'duration': 195,
},
},
@@ -62,11 +62,47 @@ class VKIE(InfoExtractor):
'id': '164049491',
'ext': 'mp4',
'uploader': 'Триллеры',
- 'title': '► Бойцовский клуб / Fight Club 1999 [HD 720]\u00a0',
+ 'title': '► Бойцовский клуб / Fight Club 1999 [HD 720]',
'duration': 8352,
},
'skip': 'Requires vk account credentials',
},
+ {
+ 'url': 'http://vk.com/feed?z=video-43215063_166094326%2Fbb50cacd3177146d7a',
+ 'md5': 'd82c22e449f036282d1d3f7f4d276869',
+ 'info_dict': {
+ 'id': '166094326',
+ 'ext': 'mp4',
+ 'uploader': 'Киномания - лучшее из мира кино',
+ 'title': 'Запах женщины (1992)',
+ 'duration': 9392,
+ },
+ 'skip': 'Requires vk account credentials',
+ },
+ {
+ 'url': 'http://vk.com/hd_kino_mania?z=video-43215063_168067957%2F15c66b9b533119788d',
+ 'md5': '4d7a5ef8cf114dfa09577e57b2993202',
+ 'info_dict': {
+ 'id': '168067957',
+ 'ext': 'mp4',
+ 'uploader': 'Киномания - лучшее из мира кино',
+ 'title': ' ',
+ 'duration': 7291,
+ },
+ 'skip': 'Requires vk account credentials',
+ },
+ {
+ 'url': 'http://m.vk.com/video-43215063_169084319?list=125c627d1aa1cebb83&from=wall-43215063_2566540',
+ 'md5': '0c45586baa71b7cb1d0784ee3f4e00a6',
+ 'note': 'ivi.ru embed',
+ 'info_dict': {
+ 'id': '60690',
+ 'ext': 'mp4',
+ 'title': 'Книга Илая',
+ 'duration': 6771,
+ },
+ 'skip': 'Only works from Russia',
+ },
]
def _login(self):
@@ -110,6 +146,16 @@ class VKIE(InfoExtractor):
if m_yt is not None:
self.to_screen('Youtube video detected')
return self.url_result(m_yt.group(1), 'Youtube')
+
+ m_opts = re.search(r'(?s)var\s+opts\s*=\s*({.*?});', info_page)
+ if m_opts:
+ m_opts_url = re.search(r"url\s*:\s*'([^']+)", m_opts.group(1))
+ if m_opts_url:
+ opts_url = m_opts_url.group(1)
+ if opts_url.startswith('//'):
+ opts_url = 'http:' + opts_url
+ return self.url_result(opts_url)
+
data_json = self._search_regex(r'var vars = ({.*?});', info_page, 'vars')
data = json.loads(data_json)
diff --git a/youtube_dl/extractor/vodlocker.py b/youtube_dl/extractor/vodlocker.py
new file mode 100644
index 000000000..68c59364b
--- /dev/null
+++ b/youtube_dl/extractor/vodlocker.py
@@ -0,0 +1,63 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+import re
+from .common import InfoExtractor
+from ..utils import (
+ compat_urllib_parse,
+ compat_urllib_request,
+)
+
+
+class VodlockerIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:www\.)?vodlocker.com/(?P<id>[0-9a-zA-Z]+)(?:\..*?)?'
+
+ _TESTS = [{
+ 'url': 'http://vodlocker.com/e8wvyzz4sl42',
+ 'md5': 'ce0c2d18fa0735f1bd91b69b0e54aacf',
+ 'info_dict': {
+ 'id': 'e8wvyzz4sl42',
+ 'ext': 'mp4',
+ 'title': 'Germany vs Brazil',
+ 'thumbnail': 're:http://.*\.jpg',
+ },
+ }]
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ video_id = mobj.group('id')
+ webpage = self._download_webpage(url, video_id)
+
+ fields = dict(re.findall(r'''(?x)<input\s+
+ type="hidden"\s+
+ name="([^"]+)"\s+
+ (?:id="[^"]+"\s+)?
+ value="([^"]*)"
+ ''', webpage))
+
+ if fields['op'] == 'download1':
+ self._sleep(3, video_id) # they do detect when requests happen too fast!
+ post = compat_urllib_parse.urlencode(fields)
+ req = compat_urllib_request.Request(url, post)
+ req.add_header('Content-type', 'application/x-www-form-urlencoded')
+ webpage = self._download_webpage(
+ req, video_id, 'Downloading video page')
+
+ title = self._search_regex(
+ r'id="file_title".*?>\s*(.*?)\s*<span', webpage, 'title')
+ thumbnail = self._search_regex(
+ r'image:\s*"(http[^\"]+)",', webpage, 'thumbnail')
+ url = self._search_regex(
+ r'file:\s*"(http[^\"]+)",', webpage, 'file url')
+
+ formats = [{
+ 'format_id': 'sd',
+ 'url': url,
+ }]
+
+ return {
+ 'id': video_id,
+ 'title': title,
+ 'thumbnail': thumbnail,
+ 'formats': formats,
+ }
diff --git a/youtube_dl/extractor/vulture.py b/youtube_dl/extractor/vulture.py
new file mode 100644
index 000000000..1eb24a3d6
--- /dev/null
+++ b/youtube_dl/extractor/vulture.py
@@ -0,0 +1,69 @@
+from __future__ import unicode_literals
+
+import json
+import os.path
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+ int_or_none,
+ parse_iso8601,
+)
+
+
+class VultureIE(InfoExtractor):
+ IE_NAME = 'vulture.com'
+ _VALID_URL = r'https?://video\.vulture\.com/video/(?P<display_id>[^/]+)/'
+ _TEST = {
+ 'url': 'http://video.vulture.com/video/Mindy-Kaling-s-Harvard-Speech/player?layout=compact&read_more=1',
+ 'md5': '8d997845642a2b5152820f7257871bc8',
+ 'info_dict': {
+ 'id': '6GHRQL3RV7MSD1H4',
+ 'ext': 'mp4',
+ 'title': 'kaling-speech-2-MAGNIFY STANDARD CONTAINER REVISED',
+ 'uploader_id': 'Sarah',
+ 'thumbnail': 're:^http://.*\.jpg$',
+ 'timestamp': 1401288564,
+ 'upload_date': '20140528',
+ 'description': 'Uplifting and witty, as predicted.',
+ 'duration': 1015,
+ }
+ }
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ display_id = mobj.group('display_id')
+
+ webpage = self._download_webpage(url, display_id)
+ query_string = self._search_regex(
+ r"queryString\s*=\s*'([^']+)'", webpage, 'query string')
+ video_id = self._search_regex(
+ r'content=([^&]+)', query_string, 'video ID')
+ query_url = 'http://video.vulture.com/embed/player/container/1000/1000/?%s' % query_string
+
+ query_webpage = self._download_webpage(
+ query_url, display_id, note='Downloading query page')
+ params_json = self._search_regex(
+ r'(?sm)new MagnifyEmbeddablePlayer\({.*?contentItem:\s*(\{.*?\})\n,\n',
+ query_webpage,
+ 'player params')
+ params = json.loads(params_json)
+
+ upload_timestamp = parse_iso8601(params['posted'].replace(' ', 'T'))
+ uploader_id = params.get('user', {}).get('handle')
+
+ media_item = params['media_item']
+ title = os.path.splitext(media_item['title'])[0]
+ duration = int_or_none(media_item.get('duration_seconds'))
+
+ return {
+ 'id': video_id,
+ 'display_id': display_id,
+ 'url': media_item['pipeline_xid'],
+ 'title': title,
+ 'timestamp': upload_timestamp,
+ 'thumbnail': params.get('thumbnail_url'),
+ 'uploader_id': uploader_id,
+ 'description': params.get('description'),
+ 'duration': duration,
+ }
diff --git a/youtube_dl/extractor/wdr.py b/youtube_dl/extractor/wdr.py
index feeb44b45..f741ba540 100644
--- a/youtube_dl/extractor/wdr.py
+++ b/youtube_dl/extractor/wdr.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
@@ -54,14 +55,14 @@ class WDRIE(InfoExtractor):
},
},
{
- 'url': 'http://www.funkhauseuropa.de/av/audiogrenzenlosleckerbaklava101-audioplayer.html',
- 'md5': 'cfff440d4ee64114083ac44676df5d15',
+ 'url': 'http://www.funkhauseuropa.de/av/audiosuepersongsoulbossanova100-audioplayer.html',
+ 'md5': '24e83813e832badb0a8d7d1ef9ef0691',
'info_dict': {
- 'id': 'mdb-363068',
+ 'id': 'mdb-463528',
'ext': 'mp3',
- 'title': 'Grenzenlos lecker - Baklava',
+ 'title': 'Süpersong: Soul Bossa Nova',
'description': 'md5:7b29e97e10dfb6e265238b32fa35b23a',
- 'upload_date': '20140311',
+ 'upload_date': '20140630',
},
},
]
@@ -127,9 +128,10 @@ class WDRMobileIE(InfoExtractor):
'info_dict': {
'title': '4283021',
'id': '421735',
+ 'ext': 'mp4',
'age_limit': 0,
},
- '_skip': 'Will be depublicized shortly'
+ 'skip': 'Problems with loading data.'
}
def _real_extract(self, url):
@@ -139,6 +141,7 @@ class WDRMobileIE(InfoExtractor):
'title': mobj.group('title'),
'age_limit': int(mobj.group('age_limit')),
'url': url,
+ 'ext': determine_ext(url),
'user_agent': 'mobile',
}
diff --git a/youtube_dl/extractor/wistia.py b/youtube_dl/extractor/wistia.py
index bc31c2e64..e6bfa9e14 100644
--- a/youtube_dl/extractor/wistia.py
+++ b/youtube_dl/extractor/wistia.py
@@ -1,3 +1,5 @@
+from __future__ import unicode_literals
+
import json
import re
@@ -5,14 +7,16 @@ from .common import InfoExtractor
class WistiaIE(InfoExtractor):
- _VALID_URL = r'^https?://(?:fast\.)?wistia\.net/embed/iframe/(?P<id>[a-z0-9]+)'
+ _VALID_URL = r'https?://(?:fast\.)?wistia\.net/embed/iframe/(?P<id>[a-z0-9]+)'
_TEST = {
- u"url": u"http://fast.wistia.net/embed/iframe/sh7fpupwlt",
- u"file": u"sh7fpupwlt.mov",
- u"md5": u"cafeb56ec0c53c18c97405eecb3133df",
- u"info_dict": {
- u"title": u"cfh_resourceful_zdkh_final_1"
+ 'url': 'http://fast.wistia.net/embed/iframe/sh7fpupwlt',
+ 'md5': 'cafeb56ec0c53c18c97405eecb3133df',
+ 'info_dict': {
+ 'id': 'sh7fpupwlt',
+ 'ext': 'mov',
+ 'title': 'Being Resourceful',
+ 'duration': 117,
},
}
@@ -22,7 +26,7 @@ class WistiaIE(InfoExtractor):
webpage = self._download_webpage(url, video_id)
data_json = self._html_search_regex(
- r'Wistia.iframeInit\((.*?), {}\);', webpage, u'video data')
+ r'Wistia\.iframeInit\((.*?), {}\);', webpage, 'video data')
data = json.loads(data_json)
@@ -54,4 +58,5 @@ class WistiaIE(InfoExtractor):
'title': data['name'],
'formats': formats,
'thumbnails': thumbnails,
+ 'duration': data.get('duration'),
}
diff --git a/youtube_dl/extractor/wrzuta.py b/youtube_dl/extractor/wrzuta.py
new file mode 100644
index 000000000..34dd6d952
--- /dev/null
+++ b/youtube_dl/extractor/wrzuta.py
@@ -0,0 +1,81 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+ int_or_none,
+ qualities,
+)
+
+
+class WrzutaIE(InfoExtractor):
+ IE_NAME = 'wrzuta.pl'
+
+ _VALID_URL = r'https?://(?P<uploader>[0-9a-zA-Z]+)\.wrzuta\.pl/(?P<typ>film|audio)/(?P<id>[0-9a-zA-Z]+)'
+
+ _TESTS = [{
+ 'url': 'http://laboratoriumdextera.wrzuta.pl/film/aq4hIZWrkBu/nike_football_the_last_game',
+ 'md5': '9e67e05bed7c03b82488d87233a9efe7',
+ 'info_dict': {
+ 'id': 'aq4hIZWrkBu',
+ 'ext': 'mp4',
+ 'title': 'Nike Football: The Last Game',
+ 'duration': 307,
+ 'uploader_id': 'laboratoriumdextera',
+ 'description': 'md5:7fb5ef3c21c5893375fda51d9b15d9cd',
+ },
+ }, {
+ 'url': 'http://w729.wrzuta.pl/audio/9oXJqdcndqv/david_guetta_amp_showtek_ft._vassy_-_bad',
+ 'md5': '1e546a18e1c22ac6e9adce17b8961ff5',
+ 'info_dict': {
+ 'id': '9oXJqdcndqv',
+ 'ext': 'ogg',
+ 'title': 'David Guetta & Showtek ft. Vassy - Bad',
+ 'duration': 270,
+ 'uploader_id': 'w729',
+ 'description': 'md5:4628f01c666bbaaecefa83476cfa794a',
+ },
+ }]
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ video_id = mobj.group('id')
+ typ = mobj.group('typ')
+ uploader = mobj.group('uploader')
+
+ webpage = self._download_webpage(url, video_id)
+
+ quality = qualities(['SD', 'MQ', 'HQ', 'HD'])
+
+ audio_table = {'flv': 'mp3', 'webm': 'ogg'}
+
+ embedpage = self._download_json('http://www.wrzuta.pl/npp/embed/%s/%s' % (uploader, video_id), video_id)
+
+ formats = []
+ for media in embedpage['url']:
+ if typ == 'audio':
+ ext = audio_table[media['type'].split('@')[0]]
+ else:
+ ext = media['type'].split('@')[0]
+
+ formats.append({
+ 'format_id': '%s_%s' % (ext, media['quality'].lower()),
+ 'url': media['url'],
+ 'ext': ext,
+ 'quality': quality(media['quality']),
+ })
+
+ self._sort_formats(formats)
+
+ return {
+ 'id': video_id,
+ 'title': self._og_search_title(webpage),
+ 'thumbnail': self._og_search_thumbnail(webpage),
+ 'formats': formats,
+ 'duration': int_or_none(embedpage['duration']),
+ 'uploader_id': uploader,
+ 'description': self._og_search_description(webpage),
+ 'age_limit': embedpage.get('minimalAge', 0),
+ }
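
The Wrzuta extractor relies on the qualities() helper to turn the label order ['SD', 'MQ', 'HQ', 'HD'] into sortable preferences for _sort_formats. In rough terms the helper maps each label to its position in that list, along these lines:

    def qualities(quality_ids):
        # A label's preference is its index in the caller-supplied order;
        # unknown labels sort below everything else.
        def q(qid):
            try:
                return quality_ids.index(qid)
            except ValueError:
                return -1
        return q

    quality = qualities(['SD', 'MQ', 'HQ', 'HD'])
    assert quality('HD') > quality('SD') > quality('unknown')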
diff --git a/youtube_dl/extractor/xvideos.py b/youtube_dl/extractor/xvideos.py
index 85e99e1b0..7e0044824 100644
--- a/youtube_dl/extractor/xvideos.py
+++ b/youtube_dl/extractor/xvideos.py
@@ -5,18 +5,21 @@ import re
from .common import InfoExtractor
from ..utils import (
compat_urllib_parse,
+ ExtractorError,
+ clean_html,
)
class XVideosIE(InfoExtractor):
_VALID_URL = r'^(?:https?://)?(?:www\.)?xvideos\.com/video([0-9]+)(?:.*)'
_TEST = {
- 'url': 'http://www.xvideos.com/video939581/funny_porns_by_s_-1',
- 'file': '939581.flv',
- 'md5': '1d0c835822f0a71a7bf011855db929d0',
+ 'url': 'http://www.xvideos.com/video4588838/biker_takes_his_girl',
+ 'md5': '4b46ae6ea5e6e9086e714d883313c0c9',
'info_dict': {
- "title": "Funny Porns By >>>>S<<<<<< -1",
- "age_limit": 18,
+ 'id': '4588838',
+ 'ext': 'flv',
+ 'title': 'Biker Takes his Girl',
+ 'age_limit': 18,
}
}
@@ -28,6 +31,10 @@ class XVideosIE(InfoExtractor):
self.report_extraction(video_id)
+ mobj = re.search(r'<h1 class="inlineError">(.+?)</h1>', webpage)
+ if mobj:
+ raise ExtractorError('%s said: %s' % (self.IE_NAME, clean_html(mobj.group(1))), expected=True)
+
# Extract video URL
video_url = compat_urllib_parse.unquote(
self._search_regex(r'flv_url=(.+?)&', webpage, 'video URL'))
diff --git a/youtube_dl/extractor/yahoo.py b/youtube_dl/extractor/yahoo.py
index 393f6ffbe..d84be2562 100644
--- a/youtube_dl/extractor/yahoo.py
+++ b/youtube_dl/extractor/yahoo.py
@@ -21,7 +21,7 @@ class YahooIE(InfoExtractor):
'url': 'http://screen.yahoo.com/julian-smith-travis-legg-watch-214727115.html',
'md5': '4962b075c08be8690a922ee026d05e69',
'info_dict': {
- 'id': '214727115',
+ 'id': '2d25e626-2378-391f-ada0-ddaf1417e588',
'ext': 'mp4',
'title': 'Julian Smith & Travis Legg Watch Julian Smith',
'description': 'Julian and Travis watch Julian Smith',
@@ -31,7 +31,7 @@ class YahooIE(InfoExtractor):
'url': 'http://screen.yahoo.com/wired/codefellas-s1-ep12-cougar-lies-103000935.html',
'md5': 'd6e6fc6e1313c608f316ddad7b82b306',
'info_dict': {
- 'id': '103000935',
+ 'id': 'd1dedf8c-d58c-38c3-8963-e899929ae0a9',
'ext': 'mp4',
'title': 'Codefellas - The Cougar Lies with Spanish Moss',
'description': 'Agent Topple\'s mustache does its dirty work, and Nicole brokers a deal for peace. But why is the NSA collecting millions of Instagram brunch photos? And if your waffles have nothing to hide, what are they so worried about?',
@@ -58,9 +58,11 @@ class YahooIE(InfoExtractor):
r'mediaItems: ({.*?})$', webpage, 'items', flags=re.MULTILINE,
default=None)
if items_json is None:
- long_id = self._search_regex(
+ CONTENT_ID_REGEXES = [
r'YUI\.namespace\("Media"\)\.CONTENT_ID\s*=\s*"([^"]+)"',
- webpage, 'content ID')
+ r'root\.App\.Cache\.context\.videoCache\.curVideo = \{"([^"]+)"'
+ ]
+ long_id = self._search_regex(CONTENT_ID_REGEXES, webpage, 'content ID')
video_id = long_id
else:
items = json.loads(items_json)
@@ -68,9 +70,9 @@ class YahooIE(InfoExtractor):
# The 'meta' field is not always in the video webpage, we request it
# from another page
long_id = info['id']
- return self._get_info(long_id, video_id)
+ return self._get_info(long_id, video_id, webpage)
- def _get_info(self, long_id, video_id):
+ def _get_info(self, long_id, video_id, webpage):
query = ('SELECT * FROM yahoo.media.video.streams WHERE id="%s"'
' AND plrs="86Gj0vCaSzV_Iuf6hNylf2" AND region="US"'
' AND protocol="http"' % long_id)
@@ -113,7 +115,7 @@ class YahooIE(InfoExtractor):
'title': meta['title'],
'formats': formats,
'description': clean_html(meta['description']),
- 'thumbnail': meta['thumbnail'],
+ 'thumbnail': meta['thumbnail'] if meta.get('thumbnail') else self._og_search_thumbnail(webpage),
}
@@ -137,7 +139,7 @@ class YahooNewsIE(YahooIE):
video_id = mobj.group('id')
webpage = self._download_webpage(url, video_id)
long_id = self._search_regex(r'contentId: \'(.+?)\',', webpage, 'long id')
- return self._get_info(long_id, video_id)
+ return self._get_info(long_id, video_id, webpage)
class YahooSearchIE(SearchInfoExtractor):
diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py
index 1f3aa4322..6123e1256 100644
--- a/youtube_dl/extractor/youtube.py
+++ b/youtube_dl/extractor/youtube.py
@@ -223,6 +223,8 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
'246': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'247': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'248': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
+ '271': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
+ '272': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
# Dash webm audio
'171': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'abr': 48, 'preference': -50},
@@ -242,7 +244,8 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
u"uploader": u"Philipp Hagemeister",
u"uploader_id": u"phihag",
u"upload_date": u"20121002",
- u"description": u"test chars: \"'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de ."
+ u"description": u"test chars: \"'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de .",
+ u"categories": [u'Science & Technology'],
}
},
{
@@ -438,7 +441,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
def _parse_sig_js(self, jscode):
funcname = self._search_regex(
- r'signature=([a-zA-Z]+)', jscode,
+ r'signature=([$a-zA-Z]+)', jscode,
u'Initial JS player signature function name')
jsi = JSInterpreter(jscode)
@@ -862,71 +865,26 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
def _decrypt_signature(self, s, video_id, player_url, age_gate=False):
"""Turn the encrypted s field into a working signature"""
- if player_url is not None:
- if player_url.startswith(u'//'):
- player_url = u'https:' + player_url
- try:
- player_id = (player_url, len(s))
- if player_id not in self._player_cache:
- func = self._extract_signature_function(
- video_id, player_url, len(s)
- )
- self._player_cache[player_id] = func
- func = self._player_cache[player_id]
- if self._downloader.params.get('youtube_print_sig_code'):
- self._print_sig_code(func, len(s))
- return func(s)
- except Exception:
- tb = traceback.format_exc()
- self._downloader.report_warning(
- u'Automatic signature extraction failed: ' + tb)
-
- self._downloader.report_warning(
- u'Warning: Falling back to static signature algorithm')
-
- return self._static_decrypt_signature(
- s, video_id, player_url, age_gate)
-
- def _static_decrypt_signature(self, s, video_id, player_url, age_gate):
- if age_gate:
- # The videos with age protection use another player, so the
- # algorithms can be different.
- if len(s) == 86:
- return s[2:63] + s[82] + s[64:82] + s[63]
-
- if len(s) == 93:
- return s[86:29:-1] + s[88] + s[28:5:-1]
- elif len(s) == 92:
- return s[25] + s[3:25] + s[0] + s[26:42] + s[79] + s[43:79] + s[91] + s[80:83]
- elif len(s) == 91:
- return s[84:27:-1] + s[86] + s[26:5:-1]
- elif len(s) == 90:
- return s[25] + s[3:25] + s[2] + s[26:40] + s[77] + s[41:77] + s[89] + s[78:81]
- elif len(s) == 89:
- return s[84:78:-1] + s[87] + s[77:60:-1] + s[0] + s[59:3:-1]
- elif len(s) == 88:
- return s[7:28] + s[87] + s[29:45] + s[55] + s[46:55] + s[2] + s[56:87] + s[28]
- elif len(s) == 87:
- return s[6:27] + s[4] + s[28:39] + s[27] + s[40:59] + s[2] + s[60:]
- elif len(s) == 86:
- return s[80:72:-1] + s[16] + s[71:39:-1] + s[72] + s[38:16:-1] + s[82] + s[15::-1]
- elif len(s) == 85:
- return s[3:11] + s[0] + s[12:55] + s[84] + s[56:84]
- elif len(s) == 84:
- return s[78:70:-1] + s[14] + s[69:37:-1] + s[70] + s[36:14:-1] + s[80] + s[:14][::-1]
- elif len(s) == 83:
- return s[80:63:-1] + s[0] + s[62:0:-1] + s[63]
- elif len(s) == 82:
- return s[80:37:-1] + s[7] + s[36:7:-1] + s[0] + s[6:0:-1] + s[37]
- elif len(s) == 81:
- return s[56] + s[79:56:-1] + s[41] + s[55:41:-1] + s[80] + s[40:34:-1] + s[0] + s[33:29:-1] + s[34] + s[28:9:-1] + s[29] + s[8:0:-1] + s[9]
- elif len(s) == 80:
- return s[1:19] + s[0] + s[20:68] + s[19] + s[69:80]
- elif len(s) == 79:
- return s[54] + s[77:54:-1] + s[39] + s[53:39:-1] + s[78] + s[38:34:-1] + s[0] + s[33:29:-1] + s[34] + s[28:9:-1] + s[29] + s[8:0:-1] + s[9]
+ if player_url is None:
+ raise ExtractorError(u'Cannot decrypt signature without player_url')
- else:
- raise ExtractorError(u'Unable to decrypt signature, key length %d not supported; retrying might work' % (len(s)))
+ if player_url.startswith(u'//'):
+ player_url = u'https:' + player_url
+ try:
+ player_id = (player_url, len(s))
+ if player_id not in self._player_cache:
+ func = self._extract_signature_function(
+ video_id, player_url, len(s)
+ )
+ self._player_cache[player_id] = func
+ func = self._player_cache[player_id]
+ if self._downloader.params.get('youtube_print_sig_code'):
+ self._print_sig_code(func, len(s))
+ return func(s)
+ except Exception as e:
+ tb = traceback.format_exc()
+ raise ExtractorError(
+ u'Automatic signature extraction failed: ' + tb, cause=e)
def _get_available_subtitles(self, video_id, webpage):
try:
@@ -1136,11 +1094,24 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
# upload date
upload_date = None
- mobj = re.search(r'id="eow-date.*?>(.*?)</span>', video_webpage, re.DOTALL)
+ mobj = re.search(r'(?s)id="eow-date.*?>(.*?)</span>', video_webpage)
+ if mobj is None:
+ mobj = re.search(
+ r'(?s)id="watch-uploader-info".*?>.*?(?:Published|Uploaded|Streamed live) on (.*?)</strong>',
+ video_webpage)
if mobj is not None:
upload_date = ' '.join(re.sub(r'[/,-]', r' ', mobj.group(1)).split())
upload_date = unified_strdate(upload_date)
+ m_cat_container = get_element_by_id("eow-category", video_webpage)
+ if m_cat_container:
+ category = self._html_search_regex(
+ r'(?s)<a[^<]+>(.*?)</a>', m_cat_container, 'category',
+ default=None)
+ video_categories = None if category is None else [category]
+ else:
+ video_categories = None
+
# description
video_description = get_element_by_id("eow-description", video_webpage)
if video_description:
@@ -1347,6 +1318,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
'title': video_title,
'thumbnail': video_thumbnail,
'description': video_description,
+ 'categories': video_categories,
'subtitles': video_subtitles,
'duration': video_duration,
'age_limit': 18 if age_gate else 0,
@@ -1370,13 +1342,13 @@ class YoutubePlaylistIE(YoutubeBaseInfoExtractor):
| p/
)
(
- (?:PL|EC|UU|FL|RD)?[0-9A-Za-z-_]{10,}
+ (?:PL|LL|EC|UU|FL|RD)?[0-9A-Za-z-_]{10,}
# Top tracks, they can also include dots
|(?:MC)[\w\.]*
)
.*
|
- ((?:PL|EC|UU|FL|RD)[0-9A-Za-z-_]{10,})
+ ((?:PL|LL|EC|UU|FL|RD)[0-9A-Za-z-_]{10,})
)"""
_TEMPLATE_URL = 'https://www.youtube.com/playlist?list=%s'
_MORE_PAGES_INDICATOR = r'data-link-type="next"'
@@ -1399,11 +1371,9 @@ class YoutubePlaylistIE(YoutubeBaseInfoExtractor):
title_span = (search_title('playlist-title') or
search_title('title long-title') or search_title('title'))
title = clean_html(title_span)
- video_re = r'''(?x)data-video-username="(.*?)".*?
+ video_re = r'''(?x)data-video-username=".*?".*?
href="/watch\?v=([0-9A-Za-z_-]{11})&amp;[^"]*?list=%s''' % re.escape(playlist_id)
- matches = orderedSet(re.findall(video_re, webpage, flags=re.DOTALL))
- # Some of the videos may have been deleted, their username field is empty
- ids = [video_id for (username, video_id) in matches if username]
+ ids = orderedSet(re.findall(video_re, webpage, flags=re.DOTALL))
url_results = self._ids_to_results(ids)
return self.playlist_result(url_results, playlist_id, title)
@@ -1683,14 +1653,14 @@ class YoutubeSearchURLIE(InfoExtractor):
webpage = self._download_webpage(url, query)
result_code = self._search_regex(
- r'(?s)<ol id="search-results"(.*?)</ol>', webpage, u'result HTML')
+ r'(?s)<ol class="item-section"(.*?)</ol>', webpage, u'result HTML')
part_codes = re.findall(
r'(?s)<h3 class="yt-lockup-title">(.*?)</h3>', result_code)
entries = []
for part_code in part_codes:
part_title = self._html_search_regex(
- r'(?s)title="([^"]+)"', part_code, 'item title', fatal=False)
+ [r'(?s)title="([^"]+)"', r'>([^<]+)</a>'], part_code, 'item title', fatal=False)
part_url_snippet = self._html_search_regex(
r'(?s)href="([^"]+)"', part_code, 'item URL')
part_url = compat_urlparse.urljoin(
@@ -1760,9 +1730,12 @@ class YoutubeFeedsInfoExtractor(YoutubeBaseInfoExtractor):
feed_entries.extend(
self.url_result(video_id, 'Youtube', video_id=video_id)
for video_id in ids)
- if info['paging'] is None:
+ mobj = re.search(
+ r'data-uix-load-more-href="/?[^"]+paging=(?P<paging>\d+)',
+ feed_html)
+ if mobj is None:
break
- paging = info['paging']
+ paging = mobj.group('paging')
return self.playlist_result(feed_entries, playlist_title=self._PLAYLIST_TITLE)
class YoutubeSubscriptionsIE(YoutubeFeedsInfoExtractor):
@@ -1807,10 +1780,21 @@ class YoutubeTruncatedURLIE(InfoExtractor):
IE_NAME = 'youtube:truncated_url'
IE_DESC = False # Do not list
_VALID_URL = r'''(?x)
- (?:https?://)?[^/]+/watch\?(?:feature=[a-z_]+)?$|
+ (?:https?://)?[^/]+/watch\?(?:
+ feature=[a-z_]+|
+ annotation_id=annotation_[^&]+
+ )?$|
(?:https?://)?(?:www\.)?youtube\.com/attribution_link\?a=[^&]+$
'''
+ _TESTS = [{
+ 'url': 'http://www.youtube.com/watch?annotation_id=annotation_3951667041',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://www.youtube.com/watch?',
+ 'only_matching': True,
+ }]
+
def _real_extract(self, url):
raise ExtractorError(
u'Did you forget to quote the URL? Remember that & is a meta '
diff --git a/youtube_dl/jsinterp.py b/youtube_dl/jsinterp.py
index 449482d3c..3bbb07704 100644
--- a/youtube_dl/jsinterp.py
+++ b/youtube_dl/jsinterp.py
@@ -59,7 +59,7 @@ class JSInterpreter(object):
if member == 'split("")':
return list(val)
if member == 'join("")':
- return u''.join(val)
+ return ''.join(val)
if member == 'length':
return len(val)
if member == 'reverse()':
@@ -99,7 +99,7 @@ class JSInterpreter(object):
def extract_function(self, funcname):
func_m = re.search(
- (r'(?:function %s|%s\s*=\s*function)' % (
+ (r'(?:function %s|[{;]%s\s*=\s*function)' % (
re.escape(funcname), re.escape(funcname))) +
r'\((?P<args>[a-z,]+)\){(?P<code>[^}]+)}',
self.code)
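
The tightened extract_function pattern requires a '{' or ';' immediately before the assigned name, presumably so that a short function name can no longer match inside a longer identifier that merely ends with it. A small illustration of the difference (the JavaScript snippet is made up):

    import re

    code = 'var ba=function(x){return x};a=function(s){return s.split("").reverse().join("")};'
    # Searching for 'a' with the old pattern could latch onto 'ba=function';
    # anchoring on a preceding '{' or ';' pins the match to the standalone 'a'.
    pattern = r'(?:function a|[{;]a\s*=\s*function)\((?P<args>[a-z,]+)\)\{(?P<code>[^}]+)\}'
    print(re.search(pattern, code).group('code'))  # -> return s.split("").reverse().join("")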
diff --git a/youtube_dl/postprocessor/ffmpeg.py b/youtube_dl/postprocessor/ffmpeg.py
index 602e370f4..45328ed43 100644
--- a/youtube_dl/postprocessor/ffmpeg.py
+++ b/youtube_dl/postprocessor/ffmpeg.py
@@ -9,6 +9,7 @@ from .common import AudioConversionError, PostProcessor
from ..utils import (
check_executable,
compat_subprocess_get_DEVNULL,
+ encodeArgument,
encodeFilename,
PostProcessingError,
prepend_extension,
@@ -48,7 +49,7 @@ class FFmpegPostProcessor(PostProcessor):
for path in input_paths:
files_cmd.extend(['-i', encodeFilename(path, True)])
cmd = ([self._get_executable(), '-y'] + files_cmd
- + opts +
+ + [encodeArgument(o) for o in opts] +
[encodeFilename(self._ffmpeg_filename_argument(out_path), True)])
if self._downloader.params.get('verbose', False):
diff --git a/youtube_dl/postprocessor/xattrpp.py b/youtube_dl/postprocessor/xattrpp.py
index 18979241c..f6940940b 100644
--- a/youtube_dl/postprocessor/xattrpp.py
+++ b/youtube_dl/postprocessor/xattrpp.py
@@ -6,6 +6,7 @@ from .common import PostProcessor
from ..utils import (
check_executable,
hyphenate_date,
+ subprocess_check_output
)
@@ -57,7 +58,7 @@ class XAttrMetadataPP(PostProcessor):
elif user_has_xattr:
cmd = ['xattr', '-w', key, value, path]
- subprocess.check_output(cmd)
+ subprocess_check_output(cmd)
else:
# On Unix, and can't find pyxattr, setfattr, or xattr.
diff --git a/youtube_dl/utils.py b/youtube_dl/utils.py
index 3e7947f5d..2cba2bfc1 100644
--- a/youtube_dl/utils.py
+++ b/youtube_dl/utils.py
@@ -540,6 +540,16 @@ def encodeFilename(s, for_subprocess=False):
encoding = 'utf-8'
return s.encode(encoding, 'ignore')
+
+def encodeArgument(s):
+ if not isinstance(s, compat_str):
+ # Legacy code that uses byte strings
+ # Uncomment the following line after fixing all post processors
+ #assert False, 'Internal error: %r should be of type %r, is %r' % (s, compat_str, type(s))
+ s = s.decode('ascii')
+ return encodeFilename(s, True)
+
+
def decodeOption(optval):
if optval is None:
return optval
@@ -806,6 +816,9 @@ def unified_strdate(date_str):
'%d %b %Y',
'%B %d %Y',
'%b %d %Y',
+ '%b %dst %Y %I:%M%p',
+ '%b %dnd %Y %I:%M%p',
+ '%b %dth %Y %I:%M%p',
'%Y-%m-%d',
'%d.%m.%Y',
'%d/%m/%Y',
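
The three added patterns let strptime consume dates carrying an ordinal day suffix: the 'st'/'nd'/'th' after %d is matched as a literal once the two-digit day has been read. A quick check with made-up sample strings:

    from datetime import datetime

    for s, fmt in [('Dec 2nd 2013 12:00PM', '%b %dnd %Y %I:%M%p'),
                   ('Jan 1st 2014 09:30AM', '%b %dst %Y %I:%M%p'),
                   ('Mar 15th 2014 11:45PM', '%b %dth %Y %I:%M%p')]:
        # unified_strdate would normalize these to YYYYMMDD
        print(datetime.strptime(s, fmt).strftime('%Y%m%d'))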
@@ -1429,3 +1442,15 @@ def qualities(quality_ids):
DEFAULT_OUTTMPL = '%(title)s-%(id)s.%(ext)s'
+
+try:
+ subprocess_check_output = subprocess.check_output
+except AttributeError:
+ def subprocess_check_output(*args, **kwargs):
+ assert 'input' not in kwargs
+ p = subprocess.Popen(*args, stdout=subprocess.PIPE, **kwargs)
+ output, _ = p.communicate()
+ ret = p.poll()
+ if ret:
+ raise subprocess.CalledProcessError(ret, p.args, output=output)
+ return output
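
The subprocess_check_output shim exists because subprocess.check_output is only available from Python 2.7 on, while older interpreters were still supported at the time; callers such as the xattr post-processor above can then use it unconditionally. A hypothetical call site (command and attribute names are placeholders, not taken from the code above):

    # Falls back to Popen + communicate on interpreters without check_output.
    cmd = ['xattr', '-w', 'user.xdg.referrer.url', 'http://example.com/', 'video.mp4']
    output = subprocess_check_output(cmd)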
diff --git a/youtube_dl/version.py b/youtube_dl/version.py
index fc78be37d..d6b05892c 100644
--- a/youtube_dl/version.py
+++ b/youtube_dl/version.py
@@ -1,2 +1,2 @@
-__version__ = '2014.05.13'
+__version__ = '2014.07.11'