about summary refs log tree commit diff
path: root/youtube_dl/extractor
diff options
context:
space:
mode:
Diffstat (limited to 'youtube_dl/extractor')
-rw-r--r--youtube_dl/extractor/__init__.py2
-rw-r--r--youtube_dl/extractor/bloomberg.py6
-rw-r--r--youtube_dl/extractor/breakcom.py29
-rw-r--r--youtube_dl/extractor/brightcove.py3
-rw-r--r--youtube_dl/extractor/common.py5
-rw-r--r--youtube_dl/extractor/dotsub.py51
-rw-r--r--youtube_dl/extractor/exfm.py76
-rw-r--r--youtube_dl/extractor/firsttv.py59
-rw-r--r--youtube_dl/extractor/freesound.py43
-rw-r--r--youtube_dl/extractor/googleplus.py59
-rw-r--r--youtube_dl/extractor/howcast.py40
-rw-r--r--youtube_dl/extractor/instagram.py40
-rw-r--r--youtube_dl/extractor/kontrtube.py66
-rw-r--r--youtube_dl/extractor/lifenews.py26
-rw-r--r--youtube_dl/extractor/slideshare.py23
-rw-r--r--youtube_dl/extractor/youtube.py12
16 files changed, 343 insertions, 197 deletions
diff --git a/youtube_dl/extractor/__init__.py b/youtube_dl/extractor/__init__.py
index b2e92bcaf..700e9920e 100644
--- a/youtube_dl/extractor/__init__.py
+++ b/youtube_dl/extractor/__init__.py
@@ -64,6 +64,7 @@ from .extremetube import ExtremeTubeIE
from .facebook import FacebookIE
from .faz import FazIE
from .firstpost import FirstpostIE
+from .firsttv import FirstTVIE
from .fktv import (
FKTVIE,
FKTVPosteckeIE,
@@ -115,6 +116,7 @@ from .keezmovies import KeezMoviesIE
from .khanacademy import KhanAcademyIE
from .kickstarter import KickStarterIE
from .keek import KeekIE
+from .kontrtube import KontrTubeIE
from .la7 import LA7IE
from .lifenews import LifeNewsIE
from .liveleak import LiveLeakIE
diff --git a/youtube_dl/extractor/bloomberg.py b/youtube_dl/extractor/bloomberg.py
index df2cff81c..2415ce403 100644
--- a/youtube_dl/extractor/bloomberg.py
+++ b/youtube_dl/extractor/bloomberg.py
@@ -24,5 +24,7 @@ class BloombergIE(InfoExtractor):
mobj = re.match(self._VALID_URL, url)
name = mobj.group('name')
webpage = self._download_webpage(url, name)
- ooyala_url = self._twitter_search_player(webpage)
- return self.url_result(ooyala_url, OoyalaIE.ie_key())
+ embed_code = self._search_regex(
+ r'<source src="https?://[^/]+/[^/]+/[^/]+/([^/]+)', webpage,
+ 'embed code')
+ return OoyalaIE._build_url_result(embed_code)
diff --git a/youtube_dl/extractor/breakcom.py b/youtube_dl/extractor/breakcom.py
index 53a898de3..8ec6dda49 100644
--- a/youtube_dl/extractor/breakcom.py
+++ b/youtube_dl/extractor/breakcom.py
@@ -1,18 +1,20 @@
+from __future__ import unicode_literals
+
import re
import json
from .common import InfoExtractor
-from ..utils import determine_ext
class BreakIE(InfoExtractor):
- _VALID_URL = r'(?:http://)?(?:www\.)?break\.com/video/([^/]+)'
+ _VALID_URL = r'http://(?:www\.)?break\.com/video/([^/]+)'
_TEST = {
- u'url': u'http://www.break.com/video/when-girls-act-like-guys-2468056',
- u'file': u'2468056.mp4',
- u'md5': u'a3513fb1547fba4fb6cfac1bffc6c46b',
- u'info_dict': {
- u"title": u"When Girls Act Like D-Bags"
+ 'url': 'http://www.break.com/video/when-girls-act-like-guys-2468056',
+ 'md5': 'a3513fb1547fba4fb6cfac1bffc6c46b',
+ 'info_dict': {
+ 'id': '2468056',
+ 'ext': 'mp4',
+ 'title': 'When Girls Act Like D-Bags',
}
}
@@ -22,17 +24,16 @@ class BreakIE(InfoExtractor):
embed_url = 'http://www.break.com/embed/%s' % video_id
webpage = self._download_webpage(embed_url, video_id)
info_json = self._search_regex(r'var embedVars = ({.*?});', webpage,
- u'info json', flags=re.DOTALL)
+ 'info json', flags=re.DOTALL)
info = json.loads(info_json)
video_url = info['videoUri']
m_youtube = re.search(r'(https?://www\.youtube\.com/watch\?v=.*)', video_url)
if m_youtube is not None:
return self.url_result(m_youtube.group(1), 'Youtube')
final_url = video_url + '?' + info['AuthToken']
- return [{
- 'id': video_id,
- 'url': final_url,
- 'ext': determine_ext(final_url),
- 'title': info['contentName'],
+ return {
+ 'id': video_id,
+ 'url': final_url,
+ 'title': info['contentName'],
'thumbnail': info['thumbUri'],
- }]
+ }
diff --git a/youtube_dl/extractor/brightcove.py b/youtube_dl/extractor/brightcove.py
index 031fe385d..83eec84d3 100644
--- a/youtube_dl/extractor/brightcove.py
+++ b/youtube_dl/extractor/brightcove.py
@@ -17,6 +17,7 @@ from ..utils import (
ExtractorError,
unsmuggle_url,
+ unescapeHTML,
)
@@ -139,7 +140,7 @@ class BrightcoveIE(InfoExtractor):
url_m = re.search(r'<meta\s+property="og:video"\s+content="(http://c.brightcove.com/[^"]+)"', webpage)
if url_m:
- return [url_m.group(1)]
+ return [unescapeHTML(url_m.group(1))]
matches = re.findall(
r'''(?sx)<object
diff --git a/youtube_dl/extractor/common.py b/youtube_dl/extractor/common.py
index 2c0c75604..84fca8ba0 100644
--- a/youtube_dl/extractor/common.py
+++ b/youtube_dl/extractor/common.py
@@ -271,8 +271,11 @@ class InfoExtractor(object):
def _download_json(self, url_or_request, video_id,
note=u'Downloading JSON metadata',
- errnote=u'Unable to download JSON metadata'):
+ errnote=u'Unable to download JSON metadata',
+ transform_source=None):
json_string = self._download_webpage(url_or_request, video_id, note, errnote)
+ if transform_source:
+ json_string = transform_source(json_string)
try:
return json.loads(json_string)
except ValueError as ve:
diff --git a/youtube_dl/extractor/dotsub.py b/youtube_dl/extractor/dotsub.py
index 0ee9a684e..5ae0ad5b6 100644
--- a/youtube_dl/extractor/dotsub.py
+++ b/youtube_dl/extractor/dotsub.py
@@ -1,41 +1,42 @@
+from __future__ import unicode_literals
+
import re
-import json
import time
from .common import InfoExtractor
class DotsubIE(InfoExtractor):
- _VALID_URL = r'(?:http://)?(?:www\.)?dotsub\.com/view/([^/]+)'
+ _VALID_URL = r'http://(?:www\.)?dotsub\.com/view/(?P<id>[^/]+)'
_TEST = {
- u'url': u'http://dotsub.com/view/aed3b8b2-1889-4df5-ae63-ad85f5572f27',
- u'file': u'aed3b8b2-1889-4df5-ae63-ad85f5572f27.flv',
- u'md5': u'0914d4d69605090f623b7ac329fea66e',
- u'info_dict': {
- u"title": u"Pyramids of Waste (2010), AKA The Lightbulb Conspiracy - Planned obsolescence documentary",
- u"uploader": u"4v4l0n42",
- u'description': u'Pyramids of Waste (2010) also known as "The lightbulb conspiracy" is a documentary about how our economic system based on consumerism and planned obsolescence is breaking our planet down.\r\n\r\nSolutions to this can be found at:\r\nhttp://robotswillstealyourjob.com\r\nhttp://www.federicopistono.org\r\n\r\nhttp://opensourceecology.org\r\nhttp://thezeitgeistmovement.com',
- u'thumbnail': u'http://dotsub.com/media/aed3b8b2-1889-4df5-ae63-ad85f5572f27/p',
- u'upload_date': u'20101213',
+ 'url': 'http://dotsub.com/view/aed3b8b2-1889-4df5-ae63-ad85f5572f27',
+ 'md5': '0914d4d69605090f623b7ac329fea66e',
+ 'info_dict': {
+ 'id': 'aed3b8b2-1889-4df5-ae63-ad85f5572f27',
+ 'ext': 'flv',
+ 'title': 'Pyramids of Waste (2010), AKA The Lightbulb Conspiracy - Planned obsolescence documentary',
+ 'uploader': '4v4l0n42',
+ 'description': 'Pyramids of Waste (2010) also known as "The lightbulb conspiracy" is a documentary about how our economic system based on consumerism and planned obsolescence is breaking our planet down.\r\n\r\nSolutions to this can be found at:\r\nhttp://robotswillstealyourjob.com\r\nhttp://www.federicopistono.org\r\n\r\nhttp://opensourceecology.org\r\nhttp://thezeitgeistmovement.com',
+ 'thumbnail': 'http://dotsub.com/media/aed3b8b2-1889-4df5-ae63-ad85f5572f27/p',
+ 'upload_date': '20101213',
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group(1)
- info_url = "https://dotsub.com/api/media/%s/metadata" %(video_id)
- webpage = self._download_webpage(info_url, video_id)
- info = json.loads(webpage)
+ video_id = mobj.group('id')
+ info_url = "https://dotsub.com/api/media/%s/metadata" % video_id
+ info = self._download_json(info_url, video_id)
date = time.gmtime(info['dateCreated']/1000) # The timestamp is in milliseconds
- return [{
- 'id': video_id,
- 'url': info['mediaURI'],
- 'ext': 'flv',
- 'title': info['title'],
- 'thumbnail': info['screenshotURI'],
+ return {
+ 'id': video_id,
+ 'url': info['mediaURI'],
+ 'ext': 'flv',
+ 'title': info['title'],
+ 'thumbnail': info['screenshotURI'],
'description': info['description'],
- 'uploader': info['user'],
- 'view_count': info['numberOfViews'],
- 'upload_date': u'%04i%02i%02i' % (date.tm_year, date.tm_mon, date.tm_mday),
- }]
+ 'uploader': info['user'],
+ 'view_count': info['numberOfViews'],
+ 'upload_date': '%04i%02i%02i' % (date.tm_year, date.tm_mon, date.tm_mday),
+ }
diff --git a/youtube_dl/extractor/exfm.py b/youtube_dl/extractor/exfm.py
index 682901d16..4de02aee9 100644
--- a/youtube_dl/extractor/exfm.py
+++ b/youtube_dl/extractor/exfm.py
@@ -1,56 +1,58 @@
+from __future__ import unicode_literals
+
import re
-import json
from .common import InfoExtractor
class ExfmIE(InfoExtractor):
- IE_NAME = u'exfm'
- IE_DESC = u'ex.fm'
- _VALID_URL = r'(?:http://)?(?:www\.)?ex\.fm/song/([^/]+)'
- _SOUNDCLOUD_URL = r'(?:http://)?(?:www\.)?api\.soundcloud\.com/tracks/([^/]+)/stream'
+ IE_NAME = 'exfm'
+ IE_DESC = 'ex.fm'
+ _VALID_URL = r'http://(?:www\.)?ex\.fm/song/(?P<id>[^/]+)'
+ _SOUNDCLOUD_URL = r'http://(?:www\.)?api\.soundcloud\.com/tracks/([^/]+)/stream'
_TESTS = [
{
- u'url': u'http://ex.fm/song/eh359',
- u'file': u'44216187.mp3',
- u'md5': u'e45513df5631e6d760970b14cc0c11e7',
- u'info_dict': {
- u"title": u"Test House \"Love Is Not Enough\" (Extended Mix) DeadJournalist Exclusive",
- u"uploader": u"deadjournalist",
- u'upload_date': u'20120424',
- u'description': u'Test House \"Love Is Not Enough\" (Extended Mix) DeadJournalist Exclusive',
+ 'url': 'http://ex.fm/song/eh359',
+ 'md5': 'e45513df5631e6d760970b14cc0c11e7',
+ 'info_dict': {
+ 'id': '44216187',
+ 'ext': 'mp3',
+ 'title': 'Test House "Love Is Not Enough" (Extended Mix) DeadJournalist Exclusive',
+ 'uploader': 'deadjournalist',
+ 'upload_date': '20120424',
+ 'description': 'Test House \"Love Is Not Enough\" (Extended Mix) DeadJournalist Exclusive',
},
- u'note': u'Soundcloud song',
- u'skip': u'The site is down too often',
+ 'note': 'Soundcloud song',
+ 'skip': 'The site is down too often',
},
{
- u'url': u'http://ex.fm/song/wddt8',
- u'file': u'wddt8.mp3',
- u'md5': u'966bd70741ac5b8570d8e45bfaed3643',
- u'info_dict': {
- u'title': u'Safe and Sound',
- u'uploader': u'Capital Cities',
+ 'url': 'http://ex.fm/song/wddt8',
+ 'md5': '966bd70741ac5b8570d8e45bfaed3643',
+ 'info_dict': {
+ 'id': 'wddt8',
+ 'ext': 'mp3',
+ 'title': 'Safe and Sound',
+ 'uploader': 'Capital Cities',
},
- u'skip': u'The site is down too often',
+ 'skip': 'The site is down too often',
},
]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
- song_id = mobj.group(1)
- info_url = "http://ex.fm/api/v3/song/%s" %(song_id)
- webpage = self._download_webpage(info_url, song_id)
- info = json.loads(webpage)
- song_url = info['song']['url']
+ song_id = mobj.group('id')
+ info_url = "http://ex.fm/api/v3/song/%s" % song_id
+ info = self._download_json(info_url, song_id)['song']
+ song_url = info['url']
if re.match(self._SOUNDCLOUD_URL, song_url) is not None:
self.to_screen('Soundcloud song detected')
- return self.url_result(song_url.replace('/stream',''), 'Soundcloud')
- return [{
- 'id': song_id,
- 'url': song_url,
- 'ext': 'mp3',
- 'title': info['song']['title'],
- 'thumbnail': info['song']['image']['large'],
- 'uploader': info['song']['artist'],
- 'view_count': info['song']['loved_count'],
- }]
+ return self.url_result(song_url.replace('/stream', ''), 'Soundcloud')
+ return {
+ 'id': song_id,
+ 'url': song_url,
+ 'ext': 'mp3',
+ 'title': info['title'],
+ 'thumbnail': info['image']['large'],
+ 'uploader': info['artist'],
+ 'view_count': info['loved_count'],
+ }
diff --git a/youtube_dl/extractor/firsttv.py b/youtube_dl/extractor/firsttv.py
new file mode 100644
index 000000000..44aca7b75
--- /dev/null
+++ b/youtube_dl/extractor/firsttv.py
@@ -0,0 +1,59 @@
+# encoding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import int_or_none
+
+
+class FirstTVIE(InfoExtractor):
+ IE_NAME = 'firsttv'
+ IE_DESC = 'Видеоархив - Первый канал'
+ _VALID_URL = r'http://(?:www\.)?1tv\.ru/videoarchive/(?P<id>\d+)'
+
+ _TEST = {
+ 'url': 'http://www.1tv.ru/videoarchive/73390',
+ 'md5': '3de6390cf0cca4a5eae1d1d83895e5ad',
+ 'info_dict': {
+ 'id': '73390',
+ 'ext': 'mp4',
+ 'title': 'Олимпийские канатные дороги',
+ 'description': 'md5:cc730d2bf4215463e37fff6a1e277b13',
+ 'thumbnail': 'http://img1.1tv.ru/imgsize640x360/PR20140210114657.JPG',
+ 'duration': 149,
+ }
+ }
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ video_id = mobj.group('id')
+
+ webpage = self._download_webpage(url, video_id, 'Downloading page')
+
+ video_url = self._html_search_regex(
+ r'''(?s)jwplayer\('flashvideoportal_1'\)\.setup\({.*?'file': '([^']+)'.*?}\);''', webpage, 'video URL')
+
+ title = self._html_search_regex(
+ r'<div class="tv_translation">\s*<h1><a href="[^"]+">([^<]*)</a>', webpage, 'title')
+ description = self._html_search_regex(
+ r'<div class="descr">\s*<div>&nbsp;</div>\s*<p>([^<]*)</p></div>', webpage, 'description', fatal=False)
+
+ thumbnail = self._og_search_thumbnail(webpage)
+ duration = self._og_search_property('video:duration', webpage, 'video duration', fatal=False)
+
+ like_count = self._html_search_regex(r'title="Понравилось".*?/></label> \[(\d+)\]',
+ webpage, 'like count', fatal=False)
+ dislike_count = self._html_search_regex(r'title="Не понравилось".*?/></label> \[(\d+)\]',
+ webpage, 'dislike count', fatal=False)
+
+ return {
+ 'id': video_id,
+ 'url': video_url,
+ 'thumbnail': thumbnail,
+ 'title': title,
+ 'description': description,
+ 'duration': int_or_none(duration),
+ 'like_count': int_or_none(like_count),
+ 'dislike_count': int_or_none(dislike_count),
+ } \ No newline at end of file
diff --git a/youtube_dl/extractor/freesound.py b/youtube_dl/extractor/freesound.py
index de14b12e5..5ff62af2a 100644
--- a/youtube_dl/extractor/freesound.py
+++ b/youtube_dl/extractor/freesound.py
@@ -1,18 +1,21 @@
+from __future__ import unicode_literals
+
import re
from .common import InfoExtractor
-from ..utils import determine_ext
+
class FreesoundIE(InfoExtractor):
- _VALID_URL = r'(?:https?://)?(?:www\.)?freesound\.org/people/([^/]+)/sounds/(?P<id>[^/]+)'
+ _VALID_URL = r'https?://(?:www\.)?freesound\.org/people/([^/]+)/sounds/(?P<id>[^/]+)'
_TEST = {
- u'url': u'http://www.freesound.org/people/miklovan/sounds/194503/',
- u'file': u'194503.mp3',
- u'md5': u'12280ceb42c81f19a515c745eae07650',
- u'info_dict': {
- u"title": u"gulls in the city.wav",
- u"uploader" : u"miklovan",
- u'description': u'the sounds of seagulls in the city',
+ 'url': 'http://www.freesound.org/people/miklovan/sounds/194503/',
+ 'md5': '12280ceb42c81f19a515c745eae07650',
+ 'info_dict': {
+ 'id': '194503',
+ 'ext': 'mp3',
+ 'title': 'gulls in the city.wav',
+ 'uploader': 'miklovan',
+ 'description': 'the sounds of seagulls in the city',
}
}
@@ -20,17 +23,17 @@ class FreesoundIE(InfoExtractor):
mobj = re.match(self._VALID_URL, url)
music_id = mobj.group('id')
webpage = self._download_webpage(url, music_id)
- title = self._html_search_regex(r'<div id="single_sample_header">.*?<a href="#">(.+?)</a>',
- webpage, 'music title', flags=re.DOTALL)
- music_url = self._og_search_property('audio', webpage, 'music url')
- description = self._html_search_regex(r'<div id="sound_description">(.*?)</div>',
- webpage, 'description', fatal=False, flags=re.DOTALL)
+ title = self._html_search_regex(
+ r'<div id="single_sample_header">.*?<a href="#">(.+?)</a>',
+ webpage, 'music title', flags=re.DOTALL)
+ description = self._html_search_regex(
+ r'<div id="sound_description">(.*?)</div>', webpage, 'description',
+ fatal=False, flags=re.DOTALL)
- return [{
- 'id': music_id,
- 'title': title,
- 'url': music_url,
+ return {
+ 'id': music_id,
+ 'title': title,
+ 'url': self._og_search_property('audio', webpage, 'music url'),
'uploader': self._og_search_property('audio:artist', webpage, 'music uploader'),
- 'ext': determine_ext(music_url),
'description': description,
- }]
+ }
diff --git a/youtube_dl/extractor/googleplus.py b/youtube_dl/extractor/googleplus.py
index 2570746b2..cc29a7e5d 100644
--- a/youtube_dl/extractor/googleplus.py
+++ b/youtube_dl/extractor/googleplus.py
@@ -1,4 +1,5 @@
# coding: utf-8
+from __future__ import unicode_literals
import datetime
import re
@@ -10,32 +11,28 @@ from ..utils import (
class GooglePlusIE(InfoExtractor):
- IE_DESC = u'Google Plus'
- _VALID_URL = r'(?:https://)?plus\.google\.com/(?:[^/]+/)*?posts/(\w+)'
- IE_NAME = u'plus.google'
+ IE_DESC = 'Google Plus'
+ _VALID_URL = r'https://plus\.google\.com/(?:[^/]+/)*?posts/(?P<id>\w+)'
+ IE_NAME = 'plus.google'
_TEST = {
- u"url": u"https://plus.google.com/u/0/108897254135232129896/posts/ZButuJc6CtH",
- u"file": u"ZButuJc6CtH.flv",
- u"info_dict": {
- u"upload_date": u"20120613",
- u"uploader": u"井上ヨシマサ",
- u"title": u"嘆きの天使 降臨"
+ 'url': 'https://plus.google.com/u/0/108897254135232129896/posts/ZButuJc6CtH',
+ 'info_dict': {
+ 'id': 'ZButuJc6CtH',
+ 'ext': 'flv',
+ 'upload_date': '20120613',
+ 'uploader': '井上ヨシマサ',
+ 'title': '嘆きの天使 降臨',
}
}
def _real_extract(self, url):
# Extract id from URL
mobj = re.match(self._VALID_URL, url)
- if mobj is None:
- raise ExtractorError(u'Invalid URL: %s' % url)
- post_url = mobj.group(0)
- video_id = mobj.group(1)
-
- video_extension = 'flv'
+ video_id = mobj.group('id')
# Step 1, Retrieve post webpage to extract further information
- webpage = self._download_webpage(post_url, video_id, u'Downloading entry webpage')
+ webpage = self._download_webpage(url, video_id, 'Downloading entry webpage')
self.report_extraction(video_id)
@@ -43,7 +40,7 @@ class GooglePlusIE(InfoExtractor):
upload_date = self._html_search_regex(
r'''(?x)<a.+?class="o-U-s\s[^"]+"\s+style="display:\s*none"\s*>
([0-9]{4}-[0-9]{2}-[0-9]{2})</a>''',
- webpage, u'upload date', fatal=False, flags=re.VERBOSE)
+ webpage, 'upload date', fatal=False, flags=re.VERBOSE)
if upload_date:
# Convert timestring to a format suitable for filename
upload_date = datetime.datetime.strptime(upload_date, "%Y-%m-%d")
@@ -51,28 +48,27 @@ class GooglePlusIE(InfoExtractor):
# Extract uploader
uploader = self._html_search_regex(r'rel\="author".*?>(.*?)</a>',
- webpage, u'uploader', fatal=False)
+ webpage, 'uploader', fatal=False)
# Extract title
# Get the first line for title
video_title = self._html_search_regex(r'<meta name\=\"Description\" content\=\"(.*?)[\n<"]',
- webpage, 'title', default=u'NA')
+ webpage, 'title', default='NA')
# Step 2, Simulate clicking the image box to launch video
DOMAIN = 'https://plus.google.com/'
video_page = self._search_regex(r'<a href="((?:%s)?photos/.*?)"' % re.escape(DOMAIN),
- webpage, u'video page URL')
+ webpage, 'video page URL')
if not video_page.startswith(DOMAIN):
video_page = DOMAIN + video_page
- webpage = self._download_webpage(video_page, video_id, u'Downloading video page')
+ webpage = self._download_webpage(video_page, video_id, 'Downloading video page')
- # Extract video links on video page
- """Extract video links of all sizes"""
+ # Extract video links all sizes
pattern = r'\d+,\d+,(\d+),"(http\://redirector\.googlevideo\.com.*?)"'
mobj = re.findall(pattern, webpage)
if len(mobj) == 0:
- raise ExtractorError(u'Unable to extract video links')
+ raise ExtractorError('Unable to extract video links')
# Sort in resolution
links = sorted(mobj)
@@ -87,12 +83,11 @@ class GooglePlusIE(InfoExtractor):
except AttributeError: # Python 3
video_url = bytes(video_url, 'ascii').decode('unicode-escape')
-
- return [{
- 'id': video_id,
- 'url': video_url,
+ return {
+ 'id': video_id,
+ 'url': video_url,
'uploader': uploader,
- 'upload_date': upload_date,
- 'title': video_title,
- 'ext': video_extension,
- }]
+ 'upload_date': upload_date,
+ 'title': video_title,
+ 'ext': 'flv',
+ }
diff --git a/youtube_dl/extractor/howcast.py b/youtube_dl/extractor/howcast.py
index bafc5826f..6ae04782c 100644
--- a/youtube_dl/extractor/howcast.py
+++ b/youtube_dl/extractor/howcast.py
@@ -1,17 +1,20 @@
+from __future__ import unicode_literals
+
import re
from .common import InfoExtractor
class HowcastIE(InfoExtractor):
- _VALID_URL = r'(?:https?://)?(?:www\.)?howcast\.com/videos/(?P<id>\d+)'
+ _VALID_URL = r'https?://(?:www\.)?howcast\.com/videos/(?P<id>\d+)'
_TEST = {
- u'url': u'http://www.howcast.com/videos/390161-How-to-Tie-a-Square-Knot-Properly',
- u'file': u'390161.mp4',
- u'md5': u'8b743df908c42f60cf6496586c7f12c3',
- u'info_dict': {
- u"description": u"The square knot, also known as the reef knot, is one of the oldest, most basic knots to tie, and can be used in many different ways. Here's the proper way to tie a square knot.",
- u"title": u"How to Tie a Square Knot Properly"
+ 'url': 'http://www.howcast.com/videos/390161-How-to-Tie-a-Square-Knot-Properly',
+ 'md5': '8b743df908c42f60cf6496586c7f12c3',
+ 'info_dict': {
+ 'id': '390161',
+ 'ext': 'mp4',
+ 'description': 'The square knot, also known as the reef knot, is one of the oldest, most basic knots to tie, and can be used in many different ways. Here\'s the proper way to tie a square knot.',
+ 'title': 'How to Tie a Square Knot Properly',
}
}
@@ -24,22 +27,15 @@ class HowcastIE(InfoExtractor):
self.report_extraction(video_id)
video_url = self._search_regex(r'\'?file\'?: "(http://mobile-media\.howcast\.com/[0-9]+\.mp4)',
- webpage, u'video URL')
-
- video_title = self._html_search_regex(r'<meta content=(?:"([^"]+)"|\'([^\']+)\') property=\'og:title\'',
- webpage, u'title')
+ webpage, 'video URL')
video_description = self._html_search_regex(r'<meta content=(?:"([^"]+)"|\'([^\']+)\') name=\'description\'',
- webpage, u'description', fatal=False)
+ webpage, 'description', fatal=False)
- thumbnail = self._html_search_regex(r'<meta content=\'(.+?)\' property=\'og:image\'',
- webpage, u'thumbnail', fatal=False)
-
- return [{
- 'id': video_id,
- 'url': video_url,
- 'ext': 'mp4',
- 'title': video_title,
+ return {
+ 'id': video_id,
+ 'url': video_url,
+ 'title': self._og_search_title(webpage),
'description': video_description,
- 'thumbnail': thumbnail,
- }]
+ 'thumbnail': self._og_search_thumbnail(webpage),
+ }
diff --git a/youtube_dl/extractor/instagram.py b/youtube_dl/extractor/instagram.py
index 660573d02..63141af27 100644
--- a/youtube_dl/extractor/instagram.py
+++ b/youtube_dl/extractor/instagram.py
@@ -1,35 +1,39 @@
+from __future__ import unicode_literals
+
import re
from .common import InfoExtractor
+
class InstagramIE(InfoExtractor):
- _VALID_URL = r'(?:http://)?instagram\.com/p/(.*?)/'
+ _VALID_URL = r'http://instagram\.com/p/(?P<id>.*?)/'
_TEST = {
- u'url': u'http://instagram.com/p/aye83DjauH/?foo=bar#abc',
- u'file': u'aye83DjauH.mp4',
- u'md5': u'0d2da106a9d2631273e192b372806516',
- u'info_dict': {
- u"uploader_id": u"naomipq",
- u"title": u"Video by naomipq",
- u'description': u'md5:1f17f0ab29bd6fe2bfad705f58de3cb8',
+ 'url': 'http://instagram.com/p/aye83DjauH/?foo=bar#abc',
+ 'md5': '0d2da106a9d2631273e192b372806516',
+ 'info_dict': {
+ 'id': 'aye83DjauH',
+ 'ext': 'mp4',
+ 'uploader_id': 'naomipq',
+ 'title': 'Video by naomipq',
+ 'description': 'md5:1f17f0ab29bd6fe2bfad705f58de3cb8',
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group(1)
+ video_id = mobj.group('id')
webpage = self._download_webpage(url, video_id)
uploader_id = self._search_regex(r'"owner":{"username":"(.+?)"',
- webpage, u'uploader id', fatal=False)
- desc = self._search_regex(r'"caption":"(.*?)"', webpage, u'description',
+ webpage, 'uploader id', fatal=False)
+ desc = self._search_regex(r'"caption":"(.*?)"', webpage, 'description',
fatal=False)
- return [{
- 'id': video_id,
- 'url': self._og_search_video_url(webpage, secure=False),
- 'ext': 'mp4',
- 'title': u'Video by %s' % uploader_id,
+ return {
+ 'id': video_id,
+ 'url': self._og_search_video_url(webpage, secure=False),
+ 'ext': 'mp4',
+ 'title': 'Video by %s' % uploader_id,
'thumbnail': self._og_search_thumbnail(webpage),
- 'uploader_id' : uploader_id,
+ 'uploader_id': uploader_id,
'description': desc,
- }]
+ }
diff --git a/youtube_dl/extractor/kontrtube.py b/youtube_dl/extractor/kontrtube.py
new file mode 100644
index 000000000..1b45b67b0
--- /dev/null
+++ b/youtube_dl/extractor/kontrtube.py
@@ -0,0 +1,66 @@
+# encoding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+
+
+class KontrTubeIE(InfoExtractor):
+ IE_NAME = 'kontrtube'
+ IE_DESC = 'KontrTube.ru - Труба зовёт'
+ _VALID_URL = r'http://(?:www\.)?kontrtube\.ru/videos/(?P<id>\d+)/.+'
+
+ _TEST = {
+ 'url': 'http://www.kontrtube.ru/videos/2678/nad-olimpiyskoy-derevney-v-sochi-podnyat-rossiyskiy-flag/',
+ 'md5': '975a991a4926c9a85f383a736a2e6b80',
+ 'info_dict': {
+ 'id': '2678',
+ 'ext': 'mp4',
+ 'title': 'Над олимпийской деревней в Сочи поднят российский флаг',
+ 'description': 'md5:80edc4c613d5887ae8ccf1d59432be41',
+ 'thumbnail': 'http://www.kontrtube.ru/contents/videos_screenshots/2000/2678/preview.mp4.jpg',
+ 'duration': 270,
+ }
+ }
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ video_id = mobj.group('id')
+
+ webpage = self._download_webpage(url, video_id, 'Downloading page')
+
+ video_url = self._html_search_regex(r"video_url: '(.+?)/?',", webpage, 'video URL')
+ thumbnail = self._html_search_regex(r"preview_url: '(.+?)/?',", webpage, 'video thumbnail', fatal=False)
+ title = self._html_search_regex(r'<title>(.+?) - Труба зовёт - Интересный видеохостинг</title>', webpage,
+ 'video title')
+ description = self._html_search_meta('description', webpage, 'video description')
+
+ mobj = re.search(r'<div class="col_2">Длительность: <span>(?P<minutes>\d+)м:(?P<seconds>\d+)с</span></div>',
+ webpage)
+ duration = int(mobj.group('minutes')) * 60 + int(mobj.group('seconds')) if mobj else None
+
+ view_count = self._html_search_regex(r'<div class="col_2">Просмотров: <span>(\d+)</span></div>', webpage,
+ 'view count', fatal=False)
+ view_count = int(view_count) if view_count is not None else None
+
+ comment_count = None
+ comment_str = self._html_search_regex(r'Комментарии: <span>([^<]+)</span>', webpage, 'comment count',
+ fatal=False)
+ if comment_str.startswith('комментариев нет'):
+ comment_count = 0
+ else:
+ mobj = re.search(r'\d+ из (?P<total>\d+) комментариев', comment_str)
+ if mobj:
+ comment_count = int(mobj.group('total'))
+
+ return {
+ 'id': video_id,
+ 'url': video_url,
+ 'thumbnail': thumbnail,
+ 'title': title,
+ 'description': description,
+ 'duration': duration,
+ 'view_count': view_count,
+ 'comment_count': comment_count,
+ } \ No newline at end of file
diff --git a/youtube_dl/extractor/lifenews.py b/youtube_dl/extractor/lifenews.py
index 051259857..7b7185f9a 100644
--- a/youtube_dl/extractor/lifenews.py
+++ b/youtube_dl/extractor/lifenews.py
@@ -4,19 +4,23 @@ from __future__ import unicode_literals
import re
from .common import InfoExtractor
-from ..utils import unified_strdate
+from ..utils import (
+ int_or_none,
+ unified_strdate
+)
class LifeNewsIE(InfoExtractor):
IE_NAME = 'lifenews'
IE_DESC = 'LIFE | NEWS'
_VALID_URL = r'http://lifenews\.ru/(?:mobile/)?news/(?P<id>\d+)'
-
+
_TEST = {
'url': 'http://lifenews.ru/news/126342',
- 'file': '126342.mp4',
'md5': 'e1b50a5c5fb98a6a544250f2e0db570a',
'info_dict': {
+ 'id': '126342',
+ 'ext': 'mp4',
'title': 'МВД разыскивает мужчин, оставивших в IKEA сумку с автоматом',
'description': 'Камеры наблюдения гипермаркета зафиксировали троих мужчин, спрятавших оружейный арсенал в камере хранения.',
'thumbnail': 'http://lifenews.ru/static/posts/2014/1/126342/.video.jpg',
@@ -32,7 +36,7 @@ class LifeNewsIE(InfoExtractor):
video_url = self._html_search_regex(
r'<video.*?src="([^"]+)".*?></video>', webpage, 'video URL')
-
+
thumbnail = self._html_search_regex(
r'<video.*?poster="([^"]+)".*?"></video>', webpage, 'video thumbnail')
@@ -44,12 +48,14 @@ class LifeNewsIE(InfoExtractor):
description = self._og_search_description(webpage)
view_count = self._html_search_regex(
- r'<div class=\'views\'>(\d+)</div>', webpage, 'view count')
+ r'<div class=\'views\'>(\d+)</div>', webpage, 'view count', fatal=False)
comment_count = self._html_search_regex(
- r'<div class=\'comments\'>(\d+)</div>', webpage, 'comment count')
+ r'<div class=\'comments\'>(\d+)</div>', webpage, 'comment count', fatal=False)
upload_date = self._html_search_regex(
- r'<time datetime=\'([^\']+)\'>', webpage, 'upload date')
+ r'<time datetime=\'([^\']+)\'>', webpage, 'upload date',fatal=False)
+ if upload_date is not None:
+ upload_date = unified_strdate(upload_date)
return {
'id': video_id,
@@ -57,7 +63,7 @@ class LifeNewsIE(InfoExtractor):
'thumbnail': thumbnail,
'title': title,
'description': description,
- 'view_count': view_count,
- 'comment_count': comment_count,
- 'upload_date': unified_strdate(upload_date),
+ 'view_count': int_or_none(view_count),
+ 'comment_count': int_or_none(comment_count),
+ 'upload_date': upload_date,
} \ No newline at end of file
diff --git a/youtube_dl/extractor/slideshare.py b/youtube_dl/extractor/slideshare.py
index afc3001b5..9c62825cc 100644
--- a/youtube_dl/extractor/slideshare.py
+++ b/youtube_dl/extractor/slideshare.py
@@ -1,3 +1,5 @@
+from __future__ import unicode_literals
+
import re
import json
@@ -12,11 +14,12 @@ class SlideshareIE(InfoExtractor):
_VALID_URL = r'https?://www\.slideshare\.net/[^/]+?/(?P<title>.+?)($|\?)'
_TEST = {
- u'url': u'http://www.slideshare.net/Dataversity/keynote-presentation-managing-scale-and-complexity',
- u'file': u'25665706.mp4',
- u'info_dict': {
- u'title': u'Managing Scale and Complexity',
- u'description': u'This was a keynote presentation at the NoSQL Now! 2013 Conference & Expo (http://www.nosqlnow.com). This presentation was given by Adrian Cockcroft from Netflix',
+ 'url': 'http://www.slideshare.net/Dataversity/keynote-presentation-managing-scale-and-complexity',
+ 'info_dict': {
+ 'id': '25665706',
+ 'ext': 'mp4',
+ 'title': 'Managing Scale and Complexity',
+ 'description': 'This was a keynote presentation at the NoSQL Now! 2013 Conference & Expo (http://www.nosqlnow.com). This presentation was given by Adrian Cockcroft from Netflix.',
},
}
@@ -26,15 +29,17 @@ class SlideshareIE(InfoExtractor):
webpage = self._download_webpage(url, page_title)
slideshare_obj = self._search_regex(
r'var slideshare_object = ({.*?}); var user_info =',
- webpage, u'slideshare object')
+ webpage, 'slideshare object')
info = json.loads(slideshare_obj)
- if info['slideshow']['type'] != u'video':
- raise ExtractorError(u'Webpage type is "%s": only video extraction is supported for Slideshare' % info['slideshow']['type'], expected=True)
+ if info['slideshow']['type'] != 'video':
+ raise ExtractorError('Webpage type is "%s": only video extraction is supported for Slideshare' % info['slideshow']['type'], expected=True)
doc = info['doc']
bucket = info['jsplayer']['video_bucket']
ext = info['jsplayer']['video_extension']
video_url = compat_urlparse.urljoin(bucket, doc + '-SD.' + ext)
+ description = self._html_search_regex(
+ r'<p class="description.*?"[^>]*>(.*?)</p>', webpage, 'description')
return {
'_type': 'video',
@@ -43,5 +48,5 @@ class SlideshareIE(InfoExtractor):
'ext': ext,
'url': video_url,
'thumbnail': info['slideshow']['pin_image_url'],
- 'description': self._og_search_description(webpage),
+ 'description': description,
}
diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py
index e038c7752..8c2c4dfa2 100644
--- a/youtube_dl/extractor/youtube.py
+++ b/youtube_dl/extractor/youtube.py
@@ -34,6 +34,7 @@ from ..utils import (
unified_strdate,
orderedSet,
write_json_file,
+ uppercase_escape,
)
class YoutubeBaseInfoExtractor(InfoExtractor):
@@ -136,7 +137,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
(?:https?://|//)? # http(s):// or protocol-independent URL (optional)
(?:(?:(?:(?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie)?\.com/|
(?:www\.)?deturl\.com/www\.youtube\.com/|
- (?:www\.)?pwnyoutube\.com|
+ (?:www\.)?pwnyoutube\.com/|
tube\.majestyc\.net/|
youtube\.googleapis\.com/) # the various hostnames, with wildcard subdomains
(?:.*?\#/)? # handle anchor (#/) redirect urls
@@ -1590,11 +1591,10 @@ class YoutubeChannelIE(InfoExtractor):
# Download all channel pages using the json-based channel_ajax query
for pagenum in itertools.count(1):
url = self._MORE_PAGES_URL % (pagenum, channel_id)
- page = self._download_webpage(url, channel_id,
- u'Downloading page #%s' % pagenum)
-
- page = json.loads(page)
-
+ page = self._download_json(
+ url, channel_id, note=u'Downloading page #%s' % pagenum,
+ transform_source=uppercase_escape)
+
ids_in_page = self.extract_videos_from_page(page['content_html'])
video_ids.extend(ids_in_page)