author    Dave Vasilevsky <dave@vasilevsky.ca>    2012-12-16 03:50:41 -0500
committer Dave Vasilevsky <dave@vasilevsky.ca>    2012-12-16 03:50:41 -0500
commit    0b40544f290de329679aebf06e98056e707dd7e1 (patch)
tree      47f735ec28a06ae5a638d8d9359d5dd9b5872eff /youtube_dl/InfoExtractors.py
parent    0be41ec241d8308378c134d803f6b67b93a6c8de (diff)
Preliminary support for twitch.tv and justin.tv
Diffstat (limited to 'youtube_dl/InfoExtractors.py')
-rwxr-xr-x[-rw-r--r--]  youtube_dl/InfoExtractors.py  56
1 file changed, 56 insertions, 0 deletions
diff --git a/youtube_dl/InfoExtractors.py b/youtube_dl/InfoExtractors.py
index 3a6e84ebb..c5ab8907b 100644..100755
--- a/youtube_dl/InfoExtractors.py
+++ b/youtube_dl/InfoExtractors.py
@@ -3634,3 +3634,59 @@ class NBAIE(InfoExtractor):
             'description': _findProp(r'<div class="description">(.*?)</h1>'),
         }
         return [info]
+
+class JustinTVIE(InfoExtractor):
+    """Information extractor for justin.tv and twitch.tv"""
+
+#    _VALID_URL = r"""^(?:http(?:s?)://)?www\.(?:justin|twitch)\.tv/
+#        ([^/]+)(?:/b/([^/]+))?/?(?:#.*)?$"""
+    _VALID_URL = r'^http://www\.twitch\.tv/(.*)$'
+    IE_NAME = u'justin.tv'
+
+    def report_extraction(self, file_id):
+        """Report information extraction."""
+        self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, file_id))
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+            return
+
+        api = 'http://api.justin.tv'
+        video_id = mobj.group(mobj.lastindex)
+        if mobj.lastindex == 1:
+            api += '/channel/archives/%s.json?limit=100'
+        else:
+            api += '/clip/show/%s.json'
+        api = api % (video_id,)
+
+        self.report_extraction(video_id)
+        # TODO: multiple pages
+        # TODO: One broadcast may be split into multiple videos. The key
+        # 'broadcast_id' is the same for all parts, and 'broadcast_part'
+        # starts at 1 and increases. Can we treat all parts as one video?
+        try:
+            urlh = compat_urllib_request.urlopen(api)
+            webpage_bytes = urlh.read()
+            webpage = webpage_bytes.decode('utf-8', 'ignore')
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            self._downloader.trouble(u'ERROR: unable to download video info JSON: %s' % compat_str(err))
+            return
+
+        response = json.loads(webpage)
+        info = []
+        for clip in response:
+            video_url = clip['video_file_url']
+            if video_url:
+                video_extension = os.path.splitext(video_url)[1][1:]
+                video_date = re.sub('-', '', clip['created_on'][:10])
+                info.append({
+                    'id': clip['id'],
+                    'url': video_url,
+                    'title': clip['title'],
+                    'uploader': clip['user_id'] or clip['channel_id'],
+                    'upload_date': video_date,
+                    'ext': video_extension,
+                })
+        return info
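
For reference, a small standalone sketch of how the two endpoint choices above pair with the URL shapes in the commented-out _VALID_URL: a plain channel page goes to the channel archives listing, while a /b/<clip id> link goes to the single-clip endpoint. The example channel and clip values are made up; the mapping only restates what the patch already does.

def justin_api_url(channel, clip_id=None):
    # Channel page -> list of archived broadcasts (what the patch requests
    # when the URL regex only captured the channel name).
    if clip_id is None:
        return 'http://api.justin.tv/channel/archives/%s.json?limit=100' % channel
    # /b/<clip id> archive link -> a single clip record.
    return 'http://api.justin.tv/clip/show/%s.json' % clip_id

# e.g. justin_api_url('somechannel') or justin_api_url('somechannel', '123456789')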
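The first TODO ("multiple pages") could be handled roughly as below. This is only a sketch in plain Python 3 rather than the compat_* helpers used by InfoExtractors.py, and it assumes the archives endpoint accepts 'limit' and 'offset' query parameters and returns an empty JSON list once the archive is exhausted; neither assumption is confirmed by this patch.

import json
import urllib.request

def fetch_all_archives(channel, page_size=100):
    """Page through a channel's archived broadcasts (assumed API behaviour)."""
    clips = []
    offset = 0
    while True:
        url = ('http://api.justin.tv/channel/archives/%s.json?limit=%d&offset=%d'
               % (channel, page_size, offset))
        with urllib.request.urlopen(url) as handle:
            page = json.loads(handle.read().decode('utf-8', 'ignore'))
        if not page:
            break
        clips.extend(page)
        offset += page_size
    return clips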
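The second TODO asks whether all parts of one broadcast can be treated as a single video. One way to prepare for that, sketched below, is to group the returned clips by 'broadcast_id' and order each group by 'broadcast_part' (field names taken from the TODO comment); how the grouped parts would then be presented to the downloader is left open.

from itertools import groupby

def group_broadcast_parts(clips):
    """Group clip records into broadcasts, parts ordered by 'broadcast_part'."""
    keyed = sorted(clips, key=lambda c: (c['broadcast_id'], c['broadcast_part']))
    grouped = []
    for broadcast_id, parts in groupby(keyed, key=lambda c: c['broadcast_id']):
        grouped.append({
            'broadcast_id': broadcast_id,
            'parts': list(parts),  # each part keeps its own video_file_url
        })
    return grouped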