author     Philipp Hagemeister <phihag@phihag.de>   2012-12-27 01:38:41 +0100
committer  Philipp Hagemeister <phihag@phihag.de>   2012-12-27 01:38:41 +0100
commit     d0d4f277da6f1f74e5adcf532b17d01d52943ba8 (patch)
tree       eab753a603c0741451a66f7ebbce7959b6fa356a /youtube_dl
parent     99b0a1292b6ceccca576c8555e03bc13fb8e4bdd (diff)
TweetReel IE
Diffstat (limited to 'youtube_dl')
-rwxr-xr-x  youtube_dl/InfoExtractors.py  67
-rw-r--r--  youtube_dl/__init__.py         1
2 files changed, 65 insertions, 3 deletions
diff --git a/youtube_dl/InfoExtractors.py b/youtube_dl/InfoExtractors.py
index 210348080..890ff7378 100755
--- a/youtube_dl/InfoExtractors.py
+++ b/youtube_dl/InfoExtractors.py
@@ -32,10 +32,10 @@ class InfoExtractor(object):
id: Video identifier.
url: Final video URL.
- uploader: Full name of the video uploader, unescaped.
- upload_date: Video upload date (YYYYMMDD).
title: Video title, unescaped.
ext: Video filename extension.
+ uploader: Full name of the video uploader.
+ upload_date: Video upload date (YYYYMMDD).
The following fields are optional:
@@ -101,6 +101,9 @@ class InfoExtractor(object):
"""Real extraction process. Redefine in subclasses."""
pass
+ @property
+ def IE_NAME(self):
+ return type(self).__name__[:-2]
class YoutubeIE(InfoExtractor):
"""Information extractor for youtube.com."""
@@ -3672,7 +3675,6 @@ class FunnyOrDieIE(InfoExtractor):
if not m:
self._downloader.trouble(u'ERROR: unable to find video information')
video_url = unescapeHTML(m.group('url'))
- print(video_url)
m = re.search(r"class='player_page_h1'>\s+<a.*?>(?P<title>.*?)</a>", webpage)
if not m:
@@ -3693,3 +3695,62 @@ class FunnyOrDieIE(InfoExtractor):
'description': desc,
}
return [info]
+
+class TweetReelIE(InfoExtractor):
+ _VALID_URL = r'^(?:https?://)?(?:www\.)?tweetreel\.com/[?](?P<id>[0-9a-z]+)$'
+
+ def report_extraction(self, video_id):
+ self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id))
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ if mobj is None:
+ self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+ return
+
+ video_id = mobj.group('id')
+ self.report_extraction(video_id)
+ try:
+ urlh = compat_urllib_request.urlopen(url)
+ webpage_bytes = urlh.read()
+ webpage = webpage_bytes.decode('utf-8', 'ignore')
+ except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+ self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
+ return
+
+ m = re.search(r'<div id="left" status_id="([0-9]+)">', webpage)
+ if not m:
+ self._downloader.trouble(u'ERROR: Cannot find status ID')
+ status_id = m.group(1)
+
+ m = re.search(r'<div class="tweet_text">(.*?)</div>', webpage, flags=re.DOTALL)
+ if not m:
+ self._downloader.trouble(u'WARNING: Cannot find description')
+ desc = unescapeHTML(re.sub('<a.*?</a>', '', m.group(1))).strip()
+
+ m = re.search(r'<div class="tweet_info">.*?from <a target="_blank" href="https?://twitter.com/(?P<uploader_id>.+?)">(?P<uploader>.+?)</a>', webpage, flags=re.DOTALL)
+ if not m:
+ self._downloader.trouble(u'ERROR: Cannot find uploader')
+ uploader = unescapeHTML(m.group('uploader'))
+ uploader_id = unescapeHTML(m.group('uploader_id'))
+
+ m = re.search(r'<span unixtime="([0-9]+)"', webpage)
+ if not m:
+ self._downloader.trouble(u'ERROR: Cannot find upload date')
+ upload_date = datetime.datetime.fromtimestamp(int(m.group(1))).strftime('%Y%m%d')
+
+ title = desc
+ video_url = 'http://files.tweetreel.com/video/' + status_id + '.mov'
+
+ info = {
+ 'id': video_id,
+ 'url': video_url,
+ 'ext': 'mov',
+ 'title': title,
+ 'description': desc,
+ 'uploader': uploader,
+ 'uploader_id': uploader_id,
+ 'internal_id': status_id,
+ 'upload_date': upload_date
+ }
+ return [info]
diff --git a/youtube_dl/__init__.py b/youtube_dl/__init__.py
index d044797f0..d12ece21e 100644
--- a/youtube_dl/__init__.py
+++ b/youtube_dl/__init__.py
@@ -403,6 +403,7 @@ def gen_extractors():
NBAIE(),
JustinTVIE(),
FunnyOrDieIE(),
+ TweetReelIE(),
GenericIE()
]
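
A minimal standalone sketch of the two pieces this commit introduces: the default IE_NAME property on InfoExtractor (class name with the trailing "IE" stripped) and the TweetReel URL pattern used by the new extractor. The sample permalink and the stripped-down classes below are illustrative assumptions, not taken from the patch or the test suite.

import re

class InfoExtractor(object):
    @property
    def IE_NAME(self):
        # Default extractor name: class name minus the trailing "IE",
        # e.g. TweetReelIE -> "TweetReel".
        return type(self).__name__[:-2]

class TweetReelIE(InfoExtractor):
    _VALID_URL = r'^(?:https?://)?(?:www\.)?tweetreel\.com/[?](?P<id>[0-9a-z]+)$'

if __name__ == '__main__':
    ie = TweetReelIE()
    print(ie.IE_NAME)  # -> TweetReel
    # Hypothetical permalink, used only to exercise the regex.
    mobj = re.match(TweetReelIE._VALID_URL, 'http://tweetreel.com/?ln4cn')
    print(mobj.group('id') if mobj else None)  # -> ln4cn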