Diffstat (limited to 'youtube_dl/InfoExtractors.py')
-rwxr-xr-x  youtube_dl/InfoExtractors.py  178
1 file changed, 123 insertions(+), 55 deletions(-)
diff --git a/youtube_dl/InfoExtractors.py b/youtube_dl/InfoExtractors.py
index e714fa6b0..dd4a776e4 100755
--- a/youtube_dl/InfoExtractors.py
+++ b/youtube_dl/InfoExtractors.py
@@ -48,7 +48,7 @@ class InfoExtractor(object):
uploader_id: Nickname or id of the video uploader.
location: Physical location of the video.
player_url: SWF Player URL (used for rtmpdump).
- subtitles: The .srt file contents.
+ subtitles: The subtitle file contents.
urlhandle: [internal] The urlHandle to be used to download the file,
like returned by urllib.request.urlopen
@@ -126,8 +126,14 @@ class InfoExtractor(object):
def _download_webpage(self, url_or_request, video_id, note=None, errnote=None):
""" Returns the data of the page as a string """
urlh = self._request_webpage(url_or_request, video_id, note, errnote)
+ content_type = urlh.headers.get('Content-Type', '')
+ m = re.match(r'[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+\s*;\s*charset=(.+)', content_type)
+ if m:
+ encoding = m.group(1)
+ else:
+ encoding = 'utf-8'
webpage_bytes = urlh.read()
- return webpage_bytes.decode('utf-8', 'replace')
+ return webpage_bytes.decode(encoding, 'replace')
#Methods for following #608
#They set the correct value of the '_type' key
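
The hunk above makes _download_webpage honor the charset declared in the HTTP Content-Type header instead of always assuming UTF-8. A minimal standalone sketch of the same sniffing logic (the sample header values are illustrative, not taken from the diff):

    import re

    def sniff_charset(content_type, default='utf-8'):
        # Match "type/subtype; charset=..." and pull out the charset parameter.
        m = re.match(r'[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+\s*;\s*charset=(.+)', content_type)
        return m.group(1) if m else default

    print(sniff_charset('text/html; charset=ISO-8859-1'))  # ISO-8859-1
    print(sniff_charset('application/json'))               # utf-8 (fallback)
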
@@ -236,7 +242,16 @@ class YoutubeIE(InfoExtractor):
def report_video_subtitles_download(self, video_id):
"""Report attempt to download video info webpage."""
- self._downloader.to_screen(u'[youtube] %s: Downloading video subtitles' % video_id)
+ self._downloader.to_screen(u'[youtube] %s: Checking available subtitles' % video_id)
+
+ def report_video_subtitles_request(self, video_id, sub_lang, format):
+ """Report attempt to download video info webpage."""
+ self._downloader.to_screen(u'[youtube] %s: Downloading video subtitles for %s.%s' % (video_id, sub_lang, format))
+
+ def report_video_subtitles_available(self, video_id, sub_lang_list):
+ """Report available subtitles."""
+ sub_lang = ",".join(list(sub_lang_list.keys()))
+ self._downloader.to_screen(u'[youtube] %s: Available subtitles for video: %s' % (video_id, sub_lang))
def report_information_extraction(self, video_id):
"""Report attempt to extract video information."""
@@ -250,55 +265,63 @@ class YoutubeIE(InfoExtractor):
"""Indicate the download will use the RTMP protocol."""
self._downloader.to_screen(u'[youtube] RTMP download detected')
- def _closed_captions_xml_to_srt(self, xml_string):
- srt = ''
- texts = re.findall(r'<text start="([\d\.]+)"( dur="([\d\.]+)")?>([^<]+)</text>', xml_string, re.MULTILINE)
- # TODO parse xml instead of regex
- for n, (start, dur_tag, dur, caption) in enumerate(texts):
- if not dur: dur = '4'
- start = float(start)
- end = start + float(dur)
- start = "%02i:%02i:%02i,%03i" %(start/(60*60), start/60%60, start%60, start%1*1000)
- end = "%02i:%02i:%02i,%03i" %(end/(60*60), end/60%60, end%60, end%1*1000)
- caption = unescapeHTML(caption)
- caption = unescapeHTML(caption) # double cycle, intentional
- srt += str(n+1) + '\n'
- srt += start + ' --> ' + end + '\n'
- srt += caption + '\n\n'
- return srt
-
- def _extract_subtitles(self, video_id):
+ def _get_available_subtitles(self, video_id):
self.report_video_subtitles_download(video_id)
request = compat_urllib_request.Request('http://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id)
try:
- srt_list = compat_urllib_request.urlopen(request).read().decode('utf-8')
+ sub_list = compat_urllib_request.urlopen(request).read().decode('utf-8')
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
return (u'WARNING: unable to download video subtitles: %s' % compat_str(err), None)
- srt_lang_list = re.findall(r'name="([^"]*)"[^>]+lang_code="([\w\-]+)"', srt_list)
- srt_lang_list = dict((l[1], l[0]) for l in srt_lang_list)
- if not srt_lang_list:
- return (u'WARNING: video has no closed captions', None)
- if self._downloader.params.get('subtitleslang', False):
- srt_lang = self._downloader.params.get('subtitleslang')
- elif 'en' in srt_lang_list:
- srt_lang = 'en'
- else:
- srt_lang = list(srt_lang_list.keys())[0]
- if not srt_lang in srt_lang_list:
- return (u'WARNING: no closed captions found in the specified language', None)
+ sub_lang_list = re.findall(r'name="([^"]*)"[^>]+lang_code="([\w\-]+)"', sub_list)
+ sub_lang_list = dict((l[1], l[0]) for l in sub_lang_list)
+ if not sub_lang_list:
+ return (u'WARNING: video doesn\'t have subtitles', None)
+ return sub_lang_list
+
+ def _list_available_subtitles(self, video_id):
+ sub_lang_list = self._get_available_subtitles(video_id)
+ self.report_video_subtitles_available(video_id, sub_lang_list)
+
+ def _request_subtitle(self, sub_lang, sub_name, video_id, format):
+ self.report_video_subtitles_request(video_id, sub_lang, format)
params = compat_urllib_parse.urlencode({
- 'lang': srt_lang,
- 'name': srt_lang_list[srt_lang].encode('utf-8'),
+ 'lang': sub_lang,
+ 'name': sub_name,
'v': video_id,
+ 'fmt': format,
})
url = 'http://www.youtube.com/api/timedtext?' + params
try:
- srt_xml = compat_urllib_request.urlopen(url).read().decode('utf-8')
+ sub = compat_urllib_request.urlopen(url).read().decode('utf-8')
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
return (u'WARNING: unable to download video subtitles: %s' % compat_str(err), None)
- if not srt_xml:
+ if not sub:
return (u'WARNING: Did not fetch video subtitles', None)
- return (None, self._closed_captions_xml_to_srt(srt_xml))
+ return (None, sub_lang, sub)
+
+ def _extract_subtitle(self, video_id):
+ sub_lang_list = self._get_available_subtitles(video_id)
+ sub_format = self._downloader.params.get('subtitlesformat')
+ if self._downloader.params.get('subtitleslang', False):
+ sub_lang = self._downloader.params.get('subtitleslang')
+ elif 'en' in sub_lang_list:
+ sub_lang = 'en'
+ else:
+ sub_lang = list(sub_lang_list.keys())[0]
+ if sub_lang not in sub_lang_list:
+ return (u'WARNING: no subtitles found in the specified language "%s"' % sub_lang, None)
+
+ subtitle = self._request_subtitle(sub_lang, sub_lang_list[sub_lang].encode('utf-8'), video_id, sub_format)
+ return [subtitle]
+
+ def _extract_all_subtitles(self, video_id):
+ sub_lang_list = self._get_available_subtitles(video_id)
+ sub_format = self._downloader.params.get('subtitlesformat')
+ subtitles = []
+ for sub_lang in sub_lang_list:
+ subtitle = self._request_subtitle(sub_lang, sub_lang_list[sub_lang].encode('utf-8'), video_id, sub_format)
+ subtitles.append(subtitle)
+ return subtitles
def _print_formats(self, formats):
print('Available formats:')
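
The subtitle helpers above talk to two YouTube timedtext endpoints: a type=list request that enumerates the available tracks, and a per-track request that fetches the content in the chosen format. A minimal sketch of how those URLs are built (endpoints and parameter names come from the diff; the urlencode import stands in for the project's compat_urllib_parse wrapper, and the IDs are placeholders):

    try:
        from urllib.parse import urlencode  # Python 3
    except ImportError:
        from urllib import urlencode        # Python 2

    def subtitle_list_url(video_id):
        # Lists the subtitle tracks available for a video.
        return 'http://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id

    def subtitle_track_url(video_id, sub_lang, sub_name, fmt):
        # Fetches one track; 'fmt' selects the subtitle format (e.g. 'srt').
        params = urlencode({'lang': sub_lang, 'name': sub_name, 'v': video_id, 'fmt': fmt})
        return 'http://www.youtube.com/api/timedtext?' + params

    print(subtitle_list_url('EXAMPLE_ID'))
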
@@ -519,12 +542,26 @@ class YoutubeIE(InfoExtractor):
else:
video_description = ''
- # closed captions
+ # subtitles
video_subtitles = None
+
if self._downloader.params.get('writesubtitles', False):
- (srt_error, video_subtitles) = self._extract_subtitles(video_id)
- if srt_error:
- self._downloader.trouble(srt_error)
+ video_subtitles = self._extract_subtitle(video_id)
+ if video_subtitles:
+ (sub_error, sub_lang, sub) = video_subtitles[0]
+ if sub_error:
+ self._downloader.trouble(sub_error)
+
+ if self._downloader.params.get('allsubtitles', False):
+ video_subtitles = self._extract_all_subtitles(video_id)
+ for video_subtitle in video_subtitles:
+ (sub_error, sub_lang, sub) = video_subtitle
+ if sub_error:
+ self._downloader.trouble(sub_error)
+
+ if self._downloader.params.get('listsubtitles', False):
+ sub_lang_list = self._list_available_subtitles(video_id)
+ return
if 'length_seconds' not in video_info:
self._downloader.trouble(u'WARNING: unable to extract video duration')
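
The block above wires three behaviors to downloader options: writesubtitles fetches one track, allsubtitles fetches every available track, and listsubtitles only prints the languages and returns. A hypothetical params dict showing how they fit together (key names are taken from the diff; the values are illustrative):

    params = {
        'writesubtitles': True,    # download a single subtitle track
        'allsubtitles': False,     # or: download every available track
        'listsubtitles': False,    # or: only list languages, then stop
        'subtitlesformat': 'srt',  # forwarded as the timedtext 'fmt' parameter
        'subtitleslang': 'en',     # preferred track; falls back to 'en', then the first one
    }
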
@@ -1299,7 +1336,8 @@ class GenericIE(InfoExtractor):
def report_download_webpage(self, video_id):
"""Report webpage download."""
- self._downloader.to_screen(u'WARNING: Falling back on generic information extractor.')
+ if not self._downloader.params.get('test', False):
+ self._downloader.to_screen(u'WARNING: Falling back on generic information extractor.')
self._downloader.to_screen(u'[generic] %s: Downloading webpage' % video_id)
def report_extraction(self, video_id):
@@ -1369,13 +1407,8 @@ class GenericIE(InfoExtractor):
if new_url: return [self.url_result(new_url)]
video_id = url.split('/')[-1]
- request = compat_urllib_request.Request(url)
try:
- self.report_download_webpage(video_id)
- webpage = compat_urllib_request.urlopen(request).read()
- except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
- return
+ webpage = self._download_webpage(url, video_id)
except ValueError as err:
# since this is the last-resort InfoExtractor, if
# this error is thrown, it'll be thrown here
@@ -2576,7 +2609,7 @@ class EscapistIE(InfoExtractor):
'uploader': showName,
'upload_date': None,
'title': showName,
- 'ext': 'flv',
+ 'ext': 'mp4',
'thumbnail': imgUrl,
'description': description,
'player_url': playerUrl,
@@ -3972,11 +4005,11 @@ class KeekIE(InfoExtractor):
webpage = self._download_webpage(url, video_id)
m = re.search(r'<meta property="og:title" content="(?P<title>.+)"', webpage)
title = unescapeHTML(m.group('title'))
- m = re.search(r'<div class="bio-names-and-report">[\s\n]+<h4>(?P<uploader>\w+)</h4>', webpage)
- uploader = unescapeHTML(m.group('uploader'))
+ m = re.search(r'<div class="user-name-and-bio">[\S\s]+?<h2>(?P<uploader>.+?)</h2>', webpage)
+ uploader = clean_html(m.group('uploader'))
info = {
- 'id':video_id,
- 'url':video_url,
+ 'id': video_id,
+ 'url': video_url,
'ext': 'mp4',
'title': title,
'thumbnail': thumbnail,
@@ -4113,6 +4146,40 @@ class MySpassIE(InfoExtractor):
}
return [info]
+class SpiegelIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:www\.)?spiegel\.de/video/[^/]*-(?P<videoID>[0-9]+)(?:\.html)?$'
+
+ def _real_extract(self, url):
+ m = re.match(self._VALID_URL, url)
+ video_id = m.group('videoID')
+
+ webpage = self._download_webpage(url, video_id)
+ m = re.search(r'<div class="spVideoTitle">(.*?)</div>', webpage)
+ if not m:
+ raise ExtractorError(u'Cannot find title')
+ video_title = unescapeHTML(m.group(1))
+
+ xml_url = u'http://video2.spiegel.de/flash/' + video_id + u'.xml'
+ xml_code = self._download_webpage(xml_url, video_id,
+ note=u'Downloading XML', errnote=u'Failed to download XML')
+
+ idoc = xml.etree.ElementTree.fromstring(xml_code)
+ last_type = idoc[-1]
+ filename = last_type.findall('./filename')[0].text
+ duration = float(last_type.findall('./duration')[0].text)
+
+ video_url = 'http://video2.spiegel.de/flash/' + filename
+ video_ext = filename.rpartition('.')[2]
+ info = {
+ 'id': video_id,
+ 'url': video_url,
+ 'ext': video_ext,
+ 'title': video_title,
+ 'duration': duration,
+ }
+ return [info]
+
+
def gen_extractors():
""" Return a list of an instance of every supported extractor.
The order does matter; the first extractor matched is the one handling the URL.
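
The new SpiegelIE downloads a per-video XML document, takes its last child node, and reads the filename and duration out of it. A standalone sketch of that parsing step (the sample XML is invented to mirror the element names the extractor queries; the real documents may differ):

    import xml.etree.ElementTree as ET

    # Invented sample resembling the structure the code above expects.
    xml_code = '''<video>
      <type><filename>low-123.mp4</filename><duration>61.0</duration></type>
      <type><filename>high-123.mp4</filename><duration>61.0</duration></type>
    </video>'''

    idoc = ET.fromstring(xml_code)
    last_type = idoc[-1]  # the last variant node is taken, as in the extractor
    filename = last_type.findall('./filename')[0].text
    duration = float(last_type.findall('./duration')[0].text)
    print('http://video2.spiegel.de/flash/' + filename, duration)
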
@@ -4161,6 +4228,7 @@ def gen_extractors():
KeekIE(),
TEDIE(),
MySpassIE(),
+ SpiegelIE(),
GenericIE()
]