author     remitamine <remitamine@gmail.com>  2016-02-06 06:26:02 +0100
committer  remitamine <remitamine@gmail.com>  2016-02-06 06:26:02 +0100
commit     66159b38aad38d55f84a358a0c2ed2add9a2946d (patch)
tree       4768cc49d929039a5a3c866600b7c0aee475f35d
parent     23d17e4bebdff9a5defa012e4fa5cd99db605919 (diff)
parent     255732f0d33268aeababb1b3ce37a1defb5bc965 (diff)
download   youtube-dl-66159b38aad38d55f84a358a0c2ed2add9a2946d.tar.xz
Merge pull request #8408 from remitamine/dash
Add generic support for MPD manifests (DASH formats)
-rw-r--r--  youtube_dl/downloader/dash.py      7
-rw-r--r--  youtube_dl/extractor/common.py   206
-rw-r--r--  youtube_dl/extractor/facebook.py   5
-rw-r--r--  youtube_dl/extractor/vevo.py       8
-rw-r--r--  youtube_dl/extractor/youtube.py   10
5 files changed, 159 insertions, 77 deletions
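For orientation, here is a minimal sketch of how an extractor could use the new generic helper that this merge adds to youtube_dl/extractor/common.py. The extractor class, URL pattern, manifest URL and title below are hypothetical; only the _extract_mpd_formats call reflects the API introduced in the diff.

# Hypothetical extractor using the new generic MPD helper; the site,
# URLs and IDs are invented for illustration only.
from .common import InfoExtractor


class ExampleDashIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?example\.com/video/(?P<id>\d+)'

    def _real_extract(self, url):
        video_id = self._match_id(url)
        # Downloads and parses the manifest, returning format dicts;
        # segmented representations carry protocol 'http_dash_segments'
        # so the DashSegmentsFD downloader handles them.
        formats = self._extract_mpd_formats(
            'https://example.com/dash/%s.mpd' % video_id, video_id,
            mpd_id='dash', fatal=False)
        self._sort_formats(formats)
        return {
            'id': video_id,
            'title': 'Example DASH video',
            'formats': formats,
        }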
diff --git a/youtube_dl/downloader/dash.py b/youtube_dl/downloader/dash.py
index 535f2a7fc..b0070aead 100644
--- a/youtube_dl/downloader/dash.py
+++ b/youtube_dl/downloader/dash.py
@@ -40,9 +40,10 @@ class DashSegmentsFD(FileDownloader):
return '%s%s%s' % (base_url, '' if base_url.endswith('/') else '/', target_url)
with open(tmpfilename, 'wb') as outf:
- append_url_to_file(
- outf, combine_url(base_url, info_dict['initialization_url']),
- 'initialization segment')
+ if info_dict.get('initialization_url'):
+ append_url_to_file(
+ outf, combine_url(base_url, info_dict['initialization_url']),
+ 'initialization segment')
for i, segment_url in enumerate(segment_urls):
segment_len = append_url_to_file(
outf, combine_url(base_url, segment_url),
diff --git a/youtube_dl/extractor/common.py b/youtube_dl/extractor/common.py
index 199a04d1c..d9f31daaa 100644
--- a/youtube_dl/extractor/common.py
+++ b/youtube_dl/extractor/common.py
@@ -1,4 +1,4 @@
-from __future__ import unicode_literals
+from __future__ import unicode_literals, division
import base64
import datetime
@@ -10,6 +10,7 @@ import re
import socket
import sys
import time
+import math
from ..compat import (
compat_cookiejar,
@@ -44,6 +45,7 @@ from ..utils import (
xpath_text,
xpath_with_ns,
determine_protocol,
+ parse_duration,
)
@@ -1330,81 +1332,155 @@ class InfoExtractor(object):
})
return entries
- def _download_dash_manifest(self, dash_manifest_url, video_id, fatal=True):
- return self._download_xml(
- dash_manifest_url, video_id,
- note='Downloading DASH manifest',
- errnote='Could not download DASH manifest',
+ def _extract_mpd_formats(self, mpd_url, video_id, mpd_id=None, note=None, errnote=None, fatal=True, formats_dict={}):
+ res = self._download_webpage_handle(
+ mpd_url, video_id,
+ note=note or 'Downloading MPD manifest',
+ errnote=errnote or 'Failed to download MPD manifest',
fatal=fatal)
+ if res is False:
+ return []
+ mpd, urlh = res
+ mpd_base_url = re.match(r'https?://.+/', urlh.geturl()).group()
- def _extract_dash_manifest_formats(self, dash_manifest_url, video_id, fatal=True, namespace=None, formats_dict={}):
- dash_doc = self._download_dash_manifest(dash_manifest_url, video_id, fatal)
- if dash_doc is False:
+ return self._parse_mpd(
+ compat_etree_fromstring(mpd.encode('utf-8')), mpd_id, mpd_base_url, formats_dict=formats_dict)
+
+ def _parse_mpd(self, mpd_doc, mpd_id=None, mpd_base_url='', formats_dict={}):
+ if mpd_doc.get('type') == 'dynamic':
return []
- return self._parse_dash_manifest(
- dash_doc, namespace=namespace, formats_dict=formats_dict)
+ namespace = self._search_regex(r'(?i)^{([^}]+)?}MPD$', mpd_doc.tag, 'namespace')
- def _parse_dash_manifest(self, dash_doc, namespace=None, formats_dict={}):
def _add_ns(path):
return self._xpath_ns(path, namespace)
+ def is_drm_protected(element):
+ return element.find(_add_ns('ContentProtection')) is not None
+
+ def extract_multisegment_info(element, ms_parent_info):
+ ms_info = ms_parent_info.copy()
+ segment_list = element.find(_add_ns('SegmentList'))
+ if segment_list is not None:
+ segment_urls_e = segment_list.findall(_add_ns('SegmentURL'))
+ if segment_urls_e:
+ ms_info['segment_urls'] = [segment.attrib['media'] for segment in segment_urls_e]
+ initialization = segment_list.find(_add_ns('Initialization'))
+ if initialization is not None:
+ ms_info['initialization_url'] = initialization.attrib['sourceURL']
+ else:
+ segment_template = element.find(_add_ns('SegmentTemplate'))
+ if segment_template is not None:
+ start_number = segment_template.get('startNumber')
+ if start_number:
+ ms_info['start_number'] = int(start_number)
+ segment_timeline = segment_template.find(_add_ns('SegmentTimeline'))
+ if segment_timeline is not None:
+ s_e = segment_timeline.findall(_add_ns('S'))
+ if s_e:
+ ms_info['total_number'] = 0
+ for s in s_e:
+ ms_info['total_number'] += 1 + int(s.get('r', '0'))
+ else:
+ timescale = segment_template.get('timescale')
+ if timescale:
+ ms_info['timescale'] = int(timescale)
+ segment_duration = segment_template.get('duration')
+ if segment_duration:
+ ms_info['segment_duration'] = int(segment_duration)
+ media_template = segment_template.get('media')
+ if media_template:
+ ms_info['media_template'] = media_template
+ initialization = segment_template.get('initialization')
+ if initialization:
+ ms_info['initialization_url'] = initialization
+ else:
+ initialization = segment_template.find(_add_ns('Initialization'))
+ if initialization is not None:
+ ms_info['initialization_url'] = initialization.attrib['sourceURL']
+ return ms_info
+
+ mpd_duration = parse_duration(mpd_doc.get('mediaPresentationDuration'))
formats = []
- for a in dash_doc.findall('.//' + _add_ns('AdaptationSet')):
- mime_type = a.attrib.get('mimeType')
- for r in a.findall(_add_ns('Representation')):
- mime_type = r.attrib.get('mimeType') or mime_type
- url_el = r.find(_add_ns('BaseURL'))
- if mime_type == 'text/vtt':
- # TODO implement WebVTT downloading
- pass
- elif mime_type.startswith('audio/') or mime_type.startswith('video/'):
- segment_list = r.find(_add_ns('SegmentList'))
- format_id = r.attrib['id']
- video_url = url_el.text if url_el is not None else None
- filesize = int_or_none(url_el.attrib.get('{http://youtube.com/yt/2012/10/10}contentLength') if url_el is not None else None)
- f = {
- 'format_id': format_id,
- 'url': video_url,
- 'width': int_or_none(r.attrib.get('width')),
- 'height': int_or_none(r.attrib.get('height')),
- 'tbr': int_or_none(r.attrib.get('bandwidth'), 1000),
- 'asr': int_or_none(r.attrib.get('audioSamplingRate')),
- 'filesize': filesize,
- 'fps': int_or_none(r.attrib.get('frameRate')),
- }
- if segment_list is not None:
- initialization_url = segment_list.find(_add_ns('Initialization')).attrib['sourceURL']
- f.update({
- 'initialization_url': initialization_url,
- 'segment_urls': [segment.attrib.get('media') for segment in segment_list.findall(_add_ns('SegmentURL'))],
- 'protocol': 'http_dash_segments',
- })
- if not f.get('url'):
- f['url'] = initialization_url
- try:
- existing_format = next(
- fo for fo in formats
- if fo['format_id'] == format_id)
- except StopIteration:
- full_info = formats_dict.get(format_id, {}).copy()
- full_info.update(f)
- codecs = r.attrib.get('codecs')
- if codecs:
- if mime_type.startswith('video/'):
- vcodec, acodec = codecs, 'none'
- else: # mime_type.startswith('audio/')
- vcodec, acodec = 'none', codecs
-
- full_info.update({
- 'vcodec': vcodec,
- 'acodec': acodec,
+ for period in mpd_doc.findall(_add_ns('Period')):
+ period_duration = parse_duration(period.get('duration')) or mpd_duration
+ period_ms_info = extract_multisegment_info(period, {
+ 'start_number': 1,
+ 'timescale': 1,
+ })
+ for adaptation_set in period.findall(_add_ns('AdaptationSet')):
+ if is_drm_protected(adaptation_set):
+ continue
+ adaption_set_ms_info = extract_multisegment_info(adaptation_set, period_ms_info)
+ for representation in adaptation_set.findall(_add_ns('Representation')):
+ if is_drm_protected(representation):
+ continue
+ representation_attrib = adaptation_set.attrib.copy()
+ representation_attrib.update(representation.attrib)
+ mime_type = representation_attrib.get('mimeType')
+ content_type = mime_type.split('/')[0] if mime_type else representation_attrib.get('contentType')
+ if content_type == 'text':
+ # TODO implement WebVTT downloading
+ pass
+ elif content_type == 'video' or content_type == 'audio':
+ base_url = ''
+ for element in (representation, adaptation_set, period, mpd_doc):
+ base_url_e = element.find(_add_ns('BaseURL'))
+ if base_url_e is not None:
+ base_url = base_url_e.text + base_url
+ if re.match(r'^https?://', base_url):
+ break
+ if not re.match(r'^https?://', base_url):
+ base_url = mpd_base_url + base_url
+ representation_id = representation_attrib.get('id')
+ lang = representation_attrib.get('lang')
+ f = {
+ 'format_id': mpd_id or representation_id,
+ 'url': base_url,
+ 'width': int_or_none(representation_attrib.get('width')),
+ 'height': int_or_none(representation_attrib.get('height')),
+ 'tbr': int_or_none(representation_attrib.get('bandwidth'), 1000),
+ 'asr': int_or_none(representation_attrib.get('audioSamplingRate')),
+ 'fps': int_or_none(representation_attrib.get('frameRate')),
+ 'vcodec': 'none' if content_type == 'audio' else representation_attrib.get('codecs'),
+ 'acodec': 'none' if content_type == 'video' else representation_attrib.get('codecs'),
+ 'language': lang if lang not in ('mul', 'und', 'zxx', 'mis') else None,
+ 'format_note': 'DASH %s' % content_type,
+ }
+ representation_ms_info = extract_multisegment_info(representation, adaption_set_ms_info)
+ if 'segment_urls' not in representation_ms_info and 'media_template' in representation_ms_info:
+                            if 'total_number' not in representation_ms_info and 'segment_duration' in representation_ms_info:
+ segment_duration = representation_ms_info['segment_duration'] / representation_ms_info['timescale']
+ representation_ms_info['total_number'] = int(math.ceil(period_duration / segment_duration))
+ media_template = representation_ms_info['media_template']
+ media_template = media_template.replace('$RepresentationID$', representation_id)
+ media_template = re.sub(r'\$(Number|Bandwidth)(?:%(0\d+)d)?\$', r'%(\1)\2d', media_template)
+                            media_template = media_template.replace('$$', '$')
+ representation_ms_info['segment_urls'] = [media_template % {'Number': segment_number, 'Bandwidth': representation_attrib.get('bandwidth')} for segment_number in range(representation_ms_info['start_number'], representation_ms_info['total_number'] + representation_ms_info['start_number'])]
+ if 'segment_urls' in representation_ms_info:
+ f.update({
+ 'segment_urls': representation_ms_info['segment_urls'],
+ 'protocol': 'http_dash_segments',
})
- formats.append(full_info)
+ if 'initialization_url' in representation_ms_info:
+ initialization_url = representation_ms_info['initialization_url'].replace('$RepresentationID$', representation_id)
+ f.update({
+ 'initialization_url': initialization_url,
+ })
+ if not f.get('url'):
+ f['url'] = initialization_url
+ try:
+ existing_format = next(
+ fo for fo in formats
+ if fo['format_id'] == representation_id)
+ except StopIteration:
+ full_info = formats_dict.get(representation_id, {}).copy()
+ full_info.update(f)
+ formats.append(full_info)
+ else:
+ existing_format.update(f)
else:
- existing_format.update(f)
- else:
- self.report_warning('Unknown MIME type %s in DASH manifest' % mime_type)
+ self.report_warning('Unknown MIME type %s in DASH manifest' % mime_type)
return formats
def _live_title(self, name):
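
As a side note, the SegmentTemplate handling in _parse_mpd above rewrites the MPD $RepresentationID$, $Number$ and $Bandwidth$ placeholders into Python %-format specifiers before expanding the segment list. Here is a standalone sketch of that expansion; the template string, representation id and bandwidth are invented values.

# Standalone illustration of the media template expansion performed in
# _parse_mpd above; all values are made up for the example.
import re

media_template = '$RepresentationID$/segment-$Number%05d$.m4s'
representation_id = 'video-1'
bandwidth = 1500000

media_template = media_template.replace('$RepresentationID$', representation_id)
# Turn $Number%05d$ / $Bandwidth$ placeholders into %-format specifiers.
media_template = re.sub(
    r'\$(Number|Bandwidth)(?:%(0\d+)d)?\$', r'%(\1)\2d', media_template)
media_template = media_template.replace('$$', '$')

segment_urls = [
    media_template % {'Number': n, 'Bandwidth': bandwidth}
    for n in range(1, 4)]
print(segment_urls)
# ['video-1/segment-00001.m4s', 'video-1/segment-00002.m4s',
#  'video-1/segment-00003.m4s']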
diff --git a/youtube_dl/extractor/facebook.py b/youtube_dl/extractor/facebook.py
index b6d1180f0..9c751178f 100644
--- a/youtube_dl/extractor/facebook.py
+++ b/youtube_dl/extractor/facebook.py
@@ -215,9 +215,8 @@ class FacebookIE(InfoExtractor):
})
dash_manifest = f[0].get('dash_manifest')
if dash_manifest:
- formats.extend(self._parse_dash_manifest(
- compat_etree_fromstring(compat_urllib_parse_unquote_plus(dash_manifest)),
- namespace='urn:mpeg:dash:schema:mpd:2011'))
+ formats.extend(self._parse_mpd(
+ compat_etree_fromstring(compat_urllib_parse_unquote_plus(dash_manifest))))
if not formats:
raise ExtractorError('Cannot find video formats')
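
The namespace argument disappears here because _parse_mpd now reads the namespace from the root tag of the parsed manifest (the _search_regex call on mpd_doc.tag in common.py above). A small sketch of that detection, using a minimal invented manifest:

# Minimal invented manifest; shows how the MPD namespace can be read
# from the root tag instead of being passed in by the caller.
import re
import xml.etree.ElementTree as etree

mpd = '<MPD xmlns="urn:mpeg:dash:schema:mpd:2011" type="static"/>'
mpd_doc = etree.fromstring(mpd)
# ElementTree exposes the tag as '{urn:mpeg:dash:schema:mpd:2011}MPD'.
namespace = re.match(r'(?i)^{([^}]+)?}MPD$', mpd_doc.tag).group(1)
print(namespace)  # urn:mpeg:dash:schema:mpd:2011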
diff --git a/youtube_dl/extractor/vevo.py b/youtube_dl/extractor/vevo.py
index 35fcff1b2..152fef42e 100644
--- a/youtube_dl/extractor/vevo.py
+++ b/youtube_dl/extractor/vevo.py
@@ -197,8 +197,14 @@ class VevoIE(InfoExtractor):
if not version_url:
continue
- if '.mpd' in version_url or '.ism' in version_url:
+ if '.ism' in version_url:
continue
+ elif '.mpd' in version_url:
+ formats.extend(self._extract_mpd_formats(
+ version_url, video_id, mpd_id='dash-%s' % version,
+ note='Downloading %s MPD information' % version,
+ errnote='Failed to download %s MPD information' % version,
+ fatal=False))
elif '.m3u8' in version_url:
formats.extend(self._extract_m3u8_formats(
version_url, video_id, 'mp4', 'm3u8_native',
diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py
index 9b346d27a..63abe5477 100644
--- a/youtube_dl/extractor/youtube.py
+++ b/youtube_dl/extractor/youtube.py
@@ -1464,7 +1464,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
# Look for the DASH manifest
if self._downloader.params.get('youtube_include_dash_manifest', True):
dash_mpd_fatal = True
- for dash_manifest_url in dash_mpds:
+ for mpd_url in dash_mpds:
dash_formats = {}
try:
def decrypt_sig(mobj):
@@ -1472,11 +1472,11 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
dec_s = self._decrypt_signature(s, video_id, player_url, age_gate)
return '/signature/%s' % dec_s
- dash_manifest_url = re.sub(r'/s/([a-fA-F0-9\.]+)', decrypt_sig, dash_manifest_url)
+ mpd_url = re.sub(r'/s/([a-fA-F0-9\.]+)', decrypt_sig, mpd_url)
- for df in self._extract_dash_manifest_formats(
- dash_manifest_url, video_id, fatal=dash_mpd_fatal,
- namespace='urn:mpeg:DASH:schema:MPD:2011', formats_dict=self._formats):
+ for df in self._extract_mpd_formats(
+ mpd_url, video_id, fatal=dash_mpd_fatal,
+ formats_dict=self._formats):
# Do not overwrite DASH format found in some previous DASH manifest
if df['format_id'] not in dash_formats:
dash_formats[df['format_id']] = df
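
Finally, the youtube.py hunk keeps the existing behaviour of walking several candidate manifests and letting the first occurrence of each format_id win. A condensed sketch of that dedup pattern, with invented format dicts:

# Invented format dicts illustrating the dedup-by-format_id pattern
# used when formats from several DASH manifests are merged.
candidate_manifests = [
    [{'format_id': '137', 'url': 'https://example.com/a.mp4'}],
    [{'format_id': '137', 'url': 'https://example.com/b.mp4'},
     {'format_id': '140', 'url': 'https://example.com/c.m4a'}],
]
dash_formats = {}
for manifest_formats in candidate_manifests:
    for df in manifest_formats:
        # Do not overwrite a DASH format found in a previous manifest.
        if df['format_id'] not in dash_formats:
            dash_formats[df['format_id']] = df
print(sorted(dash_formats))  # ['137', '140']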