Diffstat (limited to 'youtube_dl')
-rwxr-xr-x  youtube_dl/YoutubeDL.py  423
-rw-r--r--  youtube_dl/__init__.py  6
-rw-r--r--  youtube_dl/compat.py  25
-rw-r--r--  youtube_dl/downloader/__init__.py  2
-rw-r--r--  youtube_dl/downloader/dash.py  66
-rw-r--r--  youtube_dl/downloader/external.py  45
-rw-r--r--  youtube_dl/downloader/f4m.py  123
-rw-r--r--  youtube_dl/downloader/fragment.py  111
-rw-r--r--  youtube_dl/downloader/hls.py  83
-rw-r--r--  youtube_dl/downloader/http.py  19
-rw-r--r--  youtube_dl/extractor/__init__.py  48
-rw-r--r--  youtube_dl/extractor/abc.py  51
-rw-r--r--  youtube_dl/extractor/academicearth.py  2
-rw-r--r--  youtube_dl/extractor/adultswim.py  2
-rw-r--r--  youtube_dl/extractor/airmozilla.py  4
-rw-r--r--  youtube_dl/extractor/aljazeera.py  1
-rw-r--r--  youtube_dl/extractor/appleconnect.py  50
-rw-r--r--  youtube_dl/extractor/ard.py  203
-rw-r--r--  youtube_dl/extractor/bbc.py  780
-rw-r--r--  youtube_dl/extractor/bbccouk.py  379
-rw-r--r--  youtube_dl/extractor/breakcom.py  1
-rw-r--r--  youtube_dl/extractor/canalplus.py  12
-rw-r--r--  youtube_dl/extractor/ceskatelevize.py  152
-rw-r--r--  youtube_dl/extractor/cinemassacre.py  7
-rw-r--r--  youtube_dl/extractor/clipfish.py  67
-rw-r--r--  youtube_dl/extractor/comcarcoff.py  2
-rw-r--r--  youtube_dl/extractor/common.py  350
-rw-r--r--  youtube_dl/extractor/crunchyroll.py  65
-rw-r--r--  youtube_dl/extractor/dailymotion.py  192
-rw-r--r--  youtube_dl/extractor/dcn.py  84
-rw-r--r--  youtube_dl/extractor/dhm.py  30
-rw-r--r--  youtube_dl/extractor/dumpert.py  10
-rw-r--r--  youtube_dl/extractor/eagleplatform.py  2
-rw-r--r--  youtube_dl/extractor/eroprofile.py  3
-rw-r--r--  youtube_dl/extractor/esri.py  74
-rw-r--r--  youtube_dl/extractor/facebook.py  6
-rw-r--r--  youtube_dl/extractor/fc2.py  15
-rw-r--r--  youtube_dl/extractor/folketinget.py  4
-rw-r--r--  youtube_dl/extractor/fourtube.py  1
-rw-r--r--  youtube_dl/extractor/foxnews.py  15
-rw-r--r--  youtube_dl/extractor/francetv.py  97
-rw-r--r--  youtube_dl/extractor/funnyordie.py  2
-rw-r--r--  youtube_dl/extractor/gdcvault.py  33
-rw-r--r--  youtube_dl/extractor/generic.py  172
-rw-r--r--  youtube_dl/extractor/globo.py  11
-rw-r--r--  youtube_dl/extractor/gorillavid.py  20
-rw-r--r--  youtube_dl/extractor/imgur.py  27
-rw-r--r--  youtube_dl/extractor/indavideo.py  142
-rw-r--r--  youtube_dl/extractor/iqiyi.py  34
-rw-r--r--  youtube_dl/extractor/ir90tv.py  42
-rw-r--r--  youtube_dl/extractor/kaltura.py  34
-rw-r--r--  youtube_dl/extractor/kontrtube.py  40
-rw-r--r--  youtube_dl/extractor/krasview.py  3
-rw-r--r--  youtube_dl/extractor/kuwo.py  1
-rw-r--r--  youtube_dl/extractor/lecture2go.py  62
-rw-r--r--  youtube_dl/extractor/letv.py  3
-rw-r--r--  youtube_dl/extractor/libsyn.py  30
-rw-r--r--  youtube_dl/extractor/lynda.py  19
-rw-r--r--  youtube_dl/extractor/mailru.py  2
-rw-r--r--  youtube_dl/extractor/mdr.py  2
-rw-r--r--  youtube_dl/extractor/mit.py  10
-rw-r--r--  youtube_dl/extractor/moniker.py  18
-rw-r--r--  youtube_dl/extractor/mtv.py  74
-rw-r--r--  youtube_dl/extractor/musicvault.py  63
-rw-r--r--  youtube_dl/extractor/mwave.py  58
-rw-r--r--  youtube_dl/extractor/nationalgeographic.py  37
-rw-r--r--  youtube_dl/extractor/nbc.py  31
-rw-r--r--  youtube_dl/extractor/niconico.py  6
-rw-r--r--  youtube_dl/extractor/nowtv.py  77
-rw-r--r--  youtube_dl/extractor/nowvideo.py  2
-rw-r--r--  youtube_dl/extractor/npo.py  1
-rw-r--r--  youtube_dl/extractor/odnoklassniki.py  63
-rw-r--r--  youtube_dl/extractor/pbs.py  34
-rw-r--r--  youtube_dl/extractor/periscope.py  99
-rw-r--r--  youtube_dl/extractor/playtvak.py  181
-rw-r--r--  youtube_dl/extractor/pluralsight.py  207
-rw-r--r--  youtube_dl/extractor/porn91.py  2
-rw-r--r--  youtube_dl/extractor/pornhub.py  4
-rw-r--r--  youtube_dl/extractor/prosiebensat1.py  11
-rw-r--r--  youtube_dl/extractor/rtl2.py  27
-rw-r--r--  youtube_dl/extractor/rtlnl.py  42
-rw-r--r--  youtube_dl/extractor/rtp.py  4
-rw-r--r--  youtube_dl/extractor/rts.py  31
-rw-r--r--  youtube_dl/extractor/rtve.py  6
-rw-r--r--  youtube_dl/extractor/rtvnh.py  47
-rw-r--r--  youtube_dl/extractor/rutube.py  1
-rw-r--r--  youtube_dl/extractor/ruutu.py  43
-rw-r--r--  youtube_dl/extractor/safari.py  5
-rw-r--r--  youtube_dl/extractor/screenwavemedia.py  113
-rw-r--r--  youtube_dl/extractor/sexykarma.py  1
-rw-r--r--  youtube_dl/extractor/shahid.py  107
-rw-r--r--  youtube_dl/extractor/shared.py  17
-rw-r--r--  youtube_dl/extractor/smotri.py  5
-rw-r--r--  youtube_dl/extractor/snagfilms.py  16
-rw-r--r--  youtube_dl/extractor/soundcloud.py  143
-rw-r--r--  youtube_dl/extractor/southpark.py  8
-rw-r--r--  youtube_dl/extractor/spankwire.py  60
-rw-r--r--  youtube_dl/extractor/spiegel.py  5
-rw-r--r--  youtube_dl/extractor/sportdeutschland.py  10
-rw-r--r--  youtube_dl/extractor/tagesschau.py  75
-rw-r--r--  youtube_dl/extractor/telecinco.py  5
-rw-r--r--  youtube_dl/extractor/telegraaf.py  35
-rw-r--r--  youtube_dl/extractor/theplatform.py  262
-rw-r--r--  youtube_dl/extractor/tubitv.py  4
-rw-r--r--  youtube_dl/extractor/tudou.py  9
-rw-r--r--  youtube_dl/extractor/tumblr.py  26
-rw-r--r--  youtube_dl/extractor/tvplay.py  1
-rw-r--r--  youtube_dl/extractor/tweakers.py  50
-rw-r--r--  youtube_dl/extractor/twitch.py  73
-rw-r--r--  youtube_dl/extractor/udemy.py  13
-rw-r--r--  youtube_dl/extractor/vice.py  37
-rw-r--r--  youtube_dl/extractor/videobam.py  81
-rw-r--r--  youtube_dl/extractor/videolecturesnet.py  2
-rw-r--r--  youtube_dl/extractor/vidme.py  135
-rw-r--r--  youtube_dl/extractor/vier.py  13
-rw-r--r--  youtube_dl/extractor/viewster.py  223
-rw-r--r--  youtube_dl/extractor/viki.py  46
-rw-r--r--  youtube_dl/extractor/vimeo.py  40
-rw-r--r--  youtube_dl/extractor/vlive.py  86
-rw-r--r--  youtube_dl/extractor/washingtonpost.py  8
-rw-r--r--  youtube_dl/extractor/wimp.py  23
-rw-r--r--  youtube_dl/extractor/xhamster.py  35
-rw-r--r--  youtube_dl/extractor/xuite.py  2
-rw-r--r--  youtube_dl/extractor/yahoo.py  62
-rw-r--r--  youtube_dl/extractor/yandexmusic.py  106
-rw-r--r--  youtube_dl/extractor/youku.py  19
-rw-r--r--  youtube_dl/extractor/youtube.py  382
-rw-r--r--  youtube_dl/options.py  8
-rw-r--r--  youtube_dl/postprocessor/common.py  7
-rw-r--r--  youtube_dl/utils.py  146
-rw-r--r--  youtube_dl/version.py  2
131 files changed, 5828 insertions(+), 2148 deletions(-)
diff --git a/youtube_dl/YoutubeDL.py b/youtube_dl/YoutubeDL.py
index 00af78e06..d65253882 100755
--- a/youtube_dl/YoutubeDL.py
+++ b/youtube_dl/YoutubeDL.py
@@ -21,24 +21,24 @@ import subprocess
import socket
import sys
import time
+import tokenize
import traceback
if os.name == 'nt':
import ctypes
from .compat import (
- compat_basestring,
compat_cookiejar,
compat_expanduser,
compat_get_terminal_size,
compat_http_client,
compat_kwargs,
compat_str,
+ compat_tokenize_tokenize,
compat_urllib_error,
compat_urllib_request,
)
from .utils import (
- escape_url,
ContentTooShortError,
date_from_str,
DateRange,
@@ -49,7 +49,6 @@ from .utils import (
ExtractorError,
format_bytes,
formatSeconds,
- HEADRequest,
locked_file,
make_HTTPS_handler,
MaxDownloadsReached,
@@ -70,6 +69,7 @@ from .utils import (
version_tuple,
write_json_file,
write_string,
+ YoutubeDLCookieProcessor,
YoutubeDLHandler,
prepend_extension,
replace_extension,
@@ -285,7 +285,11 @@ class YoutubeDL(object):
self._num_downloads = 0
self._screen_file = [sys.stdout, sys.stderr][params.get('logtostderr', False)]
self._err_file = sys.stderr
- self.params = params
+ self.params = {
+ # Default parameters
+ 'nocheckcertificate': False,
+ }
+ self.params.update(params)
self.cache = Cache(self)
if params.get('bidi_workaround', False):
@@ -853,8 +857,8 @@ class YoutubeDL(object):
else:
raise Exception('Invalid result type: %s' % result_type)
- def _apply_format_filter(self, format_spec, available_formats):
- " Returns a tuple of the remaining format_spec and filtered formats "
+ def _build_format_filter(self, filter_spec):
+ " Returns a function to filter the formats according to the filter_spec "
OPERATORS = {
'<': operator.lt,
@@ -864,13 +868,13 @@ class YoutubeDL(object):
'=': operator.eq,
'!=': operator.ne,
}
- operator_rex = re.compile(r'''(?x)\s*\[
+ operator_rex = re.compile(r'''(?x)\s*
(?P<key>width|height|tbr|abr|vbr|asr|filesize|fps)
\s*(?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
(?P<value>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)
- \]$
+ $
''' % '|'.join(map(re.escape, OPERATORS.keys())))
- m = operator_rex.search(format_spec)
+ m = operator_rex.search(filter_spec)
if m:
try:
comparison_value = int(m.group('value'))
@@ -881,7 +885,7 @@ class YoutubeDL(object):
if comparison_value is None:
raise ValueError(
'Invalid value %r in format specification %r' % (
- m.group('value'), format_spec))
+ m.group('value'), filter_spec))
op = OPERATORS[m.group('op')]
if not m:
@@ -889,85 +893,283 @@ class YoutubeDL(object):
'=': operator.eq,
'!=': operator.ne,
}
- str_operator_rex = re.compile(r'''(?x)\s*\[
+ str_operator_rex = re.compile(r'''(?x)
\s*(?P<key>ext|acodec|vcodec|container|protocol)
\s*(?P<op>%s)(?P<none_inclusive>\s*\?)?
\s*(?P<value>[a-zA-Z0-9_-]+)
- \s*\]$
+ \s*$
''' % '|'.join(map(re.escape, STR_OPERATORS.keys())))
- m = str_operator_rex.search(format_spec)
+ m = str_operator_rex.search(filter_spec)
if m:
comparison_value = m.group('value')
op = STR_OPERATORS[m.group('op')]
if not m:
- raise ValueError('Invalid format specification %r' % format_spec)
+ raise ValueError('Invalid filter specification %r' % filter_spec)
def _filter(f):
actual_value = f.get(m.group('key'))
if actual_value is None:
return m.group('none_inclusive')
return op(actual_value, comparison_value)
- new_formats = [f for f in available_formats if _filter(f)]
+ return _filter
+
+ def build_format_selector(self, format_spec):
+ def syntax_error(note, start):
+ message = (
+ 'Invalid format specification: '
+ '{0}\n\t{1}\n\t{2}^'.format(note, format_spec, ' ' * start[1]))
+ return SyntaxError(message)
+
+ PICKFIRST = 'PICKFIRST'
+ MERGE = 'MERGE'
+ SINGLE = 'SINGLE'
+ GROUP = 'GROUP'
+ FormatSelector = collections.namedtuple('FormatSelector', ['type', 'selector', 'filters'])
+
+ def _parse_filter(tokens):
+ filter_parts = []
+ for type, string, start, _, _ in tokens:
+ if type == tokenize.OP and string == ']':
+ return ''.join(filter_parts)
+ else:
+ filter_parts.append(string)
+
+ def _remove_unused_ops(tokens):
+ # Remove operators that we don't use and join them with the sourrounding strings
+ # for example: 'mp4' '-' 'baseline' '-' '16x9' is converted to 'mp4-baseline-16x9'
+ ALLOWED_OPS = ('/', '+', ',', '(', ')')
+ last_string, last_start, last_end, last_line = None, None, None, None
+ for type, string, start, end, line in tokens:
+ if type == tokenize.OP and string == '[':
+ if last_string:
+ yield tokenize.NAME, last_string, last_start, last_end, last_line
+ last_string = None
+ yield type, string, start, end, line
+ # everything inside brackets will be handled by _parse_filter
+ for type, string, start, end, line in tokens:
+ yield type, string, start, end, line
+ if type == tokenize.OP and string == ']':
+ break
+ elif type == tokenize.OP and string in ALLOWED_OPS:
+ if last_string:
+ yield tokenize.NAME, last_string, last_start, last_end, last_line
+ last_string = None
+ yield type, string, start, end, line
+ elif type in [tokenize.NAME, tokenize.NUMBER, tokenize.OP]:
+ if not last_string:
+ last_string = string
+ last_start = start
+ last_end = end
+ else:
+ last_string += string
+ if last_string:
+ yield tokenize.NAME, last_string, last_start, last_end, last_line
+
+ def _parse_format_selection(tokens, inside_merge=False, inside_choice=False, inside_group=False):
+ selectors = []
+ current_selector = None
+ for type, string, start, _, _ in tokens:
+ # ENCODING is only defined in python 3.x
+ if type == getattr(tokenize, 'ENCODING', None):
+ continue
+ elif type in [tokenize.NAME, tokenize.NUMBER]:
+ current_selector = FormatSelector(SINGLE, string, [])
+ elif type == tokenize.OP:
+ if string == ')':
+ if not inside_group:
+ # ')' will be handled by the parentheses group
+ tokens.restore_last_token()
+ break
+ elif inside_merge and string in ['/', ',']:
+ tokens.restore_last_token()
+ break
+ elif inside_choice and string == ',':
+ tokens.restore_last_token()
+ break
+ elif string == ',':
+ if not current_selector:
+ raise syntax_error('"," must follow a format selector', start)
+ selectors.append(current_selector)
+ current_selector = None
+ elif string == '/':
+ if not current_selector:
+ raise syntax_error('"/" must follow a format selector', start)
+ first_choice = current_selector
+ second_choice = _parse_format_selection(tokens, inside_choice=True)
+ current_selector = FormatSelector(PICKFIRST, (first_choice, second_choice), [])
+ elif string == '[':
+ if not current_selector:
+ current_selector = FormatSelector(SINGLE, 'best', [])
+ format_filter = _parse_filter(tokens)
+ current_selector.filters.append(format_filter)
+ elif string == '(':
+ if current_selector:
+ raise syntax_error('Unexpected "("', start)
+ group = _parse_format_selection(tokens, inside_group=True)
+ current_selector = FormatSelector(GROUP, group, [])
+ elif string == '+':
+ video_selector = current_selector
+ audio_selector = _parse_format_selection(tokens, inside_merge=True)
+ if not video_selector or not audio_selector:
+ raise syntax_error('"+" must be between two format selectors', start)
+ current_selector = FormatSelector(MERGE, (video_selector, audio_selector), [])
+ else:
+ raise syntax_error('Operator not recognized: "{0}"'.format(string), start)
+ elif type == tokenize.ENDMARKER:
+ break
+ if current_selector:
+ selectors.append(current_selector)
+ return selectors
+
+ def _build_selector_function(selector):
+ if isinstance(selector, list):
+ fs = [_build_selector_function(s) for s in selector]
+
+ def selector_function(formats):
+ for f in fs:
+ for format in f(formats):
+ yield format
+ return selector_function
+ elif selector.type == GROUP:
+ selector_function = _build_selector_function(selector.selector)
+ elif selector.type == PICKFIRST:
+ fs = [_build_selector_function(s) for s in selector.selector]
+
+ def selector_function(formats):
+ for f in fs:
+ picked_formats = list(f(formats))
+ if picked_formats:
+ return picked_formats
+ return []
+ elif selector.type == SINGLE:
+ format_spec = selector.selector
+
+ def selector_function(formats):
+ formats = list(formats)
+ if not formats:
+ return
+ if format_spec == 'all':
+ for f in formats:
+ yield f
+ elif format_spec in ['best', 'worst', None]:
+ format_idx = 0 if format_spec == 'worst' else -1
+ audiovideo_formats = [
+ f for f in formats
+ if f.get('vcodec') != 'none' and f.get('acodec') != 'none']
+ if audiovideo_formats:
+ yield audiovideo_formats[format_idx]
+ # for audio-only (soundcloud) or video-only (imgur) urls, select the best/worst format
+ elif (all(f.get('acodec') != 'none' for f in formats) or
+ all(f.get('vcodec') != 'none' for f in formats)):
+ yield formats[format_idx]
+ elif format_spec == 'bestaudio':
+ audio_formats = [
+ f for f in formats
+ if f.get('vcodec') == 'none']
+ if audio_formats:
+ yield audio_formats[-1]
+ elif format_spec == 'worstaudio':
+ audio_formats = [
+ f for f in formats
+ if f.get('vcodec') == 'none']
+ if audio_formats:
+ yield audio_formats[0]
+ elif format_spec == 'bestvideo':
+ video_formats = [
+ f for f in formats
+ if f.get('acodec') == 'none']
+ if video_formats:
+ yield video_formats[-1]
+ elif format_spec == 'worstvideo':
+ video_formats = [
+ f for f in formats
+ if f.get('acodec') == 'none']
+ if video_formats:
+ yield video_formats[0]
+ else:
+ extensions = ['mp4', 'flv', 'webm', '3gp', 'm4a', 'mp3', 'ogg', 'aac', 'wav']
+ if format_spec in extensions:
+ filter_f = lambda f: f['ext'] == format_spec
+ else:
+ filter_f = lambda f: f['format_id'] == format_spec
+ matches = list(filter(filter_f, formats))
+ if matches:
+ yield matches[-1]
+ elif selector.type == MERGE:
+ def _merge(formats_info):
+ format_1, format_2 = [f['format_id'] for f in formats_info]
+ # The first format must contain the video and the
+ # second the audio
+ if formats_info[0].get('vcodec') == 'none':
+ self.report_error('The first format must '
+ 'contain the video, try using '
+ '"-f %s+%s"' % (format_2, format_1))
+ return
+ output_ext = (
+ formats_info[0]['ext']
+ if self.params.get('merge_output_format') is None
+ else self.params['merge_output_format'])
+ return {
+ 'requested_formats': formats_info,
+ 'format': '%s+%s' % (formats_info[0].get('format'),
+ formats_info[1].get('format')),
+ 'format_id': '%s+%s' % (formats_info[0].get('format_id'),
+ formats_info[1].get('format_id')),
+ 'width': formats_info[0].get('width'),
+ 'height': formats_info[0].get('height'),
+ 'resolution': formats_info[0].get('resolution'),
+ 'fps': formats_info[0].get('fps'),
+ 'vcodec': formats_info[0].get('vcodec'),
+ 'vbr': formats_info[0].get('vbr'),
+ 'stretched_ratio': formats_info[0].get('stretched_ratio'),
+ 'acodec': formats_info[1].get('acodec'),
+ 'abr': formats_info[1].get('abr'),
+ 'ext': output_ext,
+ }
+ video_selector, audio_selector = map(_build_selector_function, selector.selector)
- new_format_spec = format_spec[:-len(m.group(0))]
- if not new_format_spec:
- new_format_spec = 'best'
+ def selector_function(formats):
+ formats = list(formats)
+ for pair in itertools.product(video_selector(formats), audio_selector(formats)):
+ yield _merge(pair)
- return (new_format_spec, new_formats)
+ filters = [self._build_format_filter(f) for f in selector.filters]
- def select_format(self, format_spec, available_formats):
- while format_spec.endswith(']'):
- format_spec, available_formats = self._apply_format_filter(
- format_spec, available_formats)
- if not available_formats:
- return None
+ def final_selector(formats):
+ for _filter in filters:
+ formats = list(filter(_filter, formats))
+ return selector_function(formats)
+ return final_selector
- if format_spec in ['best', 'worst', None]:
- format_idx = 0 if format_spec == 'worst' else -1
- audiovideo_formats = [
- f for f in available_formats
- if f.get('vcodec') != 'none' and f.get('acodec') != 'none']
- if audiovideo_formats:
- return audiovideo_formats[format_idx]
- # for audio only (soundcloud) or video only (imgur) urls, select the best/worst audio format
- elif (all(f.get('acodec') != 'none' for f in available_formats) or
- all(f.get('vcodec') != 'none' for f in available_formats)):
- return available_formats[format_idx]
- elif format_spec == 'bestaudio':
- audio_formats = [
- f for f in available_formats
- if f.get('vcodec') == 'none']
- if audio_formats:
- return audio_formats[-1]
- elif format_spec == 'worstaudio':
- audio_formats = [
- f for f in available_formats
- if f.get('vcodec') == 'none']
- if audio_formats:
- return audio_formats[0]
- elif format_spec == 'bestvideo':
- video_formats = [
- f for f in available_formats
- if f.get('acodec') == 'none']
- if video_formats:
- return video_formats[-1]
- elif format_spec == 'worstvideo':
- video_formats = [
- f for f in available_formats
- if f.get('acodec') == 'none']
- if video_formats:
- return video_formats[0]
- else:
- extensions = ['mp4', 'flv', 'webm', '3gp', 'm4a', 'mp3', 'ogg', 'aac', 'wav']
- if format_spec in extensions:
- filter_f = lambda f: f['ext'] == format_spec
- else:
- filter_f = lambda f: f['format_id'] == format_spec
- matches = list(filter(filter_f, available_formats))
- if matches:
- return matches[-1]
- return None
+ stream = io.BytesIO(format_spec.encode('utf-8'))
+ try:
+ tokens = list(_remove_unused_ops(compat_tokenize_tokenize(stream.readline)))
+ except tokenize.TokenError:
+ raise syntax_error('Missing closing/opening brackets or parenthesis', (0, len(format_spec)))
+
+ class TokenIterator(object):
+ def __init__(self, tokens):
+ self.tokens = tokens
+ self.counter = 0
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ if self.counter >= len(self.tokens):
+ raise StopIteration()
+ value = self.tokens[self.counter]
+ self.counter += 1
+ return value
+
+ next = __next__
+
+ def restore_last_token(self):
+ self.counter -= 1
+
+ parsed_selector = _parse_format_selection(iter(TokenIterator(tokens)))
+ return _build_selector_function(parsed_selector)
def _calc_headers(self, info_dict):
res = std_headers.copy()
@@ -1104,62 +1306,15 @@ class YoutubeDL(object):
if req_format is None:
req_format_list = []
if (self.params.get('outtmpl', DEFAULT_OUTTMPL) != '-' and
- info_dict['extractor'] in ['youtube', 'ted']):
+ info_dict['extractor'] in ['youtube', 'ted'] and
+ not info_dict.get('is_live')):
merger = FFmpegMergerPP(self)
if merger.available and merger.can_merge():
req_format_list.append('bestvideo+bestaudio')
req_format_list.append('best')
req_format = '/'.join(req_format_list)
- formats_to_download = []
- if req_format == 'all':
- formats_to_download = formats
- else:
- for rfstr in req_format.split(','):
- # We can accept formats requested in the format: 34/5/best, we pick
- # the first that is available, starting from left
- req_formats = rfstr.split('/')
- for rf in req_formats:
- if re.match(r'.+?\+.+?', rf) is not None:
- # Two formats have been requested like '137+139'
- format_1, format_2 = rf.split('+')
- formats_info = (self.select_format(format_1, formats),
- self.select_format(format_2, formats))
- if all(formats_info):
- # The first format must contain the video and the
- # second the audio
- if formats_info[0].get('vcodec') == 'none':
- self.report_error('The first format must '
- 'contain the video, try using '
- '"-f %s+%s"' % (format_2, format_1))
- return
- output_ext = (
- formats_info[0]['ext']
- if self.params.get('merge_output_format') is None
- else self.params['merge_output_format'])
- selected_format = {
- 'requested_formats': formats_info,
- 'format': '%s+%s' % (formats_info[0].get('format'),
- formats_info[1].get('format')),
- 'format_id': '%s+%s' % (formats_info[0].get('format_id'),
- formats_info[1].get('format_id')),
- 'width': formats_info[0].get('width'),
- 'height': formats_info[0].get('height'),
- 'resolution': formats_info[0].get('resolution'),
- 'fps': formats_info[0].get('fps'),
- 'vcodec': formats_info[0].get('vcodec'),
- 'vbr': formats_info[0].get('vbr'),
- 'stretched_ratio': formats_info[0].get('stretched_ratio'),
- 'acodec': formats_info[1].get('acodec'),
- 'abr': formats_info[1].get('abr'),
- 'ext': output_ext,
- }
- else:
- selected_format = None
- else:
- selected_format = self.select_format(rf, formats)
- if selected_format is not None:
- formats_to_download.append(selected_format)
- break
+ format_selector = self.build_format_selector(req_format)
+ formats_to_download = list(format_selector(formats))
if not formats_to_download:
raise ExtractorError('requested format not available',
expected=True)
@@ -1707,27 +1862,6 @@ class YoutubeDL(object):
def urlopen(self, req):
""" Start an HTTP download """
-
- # According to RFC 3986, URLs can not contain non-ASCII characters, however this is not
- # always respected by websites, some tend to give out URLs with non percent-encoded
- # non-ASCII characters (see telemb.py, ard.py [#3412])
- # urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991)
- # To work around aforementioned issue we will replace request's original URL with
- # percent-encoded one
- req_is_string = isinstance(req, compat_basestring)
- url = req if req_is_string else req.get_full_url()
- url_escaped = escape_url(url)
-
- # Substitute URL if any change after escaping
- if url != url_escaped:
- if req_is_string:
- req = url_escaped
- else:
- req_type = HEADRequest if req.get_method() == 'HEAD' else compat_urllib_request.Request
- req = req_type(
- url_escaped, data=req.data, headers=req.headers,
- origin_req_host=req.origin_req_host, unverifiable=req.unverifiable)
-
return self._opener.open(req, timeout=self._socket_timeout)
def print_debug_header(self):
@@ -1810,8 +1944,7 @@ class YoutubeDL(object):
if os.access(opts_cookiefile, os.R_OK):
self.cookiejar.load()
- cookie_processor = compat_urllib_request.HTTPCookieProcessor(
- self.cookiejar)
+ cookie_processor = YoutubeDLCookieProcessor(self.cookiejar)
if opts_proxy is not None:
if opts_proxy == '':
proxies = {}
@@ -1880,7 +2013,7 @@ class YoutubeDL(object):
(info_dict['extractor'], info_dict['id'], thumb_display_id))
try:
uf = self.urlopen(t['url'])
- with open(thumb_filename, 'wb') as thumbf:
+ with open(encodeFilename(thumb_filename), 'wb') as thumbf:
shutil.copyfileobj(uf, thumbf)
self.to_screen('[%s] %s: Writing thumbnail %sto: %s' %
(info_dict['extractor'], info_dict['id'], thumb_display_id, thumb_filename))
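
The rewritten selector compiles the format spec once into a reusable function instead of re-splitting the spec string for every download. A minimal sketch of it in action, assuming a youtube_dl build that includes this change (the format dicts below are illustrative):

    from youtube_dl import YoutubeDL

    ydl = YoutubeDL({})
    # Parsed once; the returned function can be applied to any format list
    selector = ydl.build_format_selector('bestvideo[height<=480]+bestaudio/best')
    formats = [
        {'format_id': '135', 'ext': 'mp4', 'height': 480,
         'vcodec': 'avc1.4d401e', 'acodec': 'none'},
        {'format_id': '140', 'ext': 'm4a', 'vcodec': 'none', 'acodec': 'mp4a.40.2'},
        {'format_id': '22', 'ext': 'mp4', 'height': 720,
         'vcodec': 'avc1.64001F', 'acodec': 'mp4a.40.2'},
    ]
    for f in selector(formats):
        print(f['format_id'])  # '135+140': the filtered video merged with best audio
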
diff --git a/youtube_dl/__init__.py b/youtube_dl/__init__.py
index 55b22c889..5e2ed4d4b 100644
--- a/youtube_dl/__init__.py
+++ b/youtube_dl/__init__.py
@@ -9,7 +9,6 @@ import codecs
import io
import os
import random
-import shlex
import sys
@@ -20,6 +19,7 @@ from .compat import (
compat_expanduser,
compat_getpass,
compat_print,
+ compat_shlex_split,
workaround_optparse_bug9161,
)
from .utils import (
@@ -262,10 +262,10 @@ def _real_main(argv=None):
parser.error('setting filesize xattr requested but python-xattr is not available')
external_downloader_args = None
if opts.external_downloader_args:
- external_downloader_args = shlex.split(opts.external_downloader_args)
+ external_downloader_args = compat_shlex_split(opts.external_downloader_args)
postprocessor_args = None
if opts.postprocessor_args:
- postprocessor_args = shlex.split(opts.postprocessor_args)
+ postprocessor_args = compat_shlex_split(opts.postprocessor_args)
match_filter = (
None if opts.match_filter is None
else match_filter_func(opts.match_filter))
diff --git a/youtube_dl/compat.py b/youtube_dl/compat.py
index 0c57c7aeb..e32bef279 100644
--- a/youtube_dl/compat.py
+++ b/youtube_dl/compat.py
@@ -5,6 +5,7 @@ import getpass
import optparse
import os
import re
+import shlex
import shutil
import socket
import subprocess
@@ -43,6 +44,11 @@ except ImportError: # Python 2
import cookielib as compat_cookiejar
try:
+ import http.cookies as compat_cookies
+except ImportError: # Python 2
+ import Cookie as compat_cookies
+
+try:
import html.entities as compat_html_entities
except ImportError: # Python 2
import htmlentitydefs as compat_html_entities
@@ -222,6 +228,17 @@ except ImportError: # Python < 3.3
return "'" + s.replace("'", "'\"'\"'") + "'"
+if sys.version_info >= (2, 7, 3):
+ compat_shlex_split = shlex.split
+else:
+ # Work around a shlex issue with unicode strings on some Python 2
+ # versions (see http://bugs.python.org/issue1548891)
+ def compat_shlex_split(s, comments=False, posix=True):
+ if isinstance(s, unicode):
+ s = s.encode('utf-8')
+ return shlex.split(s, comments, posix)
+
+
def compat_ord(c):
if type(c) is int:
return c
@@ -431,11 +448,17 @@ except TypeError: # Python 2.6
yield n
n += step
+if sys.version_info >= (3, 0):
+ from tokenize import tokenize as compat_tokenize_tokenize
+else:
+ from tokenize import generate_tokens as compat_tokenize_tokenize
+
__all__ = [
'compat_HTTPError',
'compat_basestring',
'compat_chr',
'compat_cookiejar',
+ 'compat_cookies',
'compat_expanduser',
'compat_get_terminal_size',
'compat_getenv',
@@ -448,9 +471,11 @@ __all__ = [
'compat_ord',
'compat_parse_qs',
'compat_print',
+ 'compat_shlex_split',
'compat_socket_create_connection',
'compat_str',
'compat_subprocess_get_DEVNULL',
+ 'compat_tokenize_tokenize',
'compat_urllib_error',
'compat_urllib_parse',
'compat_urllib_parse_unquote',
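
The compat_shlex_split added above can be exercised directly; on Python 2 before 2.7.3, shlex.split() chokes on unicode input, which the wrapper papers over by encoding to UTF-8 first:

    from youtube_dl.compat import compat_shlex_split

    # Same call on every supported interpreter
    print(compat_shlex_split(u'--retry 10 --header "User-Agent: test"'))
    # ['--retry', '10', '--header', 'User-Agent: test']
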
diff --git a/youtube_dl/downloader/__init__.py b/youtube_dl/downloader/__init__.py
index f110830c4..dccc59212 100644
--- a/youtube_dl/downloader/__init__.py
+++ b/youtube_dl/downloader/__init__.py
@@ -8,6 +8,7 @@ from .hls import NativeHlsFD
from .http import HttpFD
from .rtsp import RtspFD
from .rtmp import RtmpFD
+from .dash import DashSegmentsFD
from ..utils import (
determine_protocol,
@@ -20,6 +21,7 @@ PROTOCOL_MAP = {
'mms': RtspFD,
'rtsp': RtspFD,
'f4m': F4mFD,
+ 'http_dash_segments': DashSegmentsFD,
}
diff --git a/youtube_dl/downloader/dash.py b/youtube_dl/downloader/dash.py
new file mode 100644
index 000000000..8b6fa2753
--- /dev/null
+++ b/youtube_dl/downloader/dash.py
@@ -0,0 +1,66 @@
+from __future__ import unicode_literals
+
+import re
+
+from .common import FileDownloader
+from ..compat import compat_urllib_request
+
+
+class DashSegmentsFD(FileDownloader):
+ """
+ Download segments in a DASH manifest
+ """
+ def real_download(self, filename, info_dict):
+ self.report_destination(filename)
+ tmpfilename = self.temp_name(filename)
+ base_url = info_dict['url']
+ segment_urls = info_dict['segment_urls']
+
+ is_test = self.params.get('test', False)
+ remaining_bytes = self._TEST_FILE_SIZE if is_test else None
+ byte_counter = 0
+
+ def append_url_to_file(outf, target_url, target_name, remaining_bytes=None):
+ self.to_screen('[DashSegments] %s: Downloading %s' % (info_dict['id'], target_name))
+ req = compat_urllib_request.Request(target_url)
+ if remaining_bytes is not None:
+ req.add_header('Range', 'bytes=0-%d' % (remaining_bytes - 1))
+
+ data = self.ydl.urlopen(req).read()
+
+ if remaining_bytes is not None:
+ data = data[:remaining_bytes]
+
+ outf.write(data)
+ return len(data)
+
+ def combine_url(base_url, target_url):
+ if re.match(r'^https?://', target_url):
+ return target_url
+ return '%s%s%s' % (base_url, '' if base_url.endswith('/') else '/', target_url)
+
+ with open(tmpfilename, 'wb') as outf:
+ append_url_to_file(
+ outf, combine_url(base_url, info_dict['initialization_url']),
+ 'initialization segment')
+ for i, segment_url in enumerate(segment_urls):
+ segment_len = append_url_to_file(
+ outf, combine_url(base_url, segment_url),
+ 'segment %d / %d' % (i + 1, len(segment_urls)),
+ remaining_bytes)
+ byte_counter += segment_len
+ if remaining_bytes is not None:
+ remaining_bytes -= segment_len
+ if remaining_bytes <= 0:
+ break
+
+ self.try_rename(tmpfilename, filename)
+
+ self._hook_progress({
+ 'downloaded_bytes': byte_counter,
+ 'total_bytes': byte_counter,
+ 'filename': filename,
+ 'status': 'finished',
+ })
+
+ return True
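
The URL resolution in combine_url() is deliberately simple: absolute segment URLs win, relative ones are glued onto the manifest base with exactly one slash. Restated standalone (the CDN URLs are illustrative):

    import re

    def combine_url(base_url, target_url):
        # Absolute URLs are used as-is; relative ones are joined onto base_url
        if re.match(r'^https?://', target_url):
            return target_url
        return '%s%s%s' % (base_url, '' if base_url.endswith('/') else '/', target_url)

    print(combine_url('http://cdn.example.com/dash', 'seg-1.m4s'))
    # http://cdn.example.com/dash/seg-1.m4s
    print(combine_url('http://cdn.example.com/dash/', 'http://other.example.com/seg-2.m4s'))
    # http://other.example.com/seg-2.m4s
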
diff --git a/youtube_dl/downloader/external.py b/youtube_dl/downloader/external.py
index 1d5cc9904..2bc011266 100644
--- a/youtube_dl/downloader/external.py
+++ b/youtube_dl/downloader/external.py
@@ -5,6 +5,10 @@ import subprocess
from .common import FileDownloader
from ..utils import (
+ cli_option,
+ cli_valueless_option,
+ cli_bool_option,
+ cli_configuration_args,
encodeFilename,
encodeArgument,
)
@@ -45,18 +49,17 @@ class ExternalFD(FileDownloader):
def supports(cls, info_dict):
return info_dict['protocol'] in ('http', 'https', 'ftp', 'ftps')
- def _source_address(self, command_option):
- source_address = self.params.get('source_address')
- if source_address is None:
- return []
- return [command_option, source_address]
+ def _option(self, command_option, param):
+ return cli_option(self.params, command_option, param)
+
+ def _bool_option(self, command_option, param, true_value='true', false_value='false', separator=None):
+ return cli_bool_option(self.params, command_option, param, true_value, false_value, separator)
+
+ def _valueless_option(self, command_option, param, expected_value=True):
+ return cli_valueless_option(self.params, command_option, param, expected_value)
def _configuration_args(self, default=[]):
- ex_args = self.params.get('external_downloader_args')
- if ex_args is None:
- return default
- assert isinstance(ex_args, list)
- return ex_args
+ return cli_configuration_args(self.params, 'external_downloader_args', default)
def _call_downloader(self, tmpfilename, info_dict):
""" Either overwrite this or implement _make_cmd """
@@ -77,7 +80,19 @@ class CurlFD(ExternalFD):
cmd = [self.exe, '--location', '-o', tmpfilename]
for key, val in info_dict['http_headers'].items():
cmd += ['--header', '%s: %s' % (key, val)]
- cmd += self._source_address('--interface')
+ cmd += self._option('--interface', 'source_address')
+ cmd += self._option('--proxy', 'proxy')
+ cmd += self._valueless_option('--insecure', 'nocheckcertificate')
+ cmd += self._configuration_args()
+ cmd += ['--', info_dict['url']]
+ return cmd
+
+
+class AxelFD(ExternalFD):
+ def _make_cmd(self, tmpfilename, info_dict):
+ cmd = [self.exe, '-o', tmpfilename]
+ for key, val in info_dict['http_headers'].items():
+ cmd += ['-H', '%s: %s' % (key, val)]
cmd += self._configuration_args()
cmd += ['--', info_dict['url']]
return cmd
@@ -88,7 +103,9 @@ class WgetFD(ExternalFD):
cmd = [self.exe, '-O', tmpfilename, '-nv', '--no-cookies']
for key, val in info_dict['http_headers'].items():
cmd += ['--header', '%s: %s' % (key, val)]
- cmd += self._source_address('--bind-address')
+ cmd += self._option('--bind-address', 'source_address')
+ cmd += self._option('--proxy', 'proxy')
+ cmd += self._valueless_option('--no-check-certificate', 'nocheckcertificate')
cmd += self._configuration_args()
cmd += ['--', info_dict['url']]
return cmd
@@ -105,7 +122,9 @@ class Aria2cFD(ExternalFD):
cmd += ['--out', os.path.basename(tmpfilename)]
for key, val in info_dict['http_headers'].items():
cmd += ['--header', '%s: %s' % (key, val)]
- cmd += self._source_address('--interface')
+ cmd += self._option('--interface', 'source_address')
+ cmd += self._option('--all-proxy', 'proxy')
+ cmd += self._bool_option('--check-certificate', 'nocheckcertificate', 'false', 'true', '=')
cmd += ['--', info_dict['url']]
return cmd
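
The cli_* helpers themselves land in utils.py (part of this commit, not shown in this hunk), so the following is a sketch of their expected output inferred from the call sites above:

    from youtube_dl.utils import (
        cli_bool_option,
        cli_option,
        cli_valueless_option,
    )

    params = {'proxy': 'socks5://127.0.0.1:1080', 'nocheckcertificate': True}

    print(cli_option(params, '--proxy', 'proxy'))
    # ['--proxy', 'socks5://127.0.0.1:1080']  (option plus value when the param is set)
    print(cli_valueless_option(params, '--insecure', 'nocheckcertificate'))
    # ['--insecure']  (bare flag when the param matches the expected value)
    print(cli_bool_option(params, '--check-certificate', 'nocheckcertificate',
                          'false', 'true', '='))
    # ['--check-certificate=false']  (aria2c-style key=value via the separator)
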
diff --git a/youtube_dl/downloader/f4m.py b/youtube_dl/downloader/f4m.py
index b1a858c45..174180db5 100644
--- a/youtube_dl/downloader/f4m.py
+++ b/youtube_dl/downloader/f4m.py
@@ -7,17 +7,16 @@ import os
import time
import xml.etree.ElementTree as etree
-from .common import FileDownloader
-from .http import HttpFD
+from .fragment import FragmentFD
from ..compat import (
compat_urlparse,
compat_urllib_error,
)
from ..utils import (
- struct_pack,
- struct_unpack,
encodeFilename,
sanitize_open,
+ struct_pack,
+ struct_unpack,
xpath_text,
)
@@ -226,16 +225,13 @@ def _add_ns(prop):
return '{http://ns.adobe.com/f4m/1.0}%s' % prop
-class HttpQuietDownloader(HttpFD):
- def to_screen(self, *args, **kargs):
- pass
-
-
-class F4mFD(FileDownloader):
+class F4mFD(FragmentFD):
"""
A downloader for f4m manifests or AdobeHDS.
"""
+ FD_NAME = 'f4m'
+
def _get_unencrypted_media(self, doc):
media = doc.findall(_add_ns('media'))
if not media:
@@ -288,7 +284,7 @@ class F4mFD(FileDownloader):
def real_download(self, filename, info_dict):
man_url = info_dict['url']
requested_bitrate = info_dict.get('tbr')
- self.to_screen('[download] Downloading f4m manifest')
+ self.to_screen('[%s] Downloading f4m manifest' % self.FD_NAME)
manifest = self.ydl.urlopen(man_url).read()
doc = etree.fromstring(manifest)
@@ -320,67 +316,20 @@ class F4mFD(FileDownloader):
# For some akamai manifests we'll need to add a query to the fragment url
akamai_pv = xpath_text(doc, _add_ns('pv-2.0'))
- self.report_destination(filename)
- http_dl = HttpQuietDownloader(
- self.ydl,
- {
- 'continuedl': True,
- 'quiet': True,
- 'noprogress': True,
- 'ratelimit': self.params.get('ratelimit', None),
- 'test': self.params.get('test', False),
- }
- )
- tmpfilename = self.temp_name(filename)
- (dest_stream, tmpfilename) = sanitize_open(tmpfilename, 'wb')
+ ctx = {
+ 'filename': filename,
+ 'total_frags': total_frags,
+ }
+
+ self._prepare_frag_download(ctx)
+
+ dest_stream = ctx['dest_stream']
write_flv_header(dest_stream)
if not live:
write_metadata_tag(dest_stream, metadata)
- # This dict stores the download progress, it's updated by the progress
- # hook
- state = {
- 'status': 'downloading',
- 'downloaded_bytes': 0,
- 'frag_index': 0,
- 'frag_count': total_frags,
- 'filename': filename,
- 'tmpfilename': tmpfilename,
- }
- start = time.time()
-
- def frag_progress_hook(s):
- if s['status'] not in ('downloading', 'finished'):
- return
-
- frag_total_bytes = s.get('total_bytes', 0)
- if s['status'] == 'finished':
- state['downloaded_bytes'] += frag_total_bytes
- state['frag_index'] += 1
-
- estimated_size = (
- (state['downloaded_bytes'] + frag_total_bytes) /
- (state['frag_index'] + 1) * total_frags)
- time_now = time.time()
- state['total_bytes_estimate'] = estimated_size
- state['elapsed'] = time_now - start
-
- if s['status'] == 'finished':
- progress = self.calc_percent(state['frag_index'], total_frags)
- else:
- frag_downloaded_bytes = s['downloaded_bytes']
- frag_progress = self.calc_percent(frag_downloaded_bytes,
- frag_total_bytes)
- progress = self.calc_percent(state['frag_index'], total_frags)
- progress += frag_progress / float(total_frags)
-
- state['eta'] = self.calc_eta(
- start, time_now, estimated_size, state['downloaded_bytes'] + frag_downloaded_bytes)
- state['speed'] = s.get('speed')
- self._hook_progress(state)
-
- http_dl.add_progress_hook(frag_progress_hook)
+ self._start_frag_download(ctx)
frags_filenames = []
while fragments_list:
@@ -391,23 +340,24 @@ class F4mFD(FileDownloader):
url += '?' + akamai_pv.strip(';')
if info_dict.get('extra_param_to_segment_url'):
url += info_dict.get('extra_param_to_segment_url')
- frag_filename = '%s-%s' % (tmpfilename, name)
+ frag_filename = '%s-%s' % (ctx['tmpfilename'], name)
try:
- success = http_dl.download(frag_filename, {'url': url})
+ success = ctx['dl'].download(frag_filename, {'url': url})
if not success:
return False
- with open(frag_filename, 'rb') as down:
- down_data = down.read()
- reader = FlvReader(down_data)
- while True:
- _, box_type, box_data = reader.read_box_info()
- if box_type == b'mdat':
- dest_stream.write(box_data)
- break
+ (down, frag_sanitized) = sanitize_open(frag_filename, 'rb')
+ down_data = down.read()
+ down.close()
+ reader = FlvReader(down_data)
+ while True:
+ _, box_type, box_data = reader.read_box_info()
+ if box_type == b'mdat':
+ dest_stream.write(box_data)
+ break
if live:
- os.remove(frag_filename)
+ os.remove(encodeFilename(frag_sanitized))
else:
- frags_filenames.append(frag_filename)
+ frags_filenames.append(frag_sanitized)
except (compat_urllib_error.HTTPError, ) as err:
if live and (err.code == 404 or err.code == 410):
# We didn't keep up with the live window. Continue
@@ -425,20 +375,9 @@ class F4mFD(FileDownloader):
msg = 'Missed %d fragments' % (fragments_list[0][1] - (frag_i + 1))
self.report_warning(msg)
- dest_stream.close()
+ self._finish_frag_download(ctx)
- elapsed = time.time() - start
- self.try_rename(tmpfilename, filename)
for frag_file in frags_filenames:
- os.remove(frag_file)
-
- fsize = os.path.getsize(encodeFilename(filename))
- self._hook_progress({
- 'downloaded_bytes': fsize,
- 'total_bytes': fsize,
- 'filename': filename,
- 'status': 'finished',
- 'elapsed': elapsed,
- })
+ os.remove(encodeFilename(frag_file))
return True
diff --git a/youtube_dl/downloader/fragment.py b/youtube_dl/downloader/fragment.py
new file mode 100644
index 000000000..5a64b29ee
--- /dev/null
+++ b/youtube_dl/downloader/fragment.py
@@ -0,0 +1,111 @@
+from __future__ import division, unicode_literals
+
+import os
+import time
+
+from .common import FileDownloader
+from .http import HttpFD
+from ..utils import (
+ encodeFilename,
+ sanitize_open,
+)
+
+
+class HttpQuietDownloader(HttpFD):
+ def to_screen(self, *args, **kargs):
+ pass
+
+
+class FragmentFD(FileDownloader):
+ """
+ A base file downloader class for fragmented media (e.g. f4m/m3u8 manifests).
+ """
+
+ def _prepare_and_start_frag_download(self, ctx):
+ self._prepare_frag_download(ctx)
+ self._start_frag_download(ctx)
+
+ def _prepare_frag_download(self, ctx):
+ self.to_screen('[%s] Total fragments: %d' % (self.FD_NAME, ctx['total_frags']))
+ self.report_destination(ctx['filename'])
+ dl = HttpQuietDownloader(
+ self.ydl,
+ {
+ 'continuedl': True,
+ 'quiet': True,
+ 'noprogress': True,
+ 'ratelimit': self.params.get('ratelimit', None),
+ 'retries': self.params.get('retries', 0),
+ 'test': self.params.get('test', False),
+ }
+ )
+ tmpfilename = self.temp_name(ctx['filename'])
+ dest_stream, tmpfilename = sanitize_open(tmpfilename, 'wb')
+ ctx.update({
+ 'dl': dl,
+ 'dest_stream': dest_stream,
+ 'tmpfilename': tmpfilename,
+ })
+
+ def _start_frag_download(self, ctx):
+ total_frags = ctx['total_frags']
+ # This dict stores the download progress; it is updated by the
+ # progress hook
+ state = {
+ 'status': 'downloading',
+ 'downloaded_bytes': 0,
+ 'frag_index': 0,
+ 'frag_count': total_frags,
+ 'filename': ctx['filename'],
+ 'tmpfilename': ctx['tmpfilename'],
+ }
+ start = time.time()
+ ctx['started'] = start
+
+ def frag_progress_hook(s):
+ if s['status'] not in ('downloading', 'finished'):
+ return
+
+ frag_total_bytes = s.get('total_bytes', 0)
+ if s['status'] == 'finished':
+ state['downloaded_bytes'] += frag_total_bytes
+ state['frag_index'] += 1
+
+ estimated_size = (
+ (state['downloaded_bytes'] + frag_total_bytes) /
+ (state['frag_index'] + 1) * total_frags)
+ time_now = time.time()
+ state['total_bytes_estimate'] = estimated_size
+ state['elapsed'] = time_now - start
+
+ if s['status'] == 'finished':
+ progress = self.calc_percent(state['frag_index'], total_frags)
+ else:
+ frag_downloaded_bytes = s['downloaded_bytes']
+ frag_progress = self.calc_percent(frag_downloaded_bytes,
+ frag_total_bytes)
+ progress = self.calc_percent(state['frag_index'], total_frags)
+ progress += frag_progress / float(total_frags)
+
+ state['eta'] = self.calc_eta(
+ start, time_now, estimated_size, state['downloaded_bytes'] + frag_downloaded_bytes)
+ state['speed'] = s.get('speed')
+ self._hook_progress(state)
+
+ ctx['dl'].add_progress_hook(frag_progress_hook)
+
+ return start
+
+ def _finish_frag_download(self, ctx):
+ ctx['dest_stream'].close()
+ elapsed = time.time() - ctx['started']
+ self.try_rename(ctx['tmpfilename'], ctx['filename'])
+ fsize = os.path.getsize(encodeFilename(ctx['filename']))
+
+ self._hook_progress({
+ 'downloaded_bytes': fsize,
+ 'total_bytes': fsize,
+ 'filename': ctx['filename'],
+ 'status': 'finished',
+ 'elapsed': elapsed,
+ })
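
With this base class, a concrete downloader only has to fill a ctx dict and loop over its fragments. A minimal sketch of a subclass (the class name and the 'fragment_urls' info key are made up for illustration; compare NativeHlsFD below):

    from youtube_dl.downloader.fragment import FragmentFD

    class ExampleFragmentFD(FragmentFD):
        FD_NAME = 'example'

        def real_download(self, filename, info_dict):
            fragment_urls = info_dict['fragment_urls']  # hypothetical key
            ctx = {'filename': filename, 'total_frags': len(fragment_urls)}
            # Sets up ctx['dl'], ctx['dest_stream'], ctx['tmpfilename'] and
            # installs the fragment progress hook
            self._prepare_and_start_frag_download(ctx)
            for i, frag_url in enumerate(fragment_urls):
                frag_filename = '%s-Frag%d' % (ctx['tmpfilename'], i)
                if not ctx['dl'].download(frag_filename, {'url': frag_url}):
                    return False
                with open(frag_filename, 'rb') as down:
                    ctx['dest_stream'].write(down.read())
            # Closes the stream, renames the temp file and reports 'finished'
            self._finish_frag_download(ctx)
            return True
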
diff --git a/youtube_dl/downloader/hls.py b/youtube_dl/downloader/hls.py
index 8be4f4249..b2436e732 100644
--- a/youtube_dl/downloader/hls.py
+++ b/youtube_dl/downloader/hls.py
@@ -4,15 +4,15 @@ import os
import re
import subprocess
-from ..postprocessor.ffmpeg import FFmpegPostProcessor
from .common import FileDownloader
-from ..compat import (
- compat_urlparse,
- compat_urllib_request,
-)
+from .fragment import FragmentFD
+
+from ..compat import compat_urlparse
+from ..postprocessor.ffmpeg import FFmpegPostProcessor
from ..utils import (
encodeArgument,
encodeFilename,
+ sanitize_open,
)
@@ -33,6 +33,8 @@ class HlsFD(FileDownloader):
for opt in (ffpp.executable, '-y', '-i', url, '-f', 'mp4', '-c', 'copy', '-bsf:a', 'aac_adtstoasc')]
args.append(encodeFilename(tmpfilename, True))
+ self._debug_cmd(args)
+
retval = subprocess.call(args)
if retval == 0:
fsize = os.path.getsize(encodeFilename(tmpfilename))
@@ -51,54 +53,51 @@ class HlsFD(FileDownloader):
return False
-class NativeHlsFD(FileDownloader):
+class NativeHlsFD(FragmentFD):
""" A more limited implementation that does not require ffmpeg """
+ FD_NAME = 'hlsnative'
+
def real_download(self, filename, info_dict):
- url = info_dict['url']
- self.report_destination(filename)
- tmpfilename = self.temp_name(filename)
+ man_url = info_dict['url']
+ self.to_screen('[%s] Downloading m3u8 manifest' % self.FD_NAME)
+ manifest = self.ydl.urlopen(man_url).read()
- self.to_screen(
- '[hlsnative] %s: Downloading m3u8 manifest' % info_dict['id'])
- data = self.ydl.urlopen(url).read()
- s = data.decode('utf-8', 'ignore')
- segment_urls = []
+ s = manifest.decode('utf-8', 'ignore')
+ fragment_urls = []
for line in s.splitlines():
line = line.strip()
if line and not line.startswith('#'):
segment_url = (
line
if re.match(r'^https?://', line)
- else compat_urlparse.urljoin(url, line))
- segment_urls.append(segment_url)
-
- is_test = self.params.get('test', False)
- remaining_bytes = self._TEST_FILE_SIZE if is_test else None
- byte_counter = 0
- with open(tmpfilename, 'wb') as outf:
- for i, segurl in enumerate(segment_urls):
- self.to_screen(
- '[hlsnative] %s: Downloading segment %d / %d' %
- (info_dict['id'], i + 1, len(segment_urls)))
- seg_req = compat_urllib_request.Request(segurl)
- if remaining_bytes is not None:
- seg_req.add_header('Range', 'bytes=0-%d' % (remaining_bytes - 1))
-
- segment = self.ydl.urlopen(seg_req).read()
- if remaining_bytes is not None:
- segment = segment[:remaining_bytes]
- remaining_bytes -= len(segment)
- outf.write(segment)
- byte_counter += len(segment)
- if remaining_bytes is not None and remaining_bytes <= 0:
+ else compat_urlparse.urljoin(man_url, line))
+ fragment_urls.append(segment_url)
+ # We only download the first fragment during the test
+ if self.params.get('test', False):
break
- self._hook_progress({
- 'downloaded_bytes': byte_counter,
- 'total_bytes': byte_counter,
+ ctx = {
'filename': filename,
- 'status': 'finished',
- })
- self.try_rename(tmpfilename, filename)
+ 'total_frags': len(fragment_urls),
+ }
+
+ self._prepare_and_start_frag_download(ctx)
+
+ frags_filenames = []
+ for i, frag_url in enumerate(fragment_urls):
+ frag_filename = '%s-Frag%d' % (ctx['tmpfilename'], i)
+ success = ctx['dl'].download(frag_filename, {'url': frag_url})
+ if not success:
+ return False
+ down, frag_sanitized = sanitize_open(frag_filename, 'rb')
+ ctx['dest_stream'].write(down.read())
+ down.close()
+ frags_filenames.append(frag_sanitized)
+
+ self._finish_frag_download(ctx)
+
+ for frag_file in frags_filenames:
+ os.remove(encodeFilename(frag_file))
+
return True
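
The manifest walk above keeps only non-comment lines and resolves relative entries against the manifest URL. The same logic in isolation (the manifest text is a toy example):

    import re
    try:
        from urllib.parse import urljoin  # Python 3
    except ImportError:
        from urlparse import urljoin  # Python 2

    man_url = 'http://example.com/hls/index.m3u8'
    manifest = '#EXTM3U\n#EXTINF:10,\nseg0.ts\n#EXTINF:10,\nhttp://cdn.example.com/seg1.ts\n'

    fragment_urls = []
    for line in manifest.splitlines():
        line = line.strip()
        if line and not line.startswith('#'):  # skip tags and blank lines
            fragment_urls.append(
                line if re.match(r'^https?://', line) else urljoin(man_url, line))

    print(fragment_urls)
    # ['http://example.com/hls/seg0.ts', 'http://cdn.example.com/seg1.ts']
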
diff --git a/youtube_dl/downloader/http.py b/youtube_dl/downloader/http.py
index b7f144af9..a29f5cf31 100644
--- a/youtube_dl/downloader/http.py
+++ b/youtube_dl/downloader/http.py
@@ -4,6 +4,7 @@ import errno
import os
import socket
import time
+import re
from .common import FileDownloader
from ..compat import (
@@ -57,6 +58,24 @@ class HttpFD(FileDownloader):
# Establish connection
try:
data = self.ydl.urlopen(request)
+ # When trying to resume, the Content-Range HTTP header of the response has to
+ # be checked to match the value of the requested Range HTTP header. This is
+ # because some webservers don't support resuming and serve the whole file with
+ # no Content-Range set in the response despite the requested Range (see
+ # https://github.com/rg3/youtube-dl/issues/6057#issuecomment-126129799)
+ if resume_len > 0:
+ content_range = data.headers.get('Content-Range')
+ if content_range:
+ content_range_m = re.search(r'bytes (\d+)-', content_range)
+ # Content-Range is present and matches requested Range, resume is possible
+ if content_range_m and resume_len == int(content_range_m.group(1)):
+ break
+ # Content-Range is either not present or invalid. Assuming the remote webserver
+ # is trying to send the whole file, resume is not possible, so wipe the local
+ # file and perform an entire redownload
+ self.report_unable_to_resume()
+ resume_len = 0
+ open_mode = 'wb'
break
except (compat_urllib_error.HTTPError, ) as err:
if (err.code < 500 or err.code >= 600) and err.code != 416:
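
The resume check boils down to: the server must echo back a Content-Range whose start offset equals the byte we asked to resume from; anything else means the partial file is wiped and the download restarts. The same test in isolation:

    import re

    def can_resume(content_range, resume_len):
        # The first number in 'bytes <start>-<end>/<total>' must equal our offset
        m = re.search(r'bytes (\d+)-', content_range or '')
        return bool(m) and resume_len == int(m.group(1))

    print(can_resume('bytes 1024-2047/4096', 1024))  # True: server honoured the Range
    print(can_resume(None, 1024))  # False: restart from scratch (issue #6057)
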
diff --git a/youtube_dl/extractor/__init__.py b/youtube_dl/extractor/__init__.py
index 06f21064b..57f55b479 100644
--- a/youtube_dl/extractor/__init__.py
+++ b/youtube_dl/extractor/__init__.py
@@ -19,9 +19,14 @@ from .anysex import AnySexIE
from .aol import AolIE
from .allocine import AllocineIE
from .aparat import AparatIE
+from .appleconnect import AppleConnectIE
from .appletrailers import AppleTrailersIE
from .archiveorg import ArchiveOrgIE
-from .ard import ARDIE, ARDMediathekIE
+from .ard import (
+ ARDIE,
+ ARDMediathekIE,
+ SportschauIE,
+)
from .arte import (
ArteTvIE,
ArteTVPlus7IE,
@@ -38,7 +43,10 @@ from .azubu import AzubuIE
from .baidu import BaiduVideoIE
from .bambuser import BambuserIE, BambuserChannelIE
from .bandcamp import BandcampIE, BandcampAlbumIE
-from .bbccouk import BBCCoUkIE
+from .bbc import (
+ BBCCoUkIE,
+ BBCIE,
+)
from .beeg import BeegIE
from .behindkink import BehindKinkIE
from .beatportpro import BeatportProIE
@@ -110,6 +118,7 @@ from .dailymotion import (
)
from .daum import DaumIE
from .dbtv import DBTVIE
+from .dcn import DCNIE
from .dctp import DctpTvIE
from .deezer import DeezerPlaylistIE
from .dfb import DFBIE
@@ -149,6 +158,7 @@ from .eporner import EpornerIE
from .eroprofile import EroProfileIE
from .escapist import EscapistIE
from .espn import ESPNIE
+from .esri import EsriVideoIE
from .everyonesmixtape import EveryonesMixtapeIE
from .exfm import ExfmIE
from .expotv import ExpoTVIE
@@ -231,13 +241,21 @@ from .imdb import (
ImdbIE,
ImdbListIE
)
-from .imgur import ImgurIE
+from .imgur import (
+ ImgurIE,
+ ImgurAlbumIE,
+)
from .ina import InaIE
+from .indavideo import (
+ IndavideoIE,
+ IndavideoEmbedIE,
+)
from .infoq import InfoQIE
from .instagram import InstagramIE, InstagramUserIE
from .internetvideoarchive import InternetVideoArchiveIE
from .iprima import IPrimaIE
from .iqiyi import IqiyiIE
+from .ir90tv import Ir90TvIE
from .ivi import (
IviIE,
IviCompilationIE
@@ -270,6 +288,7 @@ from .kuwo import (
)
from .la7 import LA7IE
from .laola1tv import Laola1TvIE
+from .lecture2go import Lecture2GoIE
from .letv import (
LetvIE,
LetvTvIE,
@@ -324,11 +343,12 @@ from .mtv import (
MTVIE,
MTVServicesEmbeddedIE,
MTVIggyIE,
+ MTVDEIE,
)
from .muenchentv import MuenchenTVIE
from .musicplayon import MusicPlayOnIE
-from .musicvault import MusicVaultIE
from .muzu import MuzuTVIE
+from .mwave import MwaveIE
from .myspace import MySpaceIE, MySpaceAlbumIE
from .myspass import MySpassIE
from .myvi import MyviIE
@@ -342,6 +362,7 @@ from .nbc import (
NBCNewsIE,
NBCSportsIE,
NBCSportsVPlayerIE,
+ MSNBCIE,
)
from .ndr import (
NDRIE,
@@ -421,6 +442,10 @@ from .orf import (
from .parliamentliveuk import ParliamentLiveUKIE
from .patreon import PatreonIE
from .pbs import PBSIE
+from .periscope import (
+ PeriscopeIE,
+ QuickscopeIE,
+)
from .philharmoniedeparis import PhilharmonieDeParisIE
from .phoenix import PhoenixIE
from .photobucket import PhotobucketIE
@@ -429,8 +454,13 @@ from .planetaplay import PlanetaPlayIE
from .pladform import PladformIE
from .played import PlayedIE
from .playfm import PlayFMIE
+from .playtvak import PlaytvakIE
from .playvid import PlayvidIE
from .playwire import PlaywireIE
+from .pluralsight import (
+ PluralsightIE,
+ PluralsightCourseIE,
+)
from .podomatic import PodomaticIE
from .porn91 import Porn91IE
from .pornhd import PornHdIE
@@ -476,6 +506,7 @@ from .rtl2 import RTL2IE
from .rtp import RTPIE
from .rts import RTSIE
from .rtve import RTVEALaCartaIE, RTVELiveIE, RTVEInfantilIE
+from .rtvnh import RTVNHIE
from .ruhd import RUHDIE
from .rutube import (
RutubeIE,
@@ -502,6 +533,7 @@ from .senateisvp import SenateISVPIE
from .servingsys import ServingSysIE
from .sexu import SexuIE
from .sexykarma import SexyKarmaIE
+from .shahid import ShahidIE
from .shared import SharedIE
from .sharesix import ShareSixIE
from .sina import SinaIE
@@ -581,6 +613,7 @@ from .techtalks import TechTalksIE
from .ted import TEDIE
from .telebruxelles import TeleBruxellesIE
from .telecinco import TelecincoIE
+from .telegraaf import TelegraafIE
from .telemb import TeleMBIE
from .teletask import TeleTaskIE
from .tenplay import TenPlayIE
@@ -588,7 +621,10 @@ from .testurl import TestURLIE
from .testtube import TestTubeIE
from .tf1 import TF1IE
from .theonion import TheOnionIE
-from .theplatform import ThePlatformIE
+from .theplatform import (
+ ThePlatformIE,
+ ThePlatformFeedIE,
+)
from .thesixtyone import TheSixtyOneIE
from .thisamericanlife import ThisAmericanLifeIE
from .thisav import ThisAVIE
@@ -672,7 +708,6 @@ from .vgtv import (
from .vh1 import VH1IE
from .vice import ViceIE
from .viddler import ViddlerIE
-from .videobam import VideoBamIE
from .videodetective import VideoDetectiveIE
from .videolecturesnet import VideoLecturesNetIE
from .videofyme import VideofyMeIE
@@ -707,6 +742,7 @@ from .vk import (
VKIE,
VKUserVideosIE,
)
+from .vlive import VLiveIE
from .vodlocker import VodlockerIE
from .voicerepublic import VoiceRepublicIE
from .vporn import VpornIE
diff --git a/youtube_dl/extractor/abc.py b/youtube_dl/extractor/abc.py
index dc0fb85d6..f9a389f67 100644
--- a/youtube_dl/extractor/abc.py
+++ b/youtube_dl/extractor/abc.py
@@ -1,16 +1,20 @@
from __future__ import unicode_literals
import re
-import json
from .common import InfoExtractor
+from ..utils import (
+ ExtractorError,
+ js_to_json,
+ int_or_none,
+)
class ABCIE(InfoExtractor):
IE_NAME = 'abc.net.au'
_VALID_URL = r'http://www\.abc\.net\.au/news/[^/]+/[^/]+/(?P<id>\d+)'
- _TEST = {
+ _TESTS = [{
'url': 'http://www.abc.net.au/news/2014-11-05/australia-to-staff-ebola-treatment-centre-in-sierra-leone/5868334',
'md5': 'cb3dd03b18455a661071ee1e28344d9f',
'info_dict': {
@@ -19,22 +23,47 @@ class ABCIE(InfoExtractor):
'title': 'Australia to help staff Ebola treatment centre in Sierra Leone',
'description': 'md5:809ad29c67a05f54eb41f2a105693a67',
},
- }
+ }, {
+ 'url': 'http://www.abc.net.au/news/2015-08-17/warren-entsch-introduces-same-sex-marriage-bill/6702326',
+ 'md5': 'db2a5369238b51f9811ad815b69dc086',
+ 'info_dict': {
+ 'id': 'NvqvPeNZsHU',
+ 'ext': 'mp4',
+ 'upload_date': '20150816',
+ 'uploader': 'ABC News (Australia)',
+ 'description': 'Government backbencher Warren Entsch introduces a cross-party sponsored bill to legalise same-sex marriage, saying the bill is designed to promote "an inclusive Australia, not a divided one.". Read more here: http://ab.co/1Mwc6ef',
+ 'uploader_id': 'NewsOnABC',
+ 'title': 'Marriage Equality: Warren Entsch introduces same sex marriage bill',
+ },
+ 'add_ie': ['Youtube'],
+ }]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
- urls_info_json = self._search_regex(
- r'inlineVideoData\.push\((.*?)\);', webpage, 'video urls',
- flags=re.DOTALL)
- urls_info = json.loads(urls_info_json.replace('\'', '"'))
+ mobj = re.search(
+ r'inline(?P<type>Video|YouTube)Data\.push\((?P<json_data>[^)]+)\);',
+ webpage)
+ if mobj is None:
+ raise ExtractorError('Unable to extract video urls')
+
+ urls_info = self._parse_json(
+ mobj.group('json_data'), video_id, transform_source=js_to_json)
+
+ if not isinstance(urls_info, list):
+ urls_info = [urls_info]
+
+ if mobj.group('type') == 'YouTube':
+ return self.playlist_result([
+ self.url_result(url_info['url']) for url_info in urls_info])
+
formats = [{
'url': url_info['url'],
- 'width': int(url_info['width']),
- 'height': int(url_info['height']),
- 'tbr': int(url_info['bitrate']),
- 'filesize': int(url_info['filesize']),
+ 'width': int_or_none(url_info.get('width')),
+ 'height': int_or_none(url_info.get('height')),
+ 'tbr': int_or_none(url_info.get('bitrate')),
+ 'filesize': int_or_none(url_info.get('filesize')),
} for url_info in urls_info]
self._sort_formats(formats)
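
The widened regex now also catches the inlineYouTubeData variant. Against a toy page snippet (the HTML and URL are illustrative):

    import re

    webpage = "inlineVideoData.push({'url': 'http://example.com/v.mp4', 'bitrate': 1024});"
    mobj = re.search(
        r'inline(?P<type>Video|YouTube)Data\.push\((?P<json_data>[^)]+)\);',
        webpage)
    print(mobj.group('type'))       # 'Video' ('YouTube' for embedded videos)
    print(mobj.group('json_data'))  # the JS object, later run through js_to_json
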
diff --git a/youtube_dl/extractor/academicearth.py b/youtube_dl/extractor/academicearth.py
index 47313fba8..34095501c 100644
--- a/youtube_dl/extractor/academicearth.py
+++ b/youtube_dl/extractor/academicearth.py
@@ -15,7 +15,7 @@ class AcademicEarthCourseIE(InfoExtractor):
'title': 'Laws of Nature',
'description': 'Introduce yourself to the laws of nature with these free online college lectures from Yale, Harvard, and MIT.',
},
- 'playlist_count': 4,
+ 'playlist_count': 3,
}
def _real_extract(self, url):
diff --git a/youtube_dl/extractor/adultswim.py b/youtube_dl/extractor/adultswim.py
index 39335b827..4327c2f61 100644
--- a/youtube_dl/extractor/adultswim.py
+++ b/youtube_dl/extractor/adultswim.py
@@ -156,7 +156,7 @@ class AdultSwimIE(InfoExtractor):
xpath_text(idoc, './/trt', 'segment duration').strip())
formats = []
- file_els = idoc.findall('.//files/file')
+ file_els = idoc.findall('.//files/file') or idoc.findall('./files/file')
for file_el in file_els:
bitrate = file_el.attrib.get('bitrate')
diff --git a/youtube_dl/extractor/airmozilla.py b/youtube_dl/extractor/airmozilla.py
index 611ad1e9d..f8e70f4e5 100644
--- a/youtube_dl/extractor/airmozilla.py
+++ b/youtube_dl/extractor/airmozilla.py
@@ -20,14 +20,14 @@ class AirMozillaIE(InfoExtractor):
'id': '6x4q2w',
'ext': 'mp4',
'title': 'Privacy Lab - a meetup for privacy minded people in San Francisco',
- 'thumbnail': 're:https://\w+\.cloudfront\.net/6x4q2w/poster\.jpg\?t=\d+',
+ 'thumbnail': 're:https?://vid\.ly/(?P<id>[0-9a-z-]+)/poster',
'description': 'Brings together privacy professionals and others interested in privacy at for-profits, non-profits, and NGOs in an effort to contribute to the state of the ecosystem...',
'timestamp': 1422487800,
'upload_date': '20150128',
'location': 'SFO Commons',
'duration': 3780,
'view_count': int,
- 'categories': ['Main'],
+ 'categories': ['Main', 'Privacy'],
}
}
diff --git a/youtube_dl/extractor/aljazeera.py b/youtube_dl/extractor/aljazeera.py
index 612708e25..184a14a4f 100644
--- a/youtube_dl/extractor/aljazeera.py
+++ b/youtube_dl/extractor/aljazeera.py
@@ -16,6 +16,7 @@ class AlJazeeraIE(InfoExtractor):
'uploader': 'Al Jazeera English',
},
'add_ie': ['Brightcove'],
+ 'skip': 'Not accessible from Travis CI server',
}
def _real_extract(self, url):
diff --git a/youtube_dl/extractor/appleconnect.py b/youtube_dl/extractor/appleconnect.py
new file mode 100644
index 000000000..ea7a70393
--- /dev/null
+++ b/youtube_dl/extractor/appleconnect.py
@@ -0,0 +1,50 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..utils import (
+ str_to_int,
+ ExtractorError
+)
+
+
+class AppleConnectIE(InfoExtractor):
+ _VALID_URL = r'https?://itunes\.apple\.com/\w{0,2}/?post/idsa\.(?P<id>[\w-]+)'
+ _TEST = {
+ 'url': 'https://itunes.apple.com/us/post/idsa.4ab17a39-2720-11e5-96c5-a5b38f6c42d3',
+ 'md5': '10d0f2799111df4cb1c924520ca78f98',
+ 'info_dict': {
+ 'id': '4ab17a39-2720-11e5-96c5-a5b38f6c42d3',
+ 'ext': 'm4v',
+ 'title': 'Energy',
+ 'uploader': 'Drake',
+ 'thumbnail': 'http://is5.mzstatic.com/image/thumb/Video5/v4/78/61/c5/7861c5fa-ad6d-294b-1464-cf7605b911d6/source/1920x1080sr.jpg',
+ 'upload_date': '20150710',
+ 'timestamp': 1436545535,
+ },
+ }
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+ webpage = self._download_webpage(url, video_id)
+
+ try:
+ video_json = self._html_search_regex(
+ r'class="auc-video-data">(\{.*?\})', webpage, 'json')
+ except ExtractorError:
+ raise ExtractorError('This post doesn\'t contain a video', expected=True)
+
+ video_data = self._parse_json(video_json, video_id)
+ timestamp = str_to_int(self._html_search_regex(r'data-timestamp="(\d+)"', webpage, 'timestamp'))
+ like_count = str_to_int(self._html_search_regex(r'(\d+) Loves', webpage, 'like count'))
+
+ return {
+ 'id': video_id,
+ 'url': video_data['sslSrc'],
+ 'title': video_data['title'],
+ 'description': video_data['description'],
+ 'uploader': video_data['artistName'],
+ 'thumbnail': video_data['artworkUrl'],
+ 'timestamp': timestamp,
+ 'like_count': like_count,
+ }
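Not part of the patch, but a quick way to exercise the new extractor from Python, reusing the URL from the test case above:

    import youtube_dl

    ydl = youtube_dl.YoutubeDL()
    info = ydl.extract_info(
        'https://itunes.apple.com/us/post/idsa.4ab17a39-2720-11e5-96c5-a5b38f6c42d3',
        download=False)
    print(info['title'] + ' - ' + info['uploader'])  # Energy - Drake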
diff --git a/youtube_dl/extractor/ard.py b/youtube_dl/extractor/ard.py
index 6a35ea463..6f465789b 100644
--- a/youtube_dl/extractor/ard.py
+++ b/youtube_dl/extractor/ard.py
@@ -8,6 +8,7 @@ from .generic import GenericIE
from ..utils import (
determine_ext,
ExtractorError,
+ get_element_by_attribute,
qualities,
int_or_none,
parse_duration,
@@ -22,19 +23,125 @@ class ARDMediathekIE(InfoExtractor):
_VALID_URL = r'^https?://(?:(?:www\.)?ardmediathek\.de|mediathek\.daserste\.de)/(?:.*/)(?P<video_id>[0-9]+|[^0-9][^/\?]+)[^/\?]*(?:\?.*)?'
_TESTS = [{
- 'url': 'http://mediathek.daserste.de/sendungen_a-z/328454_anne-will/22429276_vertrauen-ist-gut-spionieren-ist-besser-geht',
- 'only_matching': True,
+ 'url': 'http://www.ardmediathek.de/tv/Dokumentation-und-Reportage/Ich-liebe-das-Leben-trotzdem/rbb-Fernsehen/Video?documentId=29582122&bcastId=3822114',
+ 'info_dict': {
+ 'id': '29582122',
+ 'ext': 'mp4',
+ 'title': 'Ich liebe das Leben trotzdem',
+ 'description': 'md5:45e4c225c72b27993314b31a84a5261c',
+ 'duration': 4557,
+ },
+ 'params': {
+ # m3u8 download
+ 'skip_download': True,
+ },
}, {
- 'url': 'http://www.ardmediathek.de/tv/Tatort/Das-Wunder-von-Wolbeck-Video-tgl-ab-20/Das-Erste/Video?documentId=22490580&bcastId=602916',
+ 'url': 'http://www.ardmediathek.de/tv/Tatort/Tatort-Scheinwelten-H%C3%B6rfassung-Video/Das-Erste/Video?documentId=29522730&bcastId=602916',
+ 'md5': 'f4d98b10759ac06c0072bbcd1f0b9e3e',
'info_dict': {
- 'id': '22490580',
+ 'id': '29522730',
'ext': 'mp4',
- 'title': 'Das Wunder von Wolbeck (Video tgl. ab 20 Uhr)',
- 'description': 'Auf einem restaurierten Hof bei Wolbeck wird der Heilpraktiker Raffael Lembeck eines morgens von seiner Frau Stella tot aufgefunden. Das Opfer war offensichtlich in seiner Praxis zu Fall gekommen und ist dann verblutet, erklärt Prof. Boerne am Tatort.',
+ 'title': 'Tatort: Scheinwelten - Hörfassung (Video tgl. ab 20 Uhr)',
+ 'description': 'md5:196392e79876d0ac94c94e8cdb2875f1',
+ 'duration': 5252,
},
- 'skip': 'Blocked outside of Germany',
+ }, {
+ # audio
+ 'url': 'http://www.ardmediathek.de/tv/WDR-H%C3%B6rspiel-Speicher/Tod-eines-Fu%C3%9Fballers/WDR-3/Audio-Podcast?documentId=28488308&bcastId=23074086',
+ 'md5': '219d94d8980b4f538c7fcb0865eb7f2c',
+ 'info_dict': {
+ 'id': '28488308',
+ 'ext': 'mp3',
+ 'title': 'Tod eines Fußballers',
+ 'description': 'md5:f6e39f3461f0e1f54bfa48c8875c86ef',
+ 'duration': 3240,
+ },
+ }, {
+ 'url': 'http://mediathek.daserste.de/sendungen_a-z/328454_anne-will/22429276_vertrauen-ist-gut-spionieren-ist-besser-geht',
+ 'only_matching': True,
}]
+ def _extract_media_info(self, media_info_url, webpage, video_id):
+ media_info = self._download_json(
+ media_info_url, video_id, 'Downloading media JSON')
+
+ formats = self._extract_formats(media_info, video_id)
+
+ if not formats:
+ if '"fsk"' in webpage:
+ raise ExtractorError(
+ 'This video is only available after 20:00', expected=True)
+ elif media_info.get('_geoblocked'):
+ raise ExtractorError('This video is not available due to geo restriction', expected=True)
+
+ self._sort_formats(formats)
+
+ duration = int_or_none(media_info.get('_duration'))
+ thumbnail = media_info.get('_previewImage')
+
+ subtitles = {}
+ subtitle_url = media_info.get('_subtitleUrl')
+ if subtitle_url:
+ subtitles['de'] = [{
+ 'ext': 'srt',
+ 'url': subtitle_url,
+ }]
+
+ return {
+ 'id': video_id,
+ 'duration': duration,
+ 'thumbnail': thumbnail,
+ 'formats': formats,
+ 'subtitles': subtitles,
+ }
+
+ def _extract_formats(self, media_info, video_id):
+ type_ = media_info.get('_type')
+ media_array = media_info.get('_mediaArray', [])
+ formats = []
+ for num, media in enumerate(media_array):
+ for stream in media.get('_mediaStreamArray', []):
+ stream_urls = stream.get('_stream')
+ if not stream_urls:
+ continue
+ if not isinstance(stream_urls, list):
+ stream_urls = [stream_urls]
+ quality = stream.get('_quality')
+ server = stream.get('_server')
+ for stream_url in stream_urls:
+ ext = determine_ext(stream_url)
+ if ext == 'f4m':
+ formats.extend(self._extract_f4m_formats(
+ stream_url + '?hdcore=3.1.1&plugin=aasp-3.1.1.69.124',
+ video_id, preference=-1, f4m_id='hds'))
+ elif ext == 'm3u8':
+ formats.extend(self._extract_m3u8_formats(
+ stream_url, video_id, 'mp4', preference=1, m3u8_id='hls'))
+ else:
+ if server and server.startswith('rtmp'):
+ f = {
+ 'url': server,
+ 'play_path': stream_url,
+ 'format_id': 'a%s-rtmp-%s' % (num, quality),
+ }
+ elif stream_url.startswith('http'):
+ f = {
+ 'url': stream_url,
+ 'format_id': 'a%s-%s-%s' % (num, ext, quality)
+ }
+ else:
+ continue
+ m = re.search(r'_(?P<width>\d+)x(?P<height>\d+)\.mp4$', stream_url)
+ if m:
+ f.update({
+ 'width': int(m.group('width')),
+ 'height': int(m.group('height')),
+ })
+ if type_ == 'audio':
+ f['vcodec'] = 'none'
+ formats.append(f)
+ return formats
+
def _real_extract(self, url):
# determine video id from url
m = re.match(self._VALID_URL, url)
@@ -92,46 +199,22 @@ class ARDMediathekIE(InfoExtractor):
'format_id': fid,
'url': furl,
})
+ self._sort_formats(formats)
+ info = {
+ 'formats': formats,
+ }
else: # request JSON file
- media_info = self._download_json(
- 'http://www.ardmediathek.de/play/media/%s' % video_id, video_id)
- # The second element of the _mediaArray contains the standard http urls
- streams = media_info['_mediaArray'][1]['_mediaStreamArray']
- if not streams:
- if '"fsk"' in webpage:
- raise ExtractorError('This video is only available after 20:00')
-
- formats = []
- for s in streams:
- if type(s['_stream']) == list:
- for index, url in enumerate(s['_stream'][::-1]):
- quality = s['_quality'] + index
- formats.append({
- 'quality': quality,
- 'url': url,
- 'format_id': '%s-%s' % (determine_ext(url), quality)
- })
- continue
-
- format = {
- 'quality': s['_quality'],
- 'url': s['_stream'],
- }
-
- format['format_id'] = '%s-%s' % (
- determine_ext(format['url']), format['quality'])
+ info = self._extract_media_info(
+ 'http://www.ardmediathek.de/play/media/%s' % video_id, webpage, video_id)
- formats.append(format)
-
- self._sort_formats(formats)
-
- return {
+ info.update({
'id': video_id,
'title': title,
'description': description,
- 'formats': formats,
'thumbnail': thumbnail,
- }
+ })
+
+ return info
class ARDIE(InfoExtractor):
@@ -189,3 +272,41 @@ class ARDIE(InfoExtractor):
'upload_date': upload_date,
'thumbnail': thumbnail,
}
+
+
+class SportschauIE(ARDMediathekIE):
+ IE_NAME = 'Sportschau'
+ _VALID_URL = r'(?P<baseurl>https?://(?:www\.)?sportschau\.de/(?:[^/]+/)+video(?P<id>[^/#?]+))\.html'
+ _TESTS = [{
+ 'url': 'http://www.sportschau.de/tourdefrance/videoseppeltkokainhatnichtsmitklassischemdopingzutun100.html',
+ 'info_dict': {
+ 'id': 'seppeltkokainhatnichtsmitklassischemdopingzutun100',
+ 'ext': 'mp4',
+ 'title': 'Seppelt: "Kokain hat nichts mit klassischem Doping zu tun"',
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ 'description': 'Der ARD-Doping Experte Hajo Seppelt gibt seine Einschätzung zum ersten Dopingfall der diesjährigen Tour de France um den Italiener Luca Paolini ab.',
+ },
+ 'params': {
+ # m3u8 download
+ 'skip_download': True,
+ },
+ }]
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ video_id = mobj.group('id')
+ base_url = mobj.group('baseurl')
+
+ webpage = self._download_webpage(url, video_id)
+ title = get_element_by_attribute('class', 'headline', webpage)
+ description = self._html_search_meta('description', webpage, 'description')
+
+ info = self._extract_media_info(
+ base_url + '-mc_defaultQuality-h.json', webpage, video_id)
+
+ info.update({
+ 'title': title,
+ 'description': description,
+ })
+
+ return info
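The new `_extract_media_info`/`_extract_formats` pair centralises handling of ARD's media JSON and dispatches each stream on its extension: f4m becomes HDS, m3u8 becomes HLS, and everything else is treated as progressive HTTP or RTMP, with width/height recovered from `_WxH.mp4`-style filenames. A hypothetical excerpt of the JSON shape this code expects -- the key names match what the extractor reads, the values are invented:

    media_info = {
        '_type': 'video',
        '_duration': 4557,
        '_previewImage': 'http://example.invalid/preview.jpg',
        '_subtitleUrl': None,
        '_mediaArray': [{
            '_mediaStreamArray': [{
                '_quality': 3,
                '_server': None,
                # the _960x540 suffix would be parsed out as width/height
                '_stream': 'http://example.invalid/video_960x540.mp4',
            }],
        }],
    }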
diff --git a/youtube_dl/extractor/bbc.py b/youtube_dl/extractor/bbc.py
new file mode 100644
index 000000000..abc5a44a1
--- /dev/null
+++ b/youtube_dl/extractor/bbc.py
@@ -0,0 +1,780 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+import xml.etree.ElementTree
+
+from .common import InfoExtractor
+from ..utils import (
+ ExtractorError,
+ float_or_none,
+ int_or_none,
+ parse_duration,
+ parse_iso8601,
+)
+from ..compat import compat_HTTPError
+
+
+class BBCCoUkIE(InfoExtractor):
+ IE_NAME = 'bbc.co.uk'
+ IE_DESC = 'BBC iPlayer'
+ _VALID_URL = r'https?://(?:www\.)?bbc\.co\.uk/(?:(?:(?:programmes|iplayer(?:/[^/]+)?/(?:episode|playlist))/)|music/clips[/#])(?P<id>[\da-z]{8})'
+
+ _MEDIASELECTOR_URLS = [
+ 'http://open.live.bbc.co.uk/mediaselector/5/select/version/2.0/mediaset/pc/vpid/%s',
+ ]
+
+ _TESTS = [
+ {
+ 'url': 'http://www.bbc.co.uk/programmes/b039g8p7',
+ 'info_dict': {
+ 'id': 'b039d07m',
+ 'ext': 'flv',
+ 'title': 'Kaleidoscope, Leonard Cohen',
+ 'description': 'The Canadian poet and songwriter reflects on his musical career.',
+ 'duration': 1740,
+ },
+ 'params': {
+ # rtmp download
+ 'skip_download': True,
+ }
+ },
+ {
+ 'url': 'http://www.bbc.co.uk/iplayer/episode/b00yng5w/The_Man_in_Black_Series_3_The_Printed_Name/',
+ 'info_dict': {
+ 'id': 'b00yng1d',
+ 'ext': 'flv',
+ 'title': 'The Man in Black: Series 3: The Printed Name',
+ 'description': "Mark Gatiss introduces Nicholas Pierpan's chilling tale of a writer's devilish pact with a mysterious man. Stars Ewan Bailey.",
+ 'duration': 1800,
+ },
+ 'params': {
+ # rtmp download
+ 'skip_download': True,
+ },
+ 'skip': 'Episode is no longer available on BBC iPlayer Radio',
+ },
+ {
+ 'url': 'http://www.bbc.co.uk/iplayer/episode/b03vhd1f/The_Voice_UK_Series_3_Blind_Auditions_5/',
+ 'info_dict': {
+ 'id': 'b00yng1d',
+ 'ext': 'flv',
+ 'title': 'The Voice UK: Series 3: Blind Auditions 5',
+ 'description': "Emma Willis and Marvin Humes present the fifth set of blind auditions in the singing competition, as the coaches continue to build their teams based on voice alone.",
+ 'duration': 5100,
+ },
+ 'params': {
+ # rtmp download
+ 'skip_download': True,
+ },
+ 'skip': 'Currently BBC iPlayer TV programmes are available to play in the UK only',
+ },
+ {
+ 'url': 'http://www.bbc.co.uk/iplayer/episode/p026c7jt/tomorrows-worlds-the-unearthly-history-of-science-fiction-2-invasion',
+ 'info_dict': {
+ 'id': 'b03k3pb7',
+ 'ext': 'flv',
+ 'title': "Tomorrow's Worlds: The Unearthly History of Science Fiction",
+ 'description': '2. Invasion',
+ 'duration': 3600,
+ },
+ 'params': {
+ # rtmp download
+ 'skip_download': True,
+ },
+ 'skip': 'Currently BBC iPlayer TV programmes are available to play in the UK only',
+ }, {
+ 'url': 'http://www.bbc.co.uk/programmes/b04v20dw',
+ 'info_dict': {
+ 'id': 'b04v209v',
+ 'ext': 'flv',
+ 'title': 'Pete Tong, The Essential New Tune Special',
+ 'description': "Pete has a very special mix - all of 2014's Essential New Tunes!",
+ 'duration': 10800,
+ },
+ 'params': {
+ # rtmp download
+ 'skip_download': True,
+ }
+ }, {
+ 'url': 'http://www.bbc.co.uk/music/clips/p02frcc3',
+ 'note': 'Audio',
+ 'info_dict': {
+ 'id': 'p02frcch',
+ 'ext': 'flv',
+ 'title': 'Pete Tong, Past, Present and Future Special, Madeon - After Hours mix',
+ 'description': 'French house superstar Madeon takes us out of the club and onto the after party.',
+ 'duration': 3507,
+ },
+ 'params': {
+ # rtmp download
+ 'skip_download': True,
+ }
+ }, {
+ 'url': 'http://www.bbc.co.uk/music/clips/p025c0zz',
+ 'note': 'Video',
+ 'info_dict': {
+ 'id': 'p025c103',
+ 'ext': 'flv',
+ 'title': 'Reading and Leeds Festival, 2014, Rae Morris - Closer (Live on BBC Three)',
+ 'description': 'Rae Morris performs Closer for BBC Three at Reading 2014',
+ 'duration': 226,
+ },
+ 'params': {
+ # rtmp download
+ 'skip_download': True,
+ }
+ }, {
+ 'url': 'http://www.bbc.co.uk/iplayer/episode/b054fn09/ad/natural-world-20152016-2-super-powered-owls',
+ 'info_dict': {
+ 'id': 'p02n76xf',
+ 'ext': 'flv',
+ 'title': 'Natural World, 2015-2016: 2. Super Powered Owls',
+ 'description': 'md5:e4db5c937d0e95a7c6b5e654d429183d',
+ 'duration': 3540,
+ },
+ 'params': {
+ # rtmp download
+ 'skip_download': True,
+ },
+ 'skip': 'geolocation',
+ }, {
+ 'url': 'http://www.bbc.co.uk/iplayer/episode/b05zmgwn/royal-academy-summer-exhibition',
+ 'info_dict': {
+ 'id': 'b05zmgw1',
+ 'ext': 'flv',
+ 'description': 'Kirsty Wark and Morgan Quaintance visit the Royal Academy as it prepares for its annual artistic extravaganza, meeting people who have come together to make the show unique.',
+ 'title': 'Royal Academy Summer Exhibition',
+ 'duration': 3540,
+ },
+ 'params': {
+ # rtmp download
+ 'skip_download': True,
+ },
+ 'skip': 'geolocation',
+ }, {
+ 'url': 'http://www.bbc.co.uk/iplayer/playlist/p01dvks4',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://www.bbc.co.uk/music/clips#p02frcc3',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://www.bbc.co.uk/iplayer/cbeebies/episode/b0480276/bing-14-atchoo',
+ 'only_matching': True,
+ }
+ ]
+
+ class MediaSelectionError(Exception):
+ def __init__(self, id):
+ self.id = id
+
+ def _extract_asx_playlist(self, connection, programme_id):
+ asx = self._download_xml(connection.get('href'), programme_id, 'Downloading ASX playlist')
+ return [ref.get('href') for ref in asx.findall('./Entry/ref')]
+
+ def _extract_connection(self, connection, programme_id):
+ formats = []
+ protocol = connection.get('protocol')
+ supplier = connection.get('supplier')
+ if protocol == 'http':
+ href = connection.get('href')
+ transfer_format = connection.get('transferFormat')
+ # ASX playlist
+ if supplier == 'asx':
+ for i, ref in enumerate(self._extract_asx_playlist(connection, programme_id)):
+ formats.append({
+ 'url': ref,
+ 'format_id': 'ref%s_%s' % (i, supplier),
+ })
+ # Skip DASH until supported
+ elif transfer_format == 'dash':
+ pass
+ # Direct link
+ else:
+ formats.append({
+ 'url': href,
+ 'format_id': supplier,
+ })
+ elif protocol == 'rtmp':
+ application = connection.get('application', 'ondemand')
+ auth_string = connection.get('authString')
+ identifier = connection.get('identifier')
+ server = connection.get('server')
+ formats.append({
+ 'url': '%s://%s/%s?%s' % (protocol, server, application, auth_string),
+ 'play_path': identifier,
+ 'app': '%s?%s' % (application, auth_string),
+ 'page_url': 'http://www.bbc.co.uk',
+ 'player_url': 'http://www.bbc.co.uk/emp/releases/iplayer/revisions/617463_618125_4/617463_618125_4_emp.swf',
+ 'rtmp_live': False,
+ 'ext': 'flv',
+ 'format_id': supplier,
+ })
+ return formats
+
+ def _extract_items(self, playlist):
+ return playlist.findall('./{http://bbc.co.uk/2008/emp/playlist}item')
+
+ def _extract_medias(self, media_selection):
+ error = media_selection.find('./{http://bbc.co.uk/2008/mp/mediaselection}error')
+ if error is not None:
+ raise BBCCoUkIE.MediaSelectionError(error.get('id'))
+ return media_selection.findall('./{http://bbc.co.uk/2008/mp/mediaselection}media')
+
+ def _extract_connections(self, media):
+ return media.findall('./{http://bbc.co.uk/2008/mp/mediaselection}connection')
+
+ def _extract_video(self, media, programme_id):
+ formats = []
+ vbr = int_or_none(media.get('bitrate'))
+ vcodec = media.get('encoding')
+ service = media.get('service')
+ width = int_or_none(media.get('width'))
+ height = int_or_none(media.get('height'))
+ file_size = int_or_none(media.get('media_file_size'))
+ for connection in self._extract_connections(media):
+ conn_formats = self._extract_connection(connection, programme_id)
+ for format in conn_formats:
+ format.update({
+ 'format_id': '%s_%s' % (service, format['format_id']),
+ 'width': width,
+ 'height': height,
+ 'vbr': vbr,
+ 'vcodec': vcodec,
+ 'filesize': file_size,
+ })
+ formats.extend(conn_formats)
+ return formats
+
+ def _extract_audio(self, media, programme_id):
+ formats = []
+ abr = int_or_none(media.get('bitrate'))
+ acodec = media.get('encoding')
+ service = media.get('service')
+ for connection in self._extract_connections(media):
+ conn_formats = self._extract_connection(connection, programme_id)
+ for format in conn_formats:
+ format.update({
+ 'format_id': '%s_%s' % (service, format['format_id']),
+ 'abr': abr,
+ 'acodec': acodec,
+ })
+ formats.extend(conn_formats)
+ return formats
+
+ def _get_subtitles(self, media, programme_id):
+ subtitles = {}
+ for connection in self._extract_connections(media):
+ captions = self._download_xml(connection.get('href'), programme_id, 'Downloading captions')
+ lang = captions.get('{http://www.w3.org/XML/1998/namespace}lang', 'en')
+ subtitles[lang] = [
+ {
+ 'url': connection.get('href'),
+ 'ext': 'ttml',
+ },
+ ]
+ return subtitles
+
+ def _raise_extractor_error(self, media_selection_error):
+ raise ExtractorError(
+ '%s returned error: %s' % (self.IE_NAME, media_selection_error.id),
+ expected=True)
+
+ def _download_media_selector(self, programme_id):
+ last_exception = None
+ for mediaselector_url in self._MEDIASELECTOR_URLS:
+ try:
+ return self._download_media_selector_url(
+ mediaselector_url % programme_id, programme_id)
+ except BBCCoUkIE.MediaSelectionError as e:
+ if e.id == 'notukerror':
+ last_exception = e
+ continue
+ self._raise_extractor_error(e)
+ self._raise_extractor_error(last_exception)
+
+ def _download_media_selector_url(self, url, programme_id=None):
+ try:
+ media_selection = self._download_xml(
+ url, programme_id, 'Downloading media selection XML')
+ except ExtractorError as ee:
+ if isinstance(ee.cause, compat_HTTPError) and ee.cause.code == 403:
+ media_selection = xml.etree.ElementTree.fromstring(ee.cause.read().decode('utf-8'))
+ else:
+ raise
+ return self._process_media_selector(media_selection, programme_id)
+
+ def _process_media_selector(self, media_selection, programme_id):
+ formats = []
+ subtitles = None
+
+ for media in self._extract_medias(media_selection):
+ kind = media.get('kind')
+ if kind == 'audio':
+ formats.extend(self._extract_audio(media, programme_id))
+ elif kind == 'video':
+ formats.extend(self._extract_video(media, programme_id))
+ elif kind == 'captions':
+ subtitles = self.extract_subtitles(media, programme_id)
+ return formats, subtitles
+
+ def _download_playlist(self, playlist_id):
+ try:
+ playlist = self._download_json(
+ 'http://www.bbc.co.uk/programmes/%s/playlist.json' % playlist_id,
+ playlist_id, 'Downloading playlist JSON')
+
+ version = playlist.get('defaultAvailableVersion')
+ if version:
+ smp_config = version['smpConfig']
+ title = smp_config['title']
+ description = smp_config['summary']
+ for item in smp_config['items']:
+ kind = item['kind']
+ if kind != 'programme' and kind != 'radioProgramme':
+ continue
+ programme_id = item.get('vpid')
+ duration = int_or_none(item.get('duration'))
+ formats, subtitles = self._download_media_selector(programme_id)
+ return programme_id, title, description, duration, formats, subtitles
+ except ExtractorError as ee:
+ if not (isinstance(ee.cause, compat_HTTPError) and ee.cause.code == 404):
+ raise
+
+ # fallback to legacy playlist
+ return self._process_legacy_playlist(playlist_id)
+
+ def _process_legacy_playlist_url(self, url, display_id):
+ playlist = self._download_legacy_playlist_url(url, display_id)
+ return self._extract_from_legacy_playlist(playlist, display_id)
+
+ def _process_legacy_playlist(self, playlist_id):
+ return self._process_legacy_playlist_url(
+ 'http://www.bbc.co.uk/iplayer/playlist/%s' % playlist_id, playlist_id)
+
+ def _download_legacy_playlist_url(self, url, playlist_id=None):
+ return self._download_xml(
+ url, playlist_id, 'Downloading legacy playlist XML')
+
+ def _extract_from_legacy_playlist(self, playlist, playlist_id):
+ no_items = playlist.find('./{http://bbc.co.uk/2008/emp/playlist}noItems')
+ if no_items is not None:
+ reason = no_items.get('reason')
+ if reason == 'preAvailability':
+ msg = 'Episode %s is not yet available' % playlist_id
+ elif reason == 'postAvailability':
+ msg = 'Episode %s is no longer available' % playlist_id
+ elif reason == 'noMedia':
+ msg = 'Episode %s is not currently available' % playlist_id
+ else:
+ msg = 'Episode %s is not available: %s' % (playlist_id, reason)
+ raise ExtractorError(msg, expected=True)
+
+ for item in self._extract_items(playlist):
+ kind = item.get('kind')
+ if kind != 'programme' and kind != 'radioProgramme':
+ continue
+ title = playlist.find('./{http://bbc.co.uk/2008/emp/playlist}title').text
+ description = playlist.find('./{http://bbc.co.uk/2008/emp/playlist}summary').text
+
+ def get_programme_id(item):
+ def get_from_attributes(item):
+                for p in ('identifier', 'group'):
+ value = item.get(p)
+ if value and re.match(r'^[pb][\da-z]{7}$', value):
+ return value
+            value = get_from_attributes(item)
+            if value:
+                return value
+ mediator = item.find('./{http://bbc.co.uk/2008/emp/playlist}mediator')
+ if mediator is not None:
+ return get_from_attributes(mediator)
+
+ programme_id = get_programme_id(item)
+ duration = int_or_none(item.get('duration'))
+ # TODO: programme_id can be None and media items can be incorporated right inside
+ # playlist's item (e.g. http://www.bbc.com/turkce/haberler/2015/06/150615_telabyad_kentin_cogu)
+ # as f4m and m3u8
+ formats, subtitles = self._download_media_selector(programme_id)
+
+ return programme_id, title, description, duration, formats, subtitles
+
+ def _real_extract(self, url):
+ group_id = self._match_id(url)
+
+ webpage = self._download_webpage(url, group_id, 'Downloading video page')
+
+ programme_id = None
+
+ tviplayer = self._search_regex(
+ r'mediator\.bind\(({.+?})\s*,\s*document\.getElementById',
+ webpage, 'player', default=None)
+
+ if tviplayer:
+ player = self._parse_json(tviplayer, group_id).get('player', {})
+ duration = int_or_none(player.get('duration'))
+ programme_id = player.get('vpid')
+
+ if not programme_id:
+ programme_id = self._search_regex(
+ r'"vpid"\s*:\s*"([\da-z]{8})"', webpage, 'vpid', fatal=False, default=None)
+
+ if programme_id:
+ formats, subtitles = self._download_media_selector(programme_id)
+ title = self._og_search_title(webpage)
+ description = self._search_regex(
+ r'<p class="[^"]*medium-description[^"]*">([^<]+)</p>',
+ webpage, 'description', fatal=False)
+ else:
+ programme_id, title, description, duration, formats, subtitles = self._download_playlist(group_id)
+
+ self._sort_formats(formats)
+
+ return {
+ 'id': programme_id,
+ 'title': title,
+ 'description': description,
+ 'thumbnail': self._og_search_thumbnail(webpage, default=None),
+ 'duration': duration,
+ 'formats': formats,
+ 'subtitles': subtitles,
+ }
+
+
+class BBCIE(BBCCoUkIE):
+ IE_NAME = 'bbc'
+ IE_DESC = 'BBC'
+ _VALID_URL = r'https?://(?:www\.)?bbc\.(?:com|co\.uk)/(?:[^/]+/)+(?P<id>[^/#?]+)'
+
+ _MEDIASELECTOR_URLS = [
+        # Provides more formats, namely direct mp4 links, but fails with
+        # notukerror for non-UK (?) users on some videos (e.g.
+ # http://www.bbc.com/travel/story/20150625-sri-lankas-spicy-secret)
+ 'http://open.live.bbc.co.uk/mediaselector/4/mtis/stream/%s',
+ # Provides fewer formats, but works everywhere for everybody (hopefully)
+ 'http://open.live.bbc.co.uk/mediaselector/5/select/version/2.0/mediaset/journalism-pc/vpid/%s',
+ ]
+
+ _TESTS = [{
+ # article with multiple videos embedded with data-media-meta containing
+ # playlist.sxml, externalId and no direct video links
+ 'url': 'http://www.bbc.com/news/world-europe-32668511',
+ 'info_dict': {
+ 'id': 'world-europe-32668511',
+ 'title': 'Russia stages massive WW2 parade despite Western boycott',
+ 'description': 'md5:00ff61976f6081841f759a08bf78cc9c',
+ },
+ 'playlist_count': 2,
+ }, {
+ # article with multiple videos embedded with data-media-meta (more videos)
+ 'url': 'http://www.bbc.com/news/business-28299555',
+ 'info_dict': {
+ 'id': 'business-28299555',
+ 'title': 'Farnborough Airshow: Video highlights',
+ 'description': 'BBC reports and video highlights at the Farnborough Airshow.',
+ },
+ 'playlist_count': 9,
+ 'skip': 'Save time',
+ }, {
+ # article with multiple videos embedded with `new SMP()`
+ 'url': 'http://www.bbc.co.uk/blogs/adamcurtis/entries/3662a707-0af9-3149-963f-47bea720b460',
+ 'info_dict': {
+ 'id': '3662a707-0af9-3149-963f-47bea720b460',
+ 'title': 'BBC Blogs - Adam Curtis - BUGGER',
+ },
+ 'playlist_count': 18,
+ }, {
+ # single video embedded with mediaAssetPage.init()
+ 'url': 'http://www.bbc.com/news/world-europe-32041533',
+ 'info_dict': {
+ 'id': 'p02mprgb',
+ 'ext': 'mp4',
+ 'title': 'Aerial footage showed the site of the crash in the Alps - courtesy BFM TV',
+ 'duration': 47,
+ 'timestamp': 1427219242,
+ 'upload_date': '20150324',
+ },
+ 'params': {
+ # rtmp download
+ 'skip_download': True,
+ }
+ }, {
+ # article with single video embedded with data-media-meta containing
+ # direct video links (for now these are extracted) and playlist.xml (with
+ # media items as f4m and m3u8 - currently unsupported)
+ 'url': 'http://www.bbc.com/turkce/haberler/2015/06/150615_telabyad_kentin_cogu',
+ 'info_dict': {
+ 'id': '150615_telabyad_kentin_cogu',
+ 'ext': 'mp4',
+ 'title': "YPG: Tel Abyad'ın tamamı kontrolümüzde",
+ 'duration': 47,
+ 'timestamp': 1434397334,
+ 'upload_date': '20150615',
+ },
+ 'params': {
+ 'skip_download': True,
+ }
+ }, {
+ # single video embedded with mediaAssetPage.init() (regional section)
+ 'url': 'http://www.bbc.com/mundo/video_fotos/2015/06/150619_video_honduras_militares_hospitales_corrupcion_aw',
+ 'info_dict': {
+ 'id': '150619_video_honduras_militares_hospitales_corrupcion_aw',
+ 'ext': 'mp4',
+ 'title': 'Honduras militariza sus hospitales por nuevo escándalo de corrupción',
+ 'duration': 87,
+ 'timestamp': 1434713142,
+ 'upload_date': '20150619',
+ },
+ 'params': {
+ 'skip_download': True,
+ }
+ }, {
+ # single video from video playlist embedded with vxp-playlist-data JSON
+ 'url': 'http://www.bbc.com/news/video_and_audio/must_see/33376376',
+ 'info_dict': {
+ 'id': 'p02w6qjc',
+ 'ext': 'mp4',
+ 'title': '''Judge Mindy Glazer: "I'm sorry to see you here... I always wondered what happened to you"''',
+ 'duration': 56,
+ },
+ 'params': {
+ 'skip_download': True,
+ }
+ }, {
+ # single video story with digitalData
+ 'url': 'http://www.bbc.com/travel/story/20150625-sri-lankas-spicy-secret',
+ 'info_dict': {
+ 'id': 'p02q6gc4',
+ 'ext': 'flv',
+ 'title': 'Sri Lanka’s spicy secret',
+ 'description': 'As a new train line to Jaffna opens up the country’s north, travellers can experience a truly distinct slice of Tamil culture.',
+ 'timestamp': 1437674293,
+ 'upload_date': '20150723',
+ },
+ 'params': {
+ # rtmp download
+ 'skip_download': True,
+ }
+ }, {
+ # single video story without digitalData
+ 'url': 'http://www.bbc.com/autos/story/20130513-hyundais-rock-star',
+ 'info_dict': {
+ 'id': 'p018zqqg',
+ 'ext': 'mp4',
+ 'title': 'Hyundai Santa Fe Sport: Rock star',
+ 'description': 'md5:b042a26142c4154a6e472933cf20793d',
+ 'timestamp': 1368473503,
+ 'upload_date': '20130513',
+ },
+ 'params': {
+ # rtmp download
+ 'skip_download': True,
+ }
+ }, {
+ # single video with playlist.sxml URL
+ 'url': 'http://www.bbc.com/sport/0/football/33653409',
+ 'info_dict': {
+ 'id': 'p02xycnp',
+ 'ext': 'mp4',
+ 'title': 'Transfers: Cristiano Ronaldo to Man Utd, Arsenal to spend?',
+ 'description': 'md5:398fca0e2e701c609d726e034fa1fc89',
+ 'duration': 140,
+ },
+ 'params': {
+ # rtmp download
+ 'skip_download': True,
+ }
+ }, {
+ # single video with playlist URL from weather section
+ 'url': 'http://www.bbc.com/weather/features/33601775',
+ 'only_matching': True,
+ }, {
+ # custom redirection to www.bbc.com
+ 'url': 'http://www.bbc.co.uk/news/science-environment-33661876',
+ 'only_matching': True,
+ }]
+
+ @classmethod
+ def suitable(cls, url):
+ return False if BBCCoUkIE.suitable(url) else super(BBCIE, cls).suitable(url)
+
+ def _extract_from_media_meta(self, media_meta, video_id):
+ # Direct links to media in media metadata (e.g.
+ # http://www.bbc.com/turkce/haberler/2015/06/150615_telabyad_kentin_cogu)
+ # TODO: there are also f4m and m3u8 streams incorporated in playlist.sxml
+ source_files = media_meta.get('sourceFiles')
+ if source_files:
+ return [{
+ 'url': f['url'],
+ 'format_id': format_id,
+ 'ext': f.get('encoding'),
+ 'tbr': float_or_none(f.get('bitrate'), 1000),
+ 'filesize': int_or_none(f.get('filesize')),
+ } for format_id, f in source_files.items() if f.get('url')], []
+
+ programme_id = media_meta.get('externalId')
+ if programme_id:
+ return self._download_media_selector(programme_id)
+
+ # Process playlist.sxml as legacy playlist
+ href = media_meta.get('href')
+ if href:
+ playlist = self._download_legacy_playlist_url(href)
+ _, _, _, _, formats, subtitles = self._extract_from_legacy_playlist(playlist, video_id)
+ return formats, subtitles
+
+ return [], []
+
+ def _real_extract(self, url):
+ playlist_id = self._match_id(url)
+
+ webpage = self._download_webpage(url, playlist_id)
+
+ timestamp = parse_iso8601(self._search_regex(
+ [r'"datePublished":\s*"([^"]+)',
+ r'<meta[^>]+property="article:published_time"[^>]+content="([^"]+)"',
+ r'itemprop="datePublished"[^>]+datetime="([^"]+)"'],
+ webpage, 'date', default=None))
+
+        # single video with playlist.sxml URL (e.g. http://www.bbc.com/sport/0/football/33653409)
+ playlist = self._search_regex(
+ r'<param[^>]+name="playlist"[^>]+value="([^"]+)"',
+ webpage, 'playlist', default=None)
+ if playlist:
+ programme_id, title, description, duration, formats, subtitles = \
+ self._process_legacy_playlist_url(playlist, playlist_id)
+ self._sort_formats(formats)
+ return {
+ 'id': programme_id,
+ 'title': title,
+ 'description': description,
+ 'duration': duration,
+ 'timestamp': timestamp,
+ 'formats': formats,
+ 'subtitles': subtitles,
+ }
+
+ # single video story (e.g. http://www.bbc.com/travel/story/20150625-sri-lankas-spicy-secret)
+ programme_id = self._search_regex(
+ [r'data-video-player-vpid="([\da-z]{8})"',
+ r'<param[^>]+name="externalIdentifier"[^>]+value="([\da-z]{8})"'],
+ webpage, 'vpid', default=None)
+ if programme_id:
+ formats, subtitles = self._download_media_selector(programme_id)
+ self._sort_formats(formats)
+ # digitalData may be missing (e.g. http://www.bbc.com/autos/story/20130513-hyundais-rock-star)
+ digital_data = self._parse_json(
+ self._search_regex(
+ r'var\s+digitalData\s*=\s*({.+?});?\n', webpage, 'digital data', default='{}'),
+ programme_id, fatal=False)
+ page_info = digital_data.get('page', {}).get('pageInfo', {})
+ title = page_info.get('pageName') or self._og_search_title(webpage)
+ description = page_info.get('description') or self._og_search_description(webpage)
+ timestamp = parse_iso8601(page_info.get('publicationDate')) or timestamp
+ return {
+ 'id': programme_id,
+ 'title': title,
+ 'description': description,
+ 'timestamp': timestamp,
+ 'formats': formats,
+ 'subtitles': subtitles,
+ }
+
+ playlist_title = self._html_search_regex(
+ r'<title>(.*?)(?:\s*-\s*BBC [^ ]+)?</title>', webpage, 'playlist title')
+ playlist_description = self._og_search_description(webpage, default=None)
+
+ def extract_all(pattern):
+ return list(filter(None, map(
+ lambda s: self._parse_json(s, playlist_id, fatal=False),
+ re.findall(pattern, webpage))))
+
+ # Multiple video article (e.g.
+ # http://www.bbc.co.uk/blogs/adamcurtis/entries/3662a707-0af9-3149-963f-47bea720b460)
+ EMBED_URL = r'https?://(?:www\.)?bbc\.co\.uk/(?:[^/]+/)+[\da-z]{8}(?:\b[^"]+)?'
+ entries = []
+ for match in extract_all(r'new\s+SMP\(({.+?})\)'):
+ embed_url = match.get('playerSettings', {}).get('externalEmbedUrl')
+ if embed_url and re.match(EMBED_URL, embed_url):
+ entries.append(embed_url)
+ entries.extend(re.findall(
+ r'setPlaylist\("(%s)"\)' % EMBED_URL, webpage))
+ if entries:
+ return self.playlist_result(
+ [self.url_result(entry, 'BBCCoUk') for entry in entries],
+ playlist_id, playlist_title, playlist_description)
+
+ # Multiple video article (e.g. http://www.bbc.com/news/world-europe-32668511)
+ medias = extract_all(r"data-media-meta='({[^']+})'")
+
+ if not medias:
+ # Single video article (e.g. http://www.bbc.com/news/video_and_audio/international)
+ media_asset = self._search_regex(
+ r'mediaAssetPage\.init\(\s*({.+?}), "/',
+ webpage, 'media asset', default=None)
+ if media_asset:
+            media_asset_page = self._parse_json(media_asset, playlist_id, fatal=False) or {}
+ medias = []
+ for video in media_asset_page.get('videos', {}).values():
+ medias.extend(video.values())
+
+ if not medias:
+ # Multiple video playlist with single `now playing` entry (e.g.
+ # http://www.bbc.com/news/video_and_audio/must_see/33767813)
+ vxp_playlist = self._parse_json(
+ self._search_regex(
+ r'<script[^>]+class="vxp-playlist-data"[^>]+type="application/json"[^>]*>([^<]+)</script>',
+ webpage, 'playlist data'),
+ playlist_id)
+ playlist_medias = []
+ for item in vxp_playlist:
+ media = item.get('media')
+ if not media:
+ continue
+ playlist_medias.append(media)
+                # Download a single video if we find a media item whose asset id matches the video id from the URL
+ if item.get('advert', {}).get('assetId') == playlist_id:
+ medias = [media]
+ break
+ # Fallback to the whole playlist
+ if not medias:
+ medias = playlist_medias
+
+ entries = []
+ for num, media_meta in enumerate(medias, start=1):
+ formats, subtitles = self._extract_from_media_meta(media_meta, playlist_id)
+ if not formats:
+ continue
+ self._sort_formats(formats)
+
+ video_id = media_meta.get('externalId')
+ if not video_id:
+ video_id = playlist_id if len(medias) == 1 else '%s-%s' % (playlist_id, num)
+
+ title = media_meta.get('caption')
+ if not title:
+ title = playlist_title if len(medias) == 1 else '%s - Video %s' % (playlist_title, num)
+
+ duration = int_or_none(media_meta.get('durationInSeconds')) or parse_duration(media_meta.get('duration'))
+
+ images = []
+ for image in media_meta.get('images', {}).values():
+ images.extend(image.values())
+ if 'image' in media_meta:
+ images.append(media_meta['image'])
+
+ thumbnails = [{
+ 'url': image.get('href'),
+ 'width': int_or_none(image.get('width')),
+ 'height': int_or_none(image.get('height')),
+ } for image in images]
+
+ entries.append({
+ 'id': video_id,
+ 'title': title,
+ 'thumbnails': thumbnails,
+ 'duration': duration,
+ 'timestamp': timestamp,
+ 'formats': formats,
+ 'subtitles': subtitles,
+ })
+
+ return self.playlist_result(entries, playlist_id, playlist_title, playlist_description)
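The `_MEDIASELECTOR_URLS` hook is what lets `BBCIE` try the richer journalism mediaselector first and fall back to the universal one for non-UK users. A condensed, self-contained sketch of the fallback loop in `_download_media_selector` above (names simplified):

    class MediaSelectionError(Exception):
        def __init__(self, id):
            self.id = id

    def first_working_selector(url_templates, programme_id, fetch):
        # Only the geo error ('notukerror') moves on to the next
        # endpoint; any other mediaselector error is fatal immediately.
        last_exception = None
        for template in url_templates:
            try:
                return fetch(template % programme_id)
            except MediaSelectionError as e:
                if e.id != 'notukerror':
                    raise
                last_exception = e
        raise last_exception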
diff --git a/youtube_dl/extractor/bbccouk.py b/youtube_dl/extractor/bbccouk.py
deleted file mode 100644
index 5825d2867..000000000
--- a/youtube_dl/extractor/bbccouk.py
+++ /dev/null
@@ -1,379 +0,0 @@
-from __future__ import unicode_literals
-
-import xml.etree.ElementTree
-
-from .common import InfoExtractor
-from ..utils import (
- ExtractorError,
- int_or_none,
-)
-from ..compat import compat_HTTPError
-
-
-class BBCCoUkIE(InfoExtractor):
- IE_NAME = 'bbc.co.uk'
- IE_DESC = 'BBC iPlayer'
- _VALID_URL = r'https?://(?:www\.)?bbc\.co\.uk/(?:(?:(?:programmes|iplayer(?:/[^/]+)?/(?:episode|playlist))/)|music/clips[/#])(?P<id>[\da-z]{8})'
-
- _TESTS = [
- {
- 'url': 'http://www.bbc.co.uk/programmes/b039g8p7',
- 'info_dict': {
- 'id': 'b039d07m',
- 'ext': 'flv',
- 'title': 'Kaleidoscope, Leonard Cohen',
- 'description': 'The Canadian poet and songwriter reflects on his musical career.',
- 'duration': 1740,
- },
- 'params': {
- # rtmp download
- 'skip_download': True,
- }
- },
- {
- 'url': 'http://www.bbc.co.uk/iplayer/episode/b00yng5w/The_Man_in_Black_Series_3_The_Printed_Name/',
- 'info_dict': {
- 'id': 'b00yng1d',
- 'ext': 'flv',
- 'title': 'The Man in Black: Series 3: The Printed Name',
- 'description': "Mark Gatiss introduces Nicholas Pierpan's chilling tale of a writer's devilish pact with a mysterious man. Stars Ewan Bailey.",
- 'duration': 1800,
- },
- 'params': {
- # rtmp download
- 'skip_download': True,
- },
- 'skip': 'Episode is no longer available on BBC iPlayer Radio',
- },
- {
- 'url': 'http://www.bbc.co.uk/iplayer/episode/b03vhd1f/The_Voice_UK_Series_3_Blind_Auditions_5/',
- 'info_dict': {
- 'id': 'b00yng1d',
- 'ext': 'flv',
- 'title': 'The Voice UK: Series 3: Blind Auditions 5',
- 'description': "Emma Willis and Marvin Humes present the fifth set of blind auditions in the singing competition, as the coaches continue to build their teams based on voice alone.",
- 'duration': 5100,
- },
- 'params': {
- # rtmp download
- 'skip_download': True,
- },
- 'skip': 'Currently BBC iPlayer TV programmes are available to play in the UK only',
- },
- {
- 'url': 'http://www.bbc.co.uk/iplayer/episode/p026c7jt/tomorrows-worlds-the-unearthly-history-of-science-fiction-2-invasion',
- 'info_dict': {
- 'id': 'b03k3pb7',
- 'ext': 'flv',
- 'title': "Tomorrow's Worlds: The Unearthly History of Science Fiction",
- 'description': '2. Invasion',
- 'duration': 3600,
- },
- 'params': {
- # rtmp download
- 'skip_download': True,
- },
- 'skip': 'Currently BBC iPlayer TV programmes are available to play in the UK only',
- }, {
- 'url': 'http://www.bbc.co.uk/programmes/b04v20dw',
- 'info_dict': {
- 'id': 'b04v209v',
- 'ext': 'flv',
- 'title': 'Pete Tong, The Essential New Tune Special',
- 'description': "Pete has a very special mix - all of 2014's Essential New Tunes!",
- 'duration': 10800,
- },
- 'params': {
- # rtmp download
- 'skip_download': True,
- }
- }, {
- 'url': 'http://www.bbc.co.uk/music/clips/p02frcc3',
- 'note': 'Audio',
- 'info_dict': {
- 'id': 'p02frcch',
- 'ext': 'flv',
- 'title': 'Pete Tong, Past, Present and Future Special, Madeon - After Hours mix',
- 'description': 'French house superstar Madeon takes us out of the club and onto the after party.',
- 'duration': 3507,
- },
- 'params': {
- # rtmp download
- 'skip_download': True,
- }
- }, {
- 'url': 'http://www.bbc.co.uk/music/clips/p025c0zz',
- 'note': 'Video',
- 'info_dict': {
- 'id': 'p025c103',
- 'ext': 'flv',
- 'title': 'Reading and Leeds Festival, 2014, Rae Morris - Closer (Live on BBC Three)',
- 'description': 'Rae Morris performs Closer for BBC Three at Reading 2014',
- 'duration': 226,
- },
- 'params': {
- # rtmp download
- 'skip_download': True,
- }
- }, {
- 'url': 'http://www.bbc.co.uk/iplayer/episode/b054fn09/ad/natural-world-20152016-2-super-powered-owls',
- 'info_dict': {
- 'id': 'p02n76xf',
- 'ext': 'flv',
- 'title': 'Natural World, 2015-2016: 2. Super Powered Owls',
- 'description': 'md5:e4db5c937d0e95a7c6b5e654d429183d',
- 'duration': 3540,
- },
- 'params': {
- # rtmp download
- 'skip_download': True,
- },
- 'skip': 'geolocation',
- }, {
- 'url': 'http://www.bbc.co.uk/iplayer/episode/b05zmgwn/royal-academy-summer-exhibition',
- 'info_dict': {
- 'id': 'b05zmgw1',
- 'ext': 'flv',
- 'description': 'Kirsty Wark and Morgan Quaintance visit the Royal Academy as it prepares for its annual artistic extravaganza, meeting people who have come together to make the show unique.',
- 'title': 'Royal Academy Summer Exhibition',
- 'duration': 3540,
- },
- 'params': {
- # rtmp download
- 'skip_download': True,
- },
- 'skip': 'geolocation',
- }, {
- 'url': 'http://www.bbc.co.uk/iplayer/playlist/p01dvks4',
- 'only_matching': True,
- }, {
- 'url': 'http://www.bbc.co.uk/music/clips#p02frcc3',
- 'only_matching': True,
- }, {
- 'url': 'http://www.bbc.co.uk/iplayer/cbeebies/episode/b0480276/bing-14-atchoo',
- 'only_matching': True,
- }
- ]
-
- def _extract_asx_playlist(self, connection, programme_id):
- asx = self._download_xml(connection.get('href'), programme_id, 'Downloading ASX playlist')
- return [ref.get('href') for ref in asx.findall('./Entry/ref')]
-
- def _extract_connection(self, connection, programme_id):
- formats = []
- protocol = connection.get('protocol')
- supplier = connection.get('supplier')
- if protocol == 'http':
- href = connection.get('href')
- # ASX playlist
- if supplier == 'asx':
- for i, ref in enumerate(self._extract_asx_playlist(connection, programme_id)):
- formats.append({
- 'url': ref,
- 'format_id': 'ref%s_%s' % (i, supplier),
- })
- # Direct link
- else:
- formats.append({
- 'url': href,
- 'format_id': supplier,
- })
- elif protocol == 'rtmp':
- application = connection.get('application', 'ondemand')
- auth_string = connection.get('authString')
- identifier = connection.get('identifier')
- server = connection.get('server')
- formats.append({
- 'url': '%s://%s/%s?%s' % (protocol, server, application, auth_string),
- 'play_path': identifier,
- 'app': '%s?%s' % (application, auth_string),
- 'page_url': 'http://www.bbc.co.uk',
- 'player_url': 'http://www.bbc.co.uk/emp/releases/iplayer/revisions/617463_618125_4/617463_618125_4_emp.swf',
- 'rtmp_live': False,
- 'ext': 'flv',
- 'format_id': supplier,
- })
- return formats
-
- def _extract_items(self, playlist):
- return playlist.findall('./{http://bbc.co.uk/2008/emp/playlist}item')
-
- def _extract_medias(self, media_selection):
- error = media_selection.find('./{http://bbc.co.uk/2008/mp/mediaselection}error')
- if error is not None:
- raise ExtractorError(
- '%s returned error: %s' % (self.IE_NAME, error.get('id')), expected=True)
- return media_selection.findall('./{http://bbc.co.uk/2008/mp/mediaselection}media')
-
- def _extract_connections(self, media):
- return media.findall('./{http://bbc.co.uk/2008/mp/mediaselection}connection')
-
- def _extract_video(self, media, programme_id):
- formats = []
- vbr = int(media.get('bitrate'))
- vcodec = media.get('encoding')
- service = media.get('service')
- width = int(media.get('width'))
- height = int(media.get('height'))
- file_size = int(media.get('media_file_size'))
- for connection in self._extract_connections(media):
- conn_formats = self._extract_connection(connection, programme_id)
- for format in conn_formats:
- format.update({
- 'format_id': '%s_%s' % (service, format['format_id']),
- 'width': width,
- 'height': height,
- 'vbr': vbr,
- 'vcodec': vcodec,
- 'filesize': file_size,
- })
- formats.extend(conn_formats)
- return formats
-
- def _extract_audio(self, media, programme_id):
- formats = []
- abr = int(media.get('bitrate'))
- acodec = media.get('encoding')
- service = media.get('service')
- for connection in self._extract_connections(media):
- conn_formats = self._extract_connection(connection, programme_id)
- for format in conn_formats:
- format.update({
- 'format_id': '%s_%s' % (service, format['format_id']),
- 'abr': abr,
- 'acodec': acodec,
- })
- formats.extend(conn_formats)
- return formats
-
- def _get_subtitles(self, media, programme_id):
- subtitles = {}
- for connection in self._extract_connections(media):
- captions = self._download_xml(connection.get('href'), programme_id, 'Downloading captions')
- lang = captions.get('{http://www.w3.org/XML/1998/namespace}lang', 'en')
- subtitles[lang] = [
- {
- 'url': connection.get('href'),
- 'ext': 'ttml',
- },
- ]
- return subtitles
-
- def _download_media_selector(self, programme_id):
- try:
- media_selection = self._download_xml(
- 'http://open.live.bbc.co.uk/mediaselector/5/select/version/2.0/mediaset/pc/vpid/%s' % programme_id,
- programme_id, 'Downloading media selection XML')
- except ExtractorError as ee:
- if isinstance(ee.cause, compat_HTTPError) and ee.cause.code == 403:
- media_selection = xml.etree.ElementTree.fromstring(ee.cause.read().decode('utf-8'))
- else:
- raise
-
- formats = []
- subtitles = None
-
- for media in self._extract_medias(media_selection):
- kind = media.get('kind')
- if kind == 'audio':
- formats.extend(self._extract_audio(media, programme_id))
- elif kind == 'video':
- formats.extend(self._extract_video(media, programme_id))
- elif kind == 'captions':
- subtitles = self.extract_subtitles(media, programme_id)
-
- return formats, subtitles
-
- def _download_playlist(self, playlist_id):
- try:
- playlist = self._download_json(
- 'http://www.bbc.co.uk/programmes/%s/playlist.json' % playlist_id,
- playlist_id, 'Downloading playlist JSON')
-
- version = playlist.get('defaultAvailableVersion')
- if version:
- smp_config = version['smpConfig']
- title = smp_config['title']
- description = smp_config['summary']
- for item in smp_config['items']:
- kind = item['kind']
- if kind != 'programme' and kind != 'radioProgramme':
- continue
- programme_id = item.get('vpid')
- duration = int(item.get('duration'))
- formats, subtitles = self._download_media_selector(programme_id)
- return programme_id, title, description, duration, formats, subtitles
- except ExtractorError as ee:
- if not (isinstance(ee.cause, compat_HTTPError) and ee.cause.code == 404):
- raise
-
- # fallback to legacy playlist
- playlist = self._download_xml(
- 'http://www.bbc.co.uk/iplayer/playlist/%s' % playlist_id,
- playlist_id, 'Downloading legacy playlist XML')
-
- no_items = playlist.find('./{http://bbc.co.uk/2008/emp/playlist}noItems')
- if no_items is not None:
- reason = no_items.get('reason')
- if reason == 'preAvailability':
- msg = 'Episode %s is not yet available' % playlist_id
- elif reason == 'postAvailability':
- msg = 'Episode %s is no longer available' % playlist_id
- elif reason == 'noMedia':
- msg = 'Episode %s is not currently available' % playlist_id
- else:
- msg = 'Episode %s is not available: %s' % (playlist_id, reason)
- raise ExtractorError(msg, expected=True)
-
- for item in self._extract_items(playlist):
- kind = item.get('kind')
- if kind != 'programme' and kind != 'radioProgramme':
- continue
- title = playlist.find('./{http://bbc.co.uk/2008/emp/playlist}title').text
- description = playlist.find('./{http://bbc.co.uk/2008/emp/playlist}summary').text
- programme_id = item.get('identifier')
- duration = int(item.get('duration'))
- formats, subtitles = self._download_media_selector(programme_id)
-
- return programme_id, title, description, duration, formats, subtitles
-
- def _real_extract(self, url):
- group_id = self._match_id(url)
-
- webpage = self._download_webpage(url, group_id, 'Downloading video page')
-
- programme_id = None
-
- tviplayer = self._search_regex(
- r'mediator\.bind\(({.+?})\s*,\s*document\.getElementById',
- webpage, 'player', default=None)
-
- if tviplayer:
- player = self._parse_json(tviplayer, group_id).get('player', {})
- duration = int_or_none(player.get('duration'))
- programme_id = player.get('vpid')
-
- if not programme_id:
- programme_id = self._search_regex(
- r'"vpid"\s*:\s*"([\da-z]{8})"', webpage, 'vpid', fatal=False, default=None)
-
- if programme_id:
- formats, subtitles = self._download_media_selector(programme_id)
- title = self._og_search_title(webpage)
- description = self._search_regex(
- r'<p class="[^"]*medium-description[^"]*">([^<]+)</p>',
- webpage, 'description', fatal=False)
- else:
- programme_id, title, description, duration, formats, subtitles = self._download_playlist(group_id)
-
- self._sort_formats(formats)
-
- return {
- 'id': programme_id,
- 'title': title,
- 'description': description,
- 'thumbnail': self._og_search_thumbnail(webpage, default=None),
- 'duration': duration,
- 'formats': formats,
- 'subtitles': subtitles,
- }
diff --git a/youtube_dl/extractor/breakcom.py b/youtube_dl/extractor/breakcom.py
index 809287d14..aa08051b1 100644
--- a/youtube_dl/extractor/breakcom.py
+++ b/youtube_dl/extractor/breakcom.py
@@ -18,6 +18,7 @@ class BreakIE(InfoExtractor):
'id': '2468056',
'ext': 'mp4',
'title': 'When Girls Act Like D-Bags',
+ 'age_limit': 13,
}
}, {
'url': 'http://www.break.com/video/ugc/baby-flex-2773063',
diff --git a/youtube_dl/extractor/canalplus.py b/youtube_dl/extractor/canalplus.py
index 699b4f7d0..57e0cda2c 100644
--- a/youtube_dl/extractor/canalplus.py
+++ b/youtube_dl/extractor/canalplus.py
@@ -106,15 +106,11 @@ class CanalplusIE(InfoExtractor):
continue
format_id = fmt.tag
if format_id == 'HLS':
- hls_formats = self._extract_m3u8_formats(format_url, video_id, 'flv')
- for fmt in hls_formats:
- fmt['preference'] = preference(format_id)
- formats.extend(hls_formats)
+ formats.extend(self._extract_m3u8_formats(
+ format_url, video_id, 'mp4', preference=preference(format_id)))
elif format_id == 'HDS':
- hds_formats = self._extract_f4m_formats(format_url + '?hdcore=2.11.3', video_id)
- for fmt in hds_formats:
- fmt['preference'] = preference(format_id)
- formats.extend(hds_formats)
+ formats.extend(self._extract_f4m_formats(
+ format_url + '?hdcore=2.11.3', video_id, preference=preference(format_id)))
else:
formats.append({
'url': format_url,
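Passing `preference` straight into `_extract_m3u8_formats`/`_extract_f4m_formats` replaces the old pattern of patching every returned format dict in a loop. Roughly how the value then influences ordering -- a toy sort key only; the real `_sort_formats` weighs many more fields:

    formats = [
        {'format_id': 'hls-800', 'tbr': 800, 'preference': 1},
        {'format_id': 'http-1200', 'tbr': 1200, 'preference': 0},
    ]
    # youtube-dl sorts ascending with the best format last, and
    # preference dominates bitrate:
    formats.sort(key=lambda f: (f.get('preference') or 0, f.get('tbr') or 0))
    print([f['format_id'] for f in formats])  # ['http-1200', 'hls-800']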
diff --git a/youtube_dl/extractor/ceskatelevize.py b/youtube_dl/extractor/ceskatelevize.py
index dda583680..e857e66f4 100644
--- a/youtube_dl/extractor/ceskatelevize.py
+++ b/youtube_dl/extractor/ceskatelevize.py
@@ -17,55 +17,81 @@ from ..utils import (
class CeskaTelevizeIE(InfoExtractor):
- _VALID_URL = r'https?://www\.ceskatelevize\.cz/(porady|ivysilani)/(.+/)?(?P<id>[^?#]+)'
-
- _TESTS = [
- {
- 'url': 'http://www.ceskatelevize.cz/ivysilani/ivysilani/10441294653-hyde-park-civilizace/214411058091220',
+ _VALID_URL = r'https?://www\.ceskatelevize\.cz/(porady|ivysilani)/(?:[^/]+/)*(?P<id>[^/#?]+)/*(?:[#?].*)?$'
+ _TESTS = [{
+ 'url': 'http://www.ceskatelevize.cz/ivysilani/ivysilani/10441294653-hyde-park-civilizace/214411058091220',
+ 'info_dict': {
+ 'id': '61924494876951776',
+ 'ext': 'mp4',
+ 'title': 'Hyde Park Civilizace',
+ 'description': 'md5:fe93f6eda372d150759d11644ebbfb4a',
+ 'thumbnail': 're:^https?://.*\.jpg',
+ 'duration': 3350,
+ },
+ 'params': {
+ # m3u8 download
+ 'skip_download': True,
+ },
+ }, {
+ 'url': 'http://www.ceskatelevize.cz/ivysilani/10532695142-prvni-republika/bonus/14716-zpevacka-z-duparny-bobina',
+ 'info_dict': {
+ 'id': '61924494876844374',
+ 'ext': 'mp4',
+ 'title': 'První republika: Zpěvačka z Dupárny Bobina',
+ 'description': 'Sága mapující atmosféru první republiky od r. 1918 do r. 1945.',
+ 'thumbnail': 're:^https?://.*\.jpg',
+ 'duration': 88.4,
+ },
+ 'params': {
+ # m3u8 download
+ 'skip_download': True,
+ },
+ }, {
+ # video with 18+ caution trailer
+ 'url': 'http://www.ceskatelevize.cz/porady/10520528904-queer/215562210900007-bogotart/',
+ 'info_dict': {
+ 'id': '215562210900007-bogotart',
+ 'title': 'Queer: Bogotart',
+ 'description': 'Alternativní průvodce současným queer světem',
+ },
+ 'playlist': [{
'info_dict': {
- 'id': '214411058091220',
+ 'id': '61924494876844842',
'ext': 'mp4',
- 'title': 'Hyde Park Civilizace',
- 'description': 'Věda a současná civilizace. Interaktivní pořad - prostor pro vaše otázky a komentáře',
- 'thumbnail': 're:^https?://.*\.jpg',
- 'duration': 3350,
- },
- 'params': {
- # m3u8 download
- 'skip_download': True,
+ 'title': 'Queer: Bogotart (Varování 18+)',
+ 'duration': 10.2,
},
- },
- {
- 'url': 'http://www.ceskatelevize.cz/ivysilani/10532695142-prvni-republika/bonus/14716-zpevacka-z-duparny-bobina',
+ }, {
'info_dict': {
- 'id': '14716',
+ 'id': '61924494877068022',
'ext': 'mp4',
- 'title': 'První republika: Zpěvačka z Dupárny Bobina',
- 'description': 'Sága mapující atmosféru první republiky od r. 1918 do r. 1945.',
+ 'title': 'Queer: Bogotart (Queer)',
'thumbnail': 're:^https?://.*\.jpg',
- 'duration': 88.4,
- },
- 'params': {
- # m3u8 download
- 'skip_download': True,
+ 'duration': 1558.3,
},
+ }],
+ 'params': {
+ # m3u8 download
+ 'skip_download': True,
},
- ]
+ }]
def _real_extract(self, url):
url = url.replace('/porady/', '/ivysilani/').replace('/video/', '')
mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('id')
+ playlist_id = mobj.group('id')
- webpage = self._download_webpage(url, video_id)
+ webpage = self._download_webpage(url, playlist_id)
NOT_AVAILABLE_STRING = 'This content is not available at your territory due to limited copyright.'
if '%s</p>' % NOT_AVAILABLE_STRING in webpage:
raise ExtractorError(NOT_AVAILABLE_STRING, expected=True)
- typ = self._html_search_regex(r'getPlaylistUrl\(\[\{"type":"(.+?)","id":".+?"\}\],', webpage, 'type')
- episode_id = self._html_search_regex(r'getPlaylistUrl\(\[\{"type":".+?","id":"(.+?)"\}\],', webpage, 'episode_id')
+ typ = self._html_search_regex(
+ r'getPlaylistUrl\(\[\{"type":"(.+?)","id":".+?"\}\],', webpage, 'type')
+ episode_id = self._html_search_regex(
+ r'getPlaylistUrl\(\[\{"type":".+?","id":"(.+?)"\}\],', webpage, 'episode_id')
data = {
'playlist[0][type]': typ,
@@ -83,7 +109,7 @@ class CeskaTelevizeIE(InfoExtractor):
req.add_header('X-Requested-With', 'XMLHttpRequest')
req.add_header('Referer', url)
- playlistpage = self._download_json(req, video_id)
+ playlistpage = self._download_json(req, playlist_id)
playlist_url = playlistpage['url']
if playlist_url == 'error_region':
@@ -92,33 +118,43 @@ class CeskaTelevizeIE(InfoExtractor):
req = compat_urllib_request.Request(compat_urllib_parse_unquote(playlist_url))
req.add_header('Referer', url)
- playlist = self._download_json(req, video_id)
-
- item = playlist['playlist'][0]
- formats = []
- for format_id, stream_url in item['streamUrls'].items():
- formats.extend(self._extract_m3u8_formats(stream_url, video_id, 'mp4'))
- self._sort_formats(formats)
-
- title = self._og_search_title(webpage)
- description = self._og_search_description(webpage)
- duration = float_or_none(item.get('duration'))
- thumbnail = item.get('previewImageUrl')
-
- subtitles = {}
- subs = item.get('subtitles')
- if subs:
- subtitles = self.extract_subtitles(episode_id, subs)
-
- return {
- 'id': episode_id,
- 'title': title,
- 'description': description,
- 'thumbnail': thumbnail,
- 'duration': duration,
- 'formats': formats,
- 'subtitles': subtitles,
- }
+ playlist_title = self._og_search_title(webpage)
+ playlist_description = self._og_search_description(webpage)
+
+ playlist = self._download_json(req, playlist_id)['playlist']
+ playlist_len = len(playlist)
+
+ entries = []
+ for item in playlist:
+ formats = []
+ for format_id, stream_url in item['streamUrls'].items():
+ formats.extend(self._extract_m3u8_formats(
+ stream_url, playlist_id, 'mp4', entry_protocol='m3u8_native'))
+ self._sort_formats(formats)
+
+ item_id = item.get('id') or item['assetId']
+ title = item['title']
+
+ duration = float_or_none(item.get('duration'))
+ thumbnail = item.get('previewImageUrl')
+
+ subtitles = {}
+ if item.get('type') == 'VOD':
+ subs = item.get('subtitles')
+ if subs:
+ subtitles = self.extract_subtitles(episode_id, subs)
+
+ entries.append({
+ 'id': item_id,
+ 'title': playlist_title if playlist_len == 1 else '%s (%s)' % (playlist_title, title),
+ 'description': playlist_description if playlist_len == 1 else None,
+ 'thumbnail': thumbnail,
+ 'duration': duration,
+ 'formats': formats,
+ 'subtitles': subtitles,
+ })
+
+ return self.playlist_result(entries, playlist_id, playlist_title, playlist_description)
def _get_subtitles(self, episode_id, subs):
original_subtitles = self._download_webpage(
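The rewrite turns a single-video extractor into a playlist extractor: every item in the server's `playlist` array becomes an entry, which is what makes the 18+ caution trailer plus main video case in the tests work. The per-entry naming rule, pulled out in isolation:

    def entry_title(playlist_title, item_title, playlist_len):
        # A lone item keeps the page title; multiple items (e.g. a
        # warning clip followed by the actual video) get qualified.
        if playlist_len == 1:
            return playlist_title
        return '%s (%s)' % (playlist_title, item_title)

    entry_title('Queer: Bogotart', 'Varování 18+', 2)
    # -> 'Queer: Bogotart (Varování 18+)'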
diff --git a/youtube_dl/extractor/cinemassacre.py b/youtube_dl/extractor/cinemassacre.py
index c949a4814..fd1770dac 100644
--- a/youtube_dl/extractor/cinemassacre.py
+++ b/youtube_dl/extractor/cinemassacre.py
@@ -6,6 +6,7 @@ import re
from .common import InfoExtractor
from ..utils import ExtractorError
from .bliptv import BlipTVIE
+from .screenwavemedia import ScreenwaveMediaIE
class CinemassacreIE(InfoExtractor):
@@ -83,10 +84,10 @@ class CinemassacreIE(InfoExtractor):
playerdata_url = self._search_regex(
[
- r'src="(http://(?:player2\.screenwavemedia\.com|player\.screenwavemedia\.com/play)/[a-zA-Z]+\.php\?[^"]*\bid=.+?)"',
- r'<iframe[^>]+src="((?:https?:)?//(?:[^.]+\.)?youtube\.com/.+?)"',
+ ScreenwaveMediaIE.EMBED_PATTERN,
+ r'<iframe[^>]+src="(?P<url>(?:https?:)?//(?:[^.]+\.)?youtube\.com/.+?)"',
],
- webpage, 'player data URL', default=None)
+ webpage, 'player data URL', default=None, group='url')
if not playerdata_url:
playerdata_url = BlipTVIE._extract_url(webpage)
if not playerdata_url:
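Because `_search_regex` is now called with `group='url'`, every pattern in the list -- including `ScreenwaveMediaIE.EMBED_PATTERN` -- must define that named group, which is why the YouTube iframe pattern gained `(?P<url>...)`. A hypothetical minimal illustration of the mechanism:

    import re

    patterns = [
        r'src="(?P<url>http://player\.example\.com/[^"]+)"',
        r'<iframe[^>]+src="(?P<url>https?://[^"]+)"',
    ]
    webpage = '<iframe src="https://www.youtube.com/embed/abc123"></iframe>'
    for p in patterns:
        m = re.search(p, webpage)
        if m:
            print(m.group('url'))  # first pattern that matches wins
            break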
diff --git a/youtube_dl/extractor/clipfish.py b/youtube_dl/extractor/clipfish.py
index a5c3cb7c6..7af903571 100644
--- a/youtube_dl/extractor/clipfish.py
+++ b/youtube_dl/extractor/clipfish.py
@@ -1,53 +1,68 @@
from __future__ import unicode_literals
import re
-import time
-import xml.etree.ElementTree
from .common import InfoExtractor
from ..utils import (
- ExtractorError,
- parse_duration,
+ determine_ext,
+ int_or_none,
+ js_to_json,
+ parse_iso8601,
+ remove_end,
)
class ClipfishIE(InfoExtractor):
- IE_NAME = 'clipfish'
-
- _VALID_URL = r'^https?://(?:www\.)?clipfish\.de/.*?/video/(?P<id>[0-9]+)/'
+ _VALID_URL = r'https?://(?:www\.)?clipfish\.de/(?:[^/]+/)+video/(?P<id>[0-9]+)'
_TEST = {
'url': 'http://www.clipfish.de/special/game-trailer/video/3966754/fifa-14-e3-2013-trailer/',
- 'md5': '2521cd644e862936cf2e698206e47385',
+ 'md5': '79bc922f3e8a9097b3d68a93780fd475',
'info_dict': {
'id': '3966754',
'ext': 'mp4',
'title': 'FIFA 14 - E3 2013 Trailer',
+ 'timestamp': 1370938118,
+ 'upload_date': '20130611',
'duration': 82,
- },
- 'skip': 'Blocked in the US'
+ }
}
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group(1)
-
- info_url = ('http://www.clipfish.de/devxml/videoinfo/%s?ts=%d' %
- (video_id, int(time.time())))
- doc = self._download_xml(
- info_url, video_id, note='Downloading info page')
- title = doc.find('title').text
- video_url = doc.find('filename').text
- if video_url is None:
- xml_bytes = xml.etree.ElementTree.tostring(doc)
- raise ExtractorError('Cannot find video URL in document %r' %
- xml_bytes)
- thumbnail = doc.find('imageurl').text
- duration = parse_duration(doc.find('duration').text)
+ video_id = self._match_id(url)
+
+ webpage = self._download_webpage(url, video_id)
+
+ video_info = self._parse_json(
+ js_to_json(self._html_search_regex(
+ r'(?s)videoObject\s*=\s*({.+?});', webpage, 'video object')),
+ video_id)
+
+ formats = []
+ for video_url in re.findall(r'var\s+videourl\s*=\s*"([^"]+)"', webpage):
+ ext = determine_ext(video_url)
+ if ext == 'm3u8':
+ formats.append({
+ 'url': video_url.replace('de.hls.fra.clipfish.de', 'hls.fra.clipfish.de'),
+ 'ext': 'mp4',
+ 'format_id': 'hls',
+ })
+ else:
+ formats.append({
+ 'url': video_url,
+ 'format_id': ext,
+ })
+ self._sort_formats(formats)
+
+ title = remove_end(self._og_search_title(webpage), ' - Video')
+ thumbnail = self._og_search_thumbnail(webpage)
+ duration = int_or_none(video_info.get('length'))
+ timestamp = parse_iso8601(self._html_search_meta('uploadDate', webpage, 'upload date'))
return {
'id': video_id,
'title': title,
- 'url': video_url,
+ 'formats': formats,
'thumbnail': thumbnail,
'duration': duration,
+ 'timestamp': timestamp,
}
diff --git a/youtube_dl/extractor/comcarcoff.py b/youtube_dl/extractor/comcarcoff.py
index 9c25b2223..81f3d7697 100644
--- a/youtube_dl/extractor/comcarcoff.py
+++ b/youtube_dl/extractor/comcarcoff.py
@@ -36,7 +36,7 @@ class ComCarCoffIE(InfoExtractor):
webpage, 'full data json'))
video_id = full_data['activeVideo']['video']
- video_data = full_data['videos'][video_id]
+ video_data = full_data.get('videos', {}).get(video_id) or full_data['singleshots'][video_id]
thumbnails = [{
'url': video_data['images']['thumb'],
}, {
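
The lookup now degrades gracefully: the active video id is first tried under 'videos' and, failing that, under 'singleshots'. The pattern relies on dict.get returning a falsy value when the key chain is absent; a sketch with made-up data:

    # Hypothetical payload resembling the site's "full data" JSON.
    full_data = {'activeVideo': {'video': 'ep42'},
                 'videos': {},
                 'singleshots': {'ep42': {'title': 'Episode 42'}}}

    video_id = full_data['activeVideo']['video']
    # Falls back to the 'singleshots' mapping when 'videos' has no entry (or is missing).
    video_data = full_data.get('videos', {}).get(video_id) or full_data['singleshots'][video_id]
    print(video_data['title'])  # Episode 42
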
diff --git a/youtube_dl/extractor/common.py b/youtube_dl/extractor/common.py
index b9014fc23..d694e818e 100644
--- a/youtube_dl/extractor/common.py
+++ b/youtube_dl/extractor/common.py
@@ -14,10 +14,14 @@ import xml.etree.ElementTree
from ..compat import (
compat_cookiejar,
+ compat_cookies,
+ compat_getpass,
compat_HTTPError,
compat_http_client,
compat_urllib_error,
+ compat_urllib_parse,
compat_urllib_parse_urlparse,
+ compat_urllib_request,
compat_urlparse,
compat_str,
)
@@ -35,6 +39,9 @@ from ..utils import (
RegexNotFoundError,
sanitize_filename,
unescapeHTML,
+ url_basename,
+ xpath_text,
+ xpath_with_ns,
)
@@ -65,7 +72,7 @@ class InfoExtractor(object):
Potential fields:
* url Mandatory. The URL of the video file
- * ext Will be calculated from url if missing
+ * ext Will be calculated from URL if missing
* format A human-readable description of the format
("mp4 container with h264/opus").
Calculated from the format_id, width, height.
@@ -155,7 +162,7 @@ class InfoExtractor(object):
lower to higher preference, each element is a dictionary
with the "ext" entry and one of:
* "data": The subtitles file contents
- * "url": A url pointing to the subtitles file
+ * "url": A URL pointing to the subtitles file
automatic_captions: Like 'subtitles', used by the YoutubeIE for
automatically generated captions
duration: Length of the video in seconds, as an integer.
@@ -176,13 +183,18 @@ class InfoExtractor(object):
Set to "root" to indicate that this is a
comment to the original video.
age_limit: Age restriction for the video, as an integer (years)
- webpage_url: The url to the video webpage, if given to youtube-dl it
+ webpage_url: The URL to the video webpage, if given to youtube-dl it
should allow to get the same result again. (It will be set
by YoutubeDL if it's missing)
categories: A list of categories that the video falls in, for example
["Sports", "Berlin"]
+ tags: A list of tags assigned to the video, e.g. ["sweden", "pop music"]
is_live: True, False, or None (=unknown). Whether this video is a
live stream that goes on instead of a fixed-length video.
+ start_time: Time in seconds where the reproduction should start, as
+ specified in the URL.
+ end_time: Time in seconds where the reproduction should end, as
+ specified in the URL.
Unless mentioned otherwise, the fields should be Unicode strings.
@@ -193,8 +205,8 @@ class InfoExtractor(object):
There must be a key "entries", which is a list, an iterable, or a PagedList
object, each element of which is a valid dictionary by this specification.
- Additionally, playlists can have "title" and "id" attributes with the same
- semantics as videos (see above).
+ Additionally, playlists can have "title", "description" and "id" attributes
+ with the same semantics as videos (see above).
_type "multi_video" indicates that there are multiple videos that
@@ -498,10 +510,16 @@ class InfoExtractor(object):
"""Report attempt to log in."""
self.to_screen('Logging in')
+ @staticmethod
+ def raise_login_required(msg='This video is only available for registered users'):
+ raise ExtractorError(
+ '%s. Use --username and --password or --netrc to provide account credentials.' % msg,
+ expected=True)
+
# Methods for following #608
@staticmethod
def url_result(url, ie=None, video_id=None, video_title=None):
- """Returns a url that points to a page that should be processed"""
+ """Returns a URL that points to a page that should be processed"""
# TODO: ie should be the class used for getting the info
video_info = {'_type': 'url',
'url': url,
@@ -599,7 +617,7 @@ class InfoExtractor(object):
return (username, password)
- def _get_tfa_info(self):
+ def _get_tfa_info(self, note='two-factor verification code'):
"""
Get the two-factor authentication info
TODO - asking the user will be required for sms/phone verify
@@ -613,7 +631,7 @@ class InfoExtractor(object):
if downloader_params.get('twofactor', None) is not None:
return downloader_params['twofactor']
- return None
+ return compat_getpass('Type %s and press [Return]: ' % note)
# Helper functions for extracting OpenGraph info
@staticmethod
@@ -626,6 +644,12 @@ class InfoExtractor(object):
template % (content_re, property_re),
]
+ @staticmethod
+ def _meta_regex(prop):
+ return r'''(?isx)<meta
+ (?=[^>]+(?:itemprop|name|property|id|http-equiv)=(["\']?)%s\1)
+ [^>]+?content=(["\'])(?P<content>.*?)\2''' % re.escape(prop)
+
def _og_search_property(self, prop, html, name=None, **kargs):
if name is None:
name = 'OpenGraph %s' % prop
@@ -635,7 +659,7 @@ class InfoExtractor(object):
return unescapeHTML(escaped)
def _og_search_thumbnail(self, html, **kargs):
- return self._og_search_property('image', html, 'thumbnail url', fatal=False, **kargs)
+ return self._og_search_property('image', html, 'thumbnail URL', fatal=False, **kargs)
def _og_search_description(self, html, **kargs):
return self._og_search_property('description', html, fatal=False, **kargs)
@@ -656,9 +680,7 @@ class InfoExtractor(object):
if display_name is None:
display_name = name
return self._html_search_regex(
- r'''(?isx)<meta
- (?=[^>]+(?:itemprop|name|property)=(["\']?)%s\1)
- [^>]+?content=(["\'])(?P<content>.*?)\2''' % re.escape(name),
+ self._meta_regex(name),
html, display_name, fatal=fatal, group='content', **kwargs)
def _dc_search_uploader(self, html):
@@ -709,20 +731,23 @@ class InfoExtractor(object):
@staticmethod
def _hidden_inputs(html):
- return dict([
- (input.group('name'), input.group('value')) for input in re.finditer(
- r'''(?x)
- <input\s+
- type=(?P<q_hidden>["\'])hidden(?P=q_hidden)\s+
- name=(?P<q_name>["\'])(?P<name>.+?)(?P=q_name)\s+
- (?:id=(?P<q_id>["\']).+?(?P=q_id)\s+)?
- value=(?P<q_value>["\'])(?P<value>.*?)(?P=q_value)
- ''', html)
- ])
+ html = re.sub(r'<!--(?:(?!<!--).)*-->', '', html)
+ hidden_inputs = {}
+ for input in re.findall(r'(?i)<input([^>]+)>', html):
+ if not re.search(r'type=(["\'])(?:hidden|submit)\1', input):
+ continue
+ name = re.search(r'name=(["\'])(?P<value>.+?)\1', input)
+ if not name:
+ continue
+ value = re.search(r'value=(["\'])(?P<value>.*?)\1', input)
+ if not value:
+ continue
+ hidden_inputs[name.group('value')] = value.group('value')
+ return hidden_inputs
def _form_hidden_inputs(self, form_id, html):
form = self._search_regex(
- r'(?s)<form[^>]+?id=(["\'])%s\1[^>]*>(?P<form>.+?)</form>' % form_id,
+ r'(?is)<form[^>]+?id=(["\'])%s\1[^>]*>(?P<form>.+?)</form>' % form_id,
html, '%s form' % form_id, group='form')
return self._hidden_inputs(form)
@@ -967,69 +992,221 @@ class InfoExtractor(object):
self._sort_formats(formats)
return formats
- # TODO: improve extraction
- def _extract_smil_formats(self, smil_url, video_id, fatal=True):
- smil = self._download_xml(
- smil_url, video_id, 'Downloading SMIL file',
- 'Unable to download SMIL file', fatal=fatal)
+ @staticmethod
+ def _xpath_ns(path, namespace=None):
+ if not namespace:
+ return path
+ out = []
+ for c in path.split('/'):
+ if not c or c == '.':
+ out.append(c)
+ else:
+ out.append('{%s}%s' % (namespace, c))
+ return '/'.join(out)
+
+ def _extract_smil_formats(self, smil_url, video_id, fatal=True, f4m_params=None):
+ smil = self._download_smil(smil_url, video_id, fatal=fatal)
+
if smil is False:
assert not fatal
return []
- base = smil.find('./head/meta').get('base')
+ namespace = self._parse_smil_namespace(smil)
+
+ return self._parse_smil_formats(
+ smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params)
+
+ def _extract_smil_info(self, smil_url, video_id, fatal=True, f4m_params=None):
+ smil = self._download_smil(smil_url, video_id, fatal=fatal)
+ if smil is False:
+ return {}
+ return self._parse_smil(smil, smil_url, video_id, f4m_params=f4m_params)
+
+ def _download_smil(self, smil_url, video_id, fatal=True):
+ return self._download_xml(
+ smil_url, video_id, 'Downloading SMIL file',
+ 'Unable to download SMIL file', fatal=fatal)
+
+ def _parse_smil(self, smil, smil_url, video_id, f4m_params=None):
+ namespace = self._parse_smil_namespace(smil)
+
+ formats = self._parse_smil_formats(
+ smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params)
+ subtitles = self._parse_smil_subtitles(smil, namespace=namespace)
+
+ video_id = os.path.splitext(url_basename(smil_url))[0]
+ title = None
+ description = None
+ for meta in smil.findall(self._xpath_ns('./head/meta', namespace)):
+ name = meta.attrib.get('name')
+ content = meta.attrib.get('content')
+ if not name or not content:
+ continue
+ if not title and name == 'title':
+ title = content
+ elif not description and name in ('description', 'abstract'):
+ description = content
+
+ return {
+ 'id': video_id,
+ 'title': title or video_id,
+ 'description': description,
+ 'formats': formats,
+ 'subtitles': subtitles,
+ }
+
+ def _parse_smil_namespace(self, smil):
+ return self._search_regex(
+ r'(?i)^{([^}]+)?}smil$', smil.tag, 'namespace', default=None)
+
+ def _parse_smil_formats(self, smil, smil_url, video_id, namespace=None, f4m_params=None, transform_rtmp_url=None):
+ base = smil_url
+ for meta in smil.findall(self._xpath_ns('./head/meta', namespace)):
+ b = meta.get('base') or meta.get('httpBase')
+ if b:
+ base = b
+ break
formats = []
rtmp_count = 0
- if smil.findall('./body/seq/video'):
- video = smil.findall('./body/seq/video')[0]
- fmts, rtmp_count = self._parse_smil_video(video, video_id, base, rtmp_count)
- formats.extend(fmts)
- else:
- for video in smil.findall('./body/switch/video'):
- fmts, rtmp_count = self._parse_smil_video(video, video_id, base, rtmp_count)
- formats.extend(fmts)
+ http_count = 0
+
+ videos = smil.findall(self._xpath_ns('.//video', namespace))
+ for video in videos:
+ src = video.get('src')
+ if not src:
+ continue
+
+ bitrate = int_or_none(video.get('system-bitrate') or video.get('systemBitrate'), 1000)
+ filesize = int_or_none(video.get('size') or video.get('fileSize'))
+ width = int_or_none(video.get('width'))
+ height = int_or_none(video.get('height'))
+ proto = video.get('proto')
+ ext = video.get('ext')
+ src_ext = determine_ext(src)
+ streamer = video.get('streamer') or base
+
+ if proto == 'rtmp' or streamer.startswith('rtmp'):
+ rtmp_count += 1
+ formats.append({
+ 'url': streamer,
+ 'play_path': src,
+ 'ext': 'flv',
+ 'format_id': 'rtmp-%d' % (rtmp_count if bitrate is None else bitrate),
+ 'tbr': bitrate,
+ 'filesize': filesize,
+ 'width': width,
+ 'height': height,
+ })
+ if transform_rtmp_url:
+ streamer, src = transform_rtmp_url(streamer, src)
+ formats[-1].update({
+ 'url': streamer,
+ 'play_path': src,
+ })
+ continue
+
+ src_url = src if src.startswith('http') else compat_urlparse.urljoin(base, src)
+
+ if proto == 'm3u8' or src_ext == 'm3u8':
+ formats.extend(self._extract_m3u8_formats(
+ src_url, video_id, ext or 'mp4', m3u8_id='hls'))
+ continue
+
+ if src_ext == 'f4m':
+ f4m_url = src_url
+ if not f4m_params:
+ f4m_params = {
+ 'hdcore': '3.2.0',
+ 'plugin': 'flowplayer-3.2.0.1',
+ }
+ f4m_url += '&' if '?' in f4m_url else '?'
+ f4m_url += compat_urllib_parse.urlencode(f4m_params)
+ formats.extend(self._extract_f4m_formats(f4m_url, video_id, f4m_id='hds'))
+ continue
+
+ if src_url.startswith('http'):
+ http_count += 1
+ formats.append({
+ 'url': src_url,
+ 'ext': ext or src_ext or 'flv',
+ 'format_id': 'http-%d' % (bitrate or http_count),
+ 'tbr': bitrate,
+ 'filesize': filesize,
+ 'width': width,
+ 'height': height,
+ })
+ continue
self._sort_formats(formats)
return formats
- def _parse_smil_video(self, video, video_id, base, rtmp_count):
- src = video.get('src')
- if not src:
- return [], rtmp_count
- bitrate = int_or_none(video.get('system-bitrate') or video.get('systemBitrate'), 1000)
- width = int_or_none(video.get('width'))
- height = int_or_none(video.get('height'))
- proto = video.get('proto')
- if not proto:
- if base:
- if base.startswith('rtmp'):
- proto = 'rtmp'
- elif base.startswith('http'):
- proto = 'http'
- ext = video.get('ext')
- if proto == 'm3u8':
- return self._extract_m3u8_formats(src, video_id, ext), rtmp_count
- elif proto == 'rtmp':
- rtmp_count += 1
- streamer = video.get('streamer') or base
- return ([{
- 'url': streamer,
- 'play_path': src,
- 'ext': 'flv',
- 'format_id': 'rtmp-%d' % (rtmp_count if bitrate is None else bitrate),
- 'tbr': bitrate,
- 'width': width,
- 'height': height,
- }], rtmp_count)
- elif proto.startswith('http'):
- return ([{
- 'url': base + src,
- 'ext': ext or 'flv',
- 'tbr': bitrate,
- 'width': width,
- 'height': height,
- }], rtmp_count)
+ def _parse_smil_subtitles(self, smil, namespace=None, subtitles_lang='en'):
+ subtitles = {}
+ for num, textstream in enumerate(smil.findall(self._xpath_ns('.//textstream', namespace))):
+ src = textstream.get('src')
+ if not src:
+ continue
+ ext = textstream.get('ext') or determine_ext(src)
+ if not ext:
+ type_ = textstream.get('type')
+ SUBTITLES_TYPES = {
+ 'text/vtt': 'vtt',
+ 'text/srt': 'srt',
+ 'application/smptett+xml': 'tt',
+ }
+ if type_ in SUBTITLES_TYPES:
+ ext = SUBTITLES_TYPES[type_]
+ lang = textstream.get('systemLanguage') or textstream.get('systemLanguageName') or textstream.get('lang') or subtitles_lang
+ subtitles.setdefault(lang, []).append({
+ 'url': src,
+ 'ext': ext,
+ })
+ return subtitles
+
+ def _extract_xspf_playlist(self, playlist_url, playlist_id, fatal=True):
+ xspf = self._download_xml(
+ playlist_url, playlist_id, 'Downloading xspf playlist',
+ 'Unable to download xspf manifest', fatal=fatal)
+ if xspf is False:
+ return []
+ return self._parse_xspf(xspf, playlist_id)
+
+ def _parse_xspf(self, playlist, playlist_id):
+ NS_MAP = {
+ 'xspf': 'http://xspf.org/ns/0/',
+ 's1': 'http://static.streamone.nl/player/ns/0',
+ }
+
+ entries = []
+ for track in playlist.findall(xpath_with_ns('./xspf:trackList/xspf:track', NS_MAP)):
+ title = xpath_text(
+ track, xpath_with_ns('./xspf:title', NS_MAP), 'title', default=playlist_id)
+ description = xpath_text(
+ track, xpath_with_ns('./xspf:annotation', NS_MAP), 'description')
+ thumbnail = xpath_text(
+ track, xpath_with_ns('./xspf:image', NS_MAP), 'thumbnail')
+ duration = float_or_none(
+ xpath_text(track, xpath_with_ns('./xspf:duration', NS_MAP), 'duration'), 1000)
+
+ formats = [{
+ 'url': location.text,
+ 'format_id': location.get(xpath_with_ns('s1:label', NS_MAP)),
+ 'width': int_or_none(location.get(xpath_with_ns('s1:width', NS_MAP))),
+ 'height': int_or_none(location.get(xpath_with_ns('s1:height', NS_MAP))),
+ } for location in track.findall(xpath_with_ns('./xspf:location', NS_MAP))]
+ self._sort_formats(formats)
+
+ entries.append({
+ 'id': playlist_id,
+ 'title': title,
+ 'description': description,
+ 'thumbnail': thumbnail,
+ 'duration': duration,
+ 'formats': formats,
+ })
+ return entries
def _live_title(self, name):
""" Generate the title for a live video """
@@ -1065,6 +1242,12 @@ class InfoExtractor(object):
None, '/', True, False, expire_time, '', None, None, None)
self._downloader.cookiejar.set_cookie(cookie)
+ def _get_cookies(self, url):
+ """ Return a compat_cookies.SimpleCookie with the cookies for the url """
+ req = compat_urllib_request.Request(url)
+ self._downloader.cookiejar.add_cookie_header(req)
+ return compat_cookies.SimpleCookie(req.get_header('Cookie'))
+
def get_testcases(self, include_onlymatching=False):
t = getattr(self, '_TEST', None)
if t:
@@ -1103,6 +1286,23 @@ class InfoExtractor(object):
def _get_subtitles(self, *args, **kwargs):
raise NotImplementedError("This method must be implemented by subclasses")
+ @staticmethod
+ def _merge_subtitle_items(subtitle_list1, subtitle_list2):
+ """ Merge subtitle items for one language. Items with duplicated URLs
+ will be dropped. """
+ list1_urls = set([item['url'] for item in subtitle_list1])
+ ret = list(subtitle_list1)
+ ret.extend([item for item in subtitle_list2 if item['url'] not in list1_urls])
+ return ret
+
+ @classmethod
+ def _merge_subtitles(cls, subtitle_dict1, subtitle_dict2):
+ """ Merge two subtitle dictionaries, language by language. """
+ ret = dict(subtitle_dict1)
+ for lang in subtitle_dict2:
+ ret[lang] = cls._merge_subtitle_items(subtitle_dict1.get(lang, []), subtitle_dict2[lang])
+ return ret
+
def extract_automatic_captions(self, *args, **kwargs):
if (self._downloader.params.get('writeautomaticsub', False) or
self._downloader.params.get('listsubtitles')):
@@ -1116,7 +1316,7 @@ class InfoExtractor(object):
class SearchInfoExtractor(InfoExtractor):
"""
Base class for paged search queries extractors.
- They accept urls in the format _SEARCH_KEY(|all|[0-9]):{query}
+ They accept URLs in the format _SEARCH_KEY(|all|[0-9]):{query}
Instances should define _SEARCH_KEY and _MAX_RESULTS.
"""
diff --git a/youtube_dl/extractor/crunchyroll.py b/youtube_dl/extractor/crunchyroll.py
index d1b6d7366..95952bc29 100644
--- a/youtube_dl/extractor/crunchyroll.py
+++ b/youtube_dl/extractor/crunchyroll.py
@@ -14,20 +14,40 @@ from ..compat import (
compat_urllib_parse,
compat_urllib_parse_unquote,
compat_urllib_request,
+ compat_urlparse,
)
from ..utils import (
ExtractorError,
bytes_to_intlist,
intlist_to_bytes,
+ int_or_none,
+ remove_end,
unified_strdate,
urlencode_postdata,
+ xpath_text,
)
from ..aes import (
aes_cbc_decrypt,
)
-class CrunchyrollIE(InfoExtractor):
+class CrunchyrollBaseIE(InfoExtractor):
+ def _download_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, tries=1, timeout=5, encoding=None):
+ request = (url_or_request if isinstance(url_or_request, compat_urllib_request.Request)
+ else compat_urllib_request.Request(url_or_request))
+ # Accept-Language must be set explicitly to accept any language to avoid issues
+ # similar to https://github.com/rg3/youtube-dl/issues/6797.
+ # Along with the IP address, Crunchyroll uses Accept-Language to guess whether georestriction
+ # should be imposed or not (from what I can see it just takes the first language,
+ # ignoring the priority, and requires it to correspond to the IP). This causes
+ # Crunchyroll not to work in georestricted cases in some browsers that don't place
+ # the locale language first in the header. However, allowing any language seems to work around the issue.
+ request.add_header('Accept-Language', '*')
+ return super(CrunchyrollBaseIE, self)._download_webpage(
+ request, video_id, note, errnote, fatal, tries, timeout, encoding)
+
+
+class CrunchyrollIE(CrunchyrollBaseIE):
_VALID_URL = r'https?://(?:(?P<prefix>www|m)\.)?(?P<url>crunchyroll\.(?:com|fr)/(?:media(?:-|/\?id=)|[^/]*/[^/?&]*?)(?P<video_id>[0-9]+))(?:[/?&]|$)'
_NETRC_MACHINE = 'crunchyroll'
_TESTS = [{
@@ -235,7 +255,9 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
webpage_url = 'http://www.' + mobj.group('url')
webpage = self._download_webpage(webpage_url, video_id, 'Downloading webpage')
- note_m = self._html_search_regex(r'<div class="showmedia-trailer-notice">(.+?)</div>', webpage, 'trailer-notice', default='')
+ note_m = self._html_search_regex(
+ r'<div class="showmedia-trailer-notice">(.+?)</div>',
+ webpage, 'trailer-notice', default='')
if note_m:
raise ExtractorError(note_m)
@@ -245,15 +267,22 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
if msg.get('type') == 'error':
raise ExtractorError('crunchyroll returned error: %s' % msg['message_body'], expected=True)
+ if 'To view this, please log in to verify you are 18 or older.' in webpage:
+ self.raise_login_required()
+
video_title = self._html_search_regex(r'<h1[^>]*>(.+?)</h1>', webpage, 'video_title', flags=re.DOTALL)
video_title = re.sub(r' {2,}', ' ', video_title)
video_description = self._html_search_regex(r'"description":"([^"]+)', webpage, 'video_description', default='')
if not video_description:
video_description = None
- video_upload_date = self._html_search_regex(r'<div>Availability for free users:(.+?)</div>', webpage, 'video_upload_date', fatal=False, flags=re.DOTALL)
+ video_upload_date = self._html_search_regex(
+ [r'<div>Availability for free users:(.+?)</div>', r'<div>[^<>]+<span>\s*(.+?\d{4})\s*</span></div>'],
+ webpage, 'video_upload_date', fatal=False, flags=re.DOTALL)
if video_upload_date:
video_upload_date = unified_strdate(video_upload_date)
- video_uploader = self._html_search_regex(r'<div>\s*Publisher:(.+?)</div>', webpage, 'video_uploader', fatal=False, flags=re.DOTALL)
+ video_uploader = self._html_search_regex(
+ r'<a[^>]+href="/publisher/[^"]+"[^>]*>([^<]+)</a>', webpage,
+ 'video_uploader', fatal=False)
playerdata_url = compat_urllib_parse_unquote(self._html_search_regex(r'"config_url":"([^"]+)', webpage, 'playerdata_url'))
playerdata_req = compat_urllib_request.Request(playerdata_url)
@@ -279,13 +308,33 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
stream_info = streamdata.find('./{default}preload/stream_info')
video_url = stream_info.find('./host').text
video_play_path = stream_info.find('./file').text
- formats.append({
+ metadata = stream_info.find('./metadata')
+ format_info = {
+ 'format': video_format,
+ 'format_id': video_format,
+ 'height': int_or_none(xpath_text(metadata, './height')),
+ 'width': int_or_none(xpath_text(metadata, './width')),
+ }
+
+ if '.fplive.net/' in video_url:
+ video_url = re.sub(r'^rtmpe?://', 'http://', video_url.strip())
+ parsed_video_url = compat_urlparse.urlparse(video_url)
+ direct_video_url = compat_urlparse.urlunparse(parsed_video_url._replace(
+ netloc='v.lvlt.crcdn.net',
+ path='%s/%s' % (remove_end(parsed_video_url.path, '/'), video_play_path.split(':')[-1])))
+ if self._is_valid_url(direct_video_url, video_id, video_format):
+ format_info.update({
+ 'url': direct_video_url,
+ })
+ formats.append(format_info)
+ continue
+
+ format_info.update({
'url': video_url,
'play_path': video_play_path,
'ext': 'flv',
- 'format': video_format,
- 'format_id': video_format,
})
+ formats.append(format_info)
subtitles = self.extract_subtitles(video_id, webpage)
@@ -301,7 +350,7 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
}
-class CrunchyrollShowPlaylistIE(InfoExtractor):
+class CrunchyrollShowPlaylistIE(CrunchyrollBaseIE):
IE_NAME = "crunchyroll:playlist"
_VALID_URL = r'https?://(?:(?P<prefix>www|m)\.)?(?P<url>crunchyroll\.com/(?!(?:news|anime-news|library|forum|launchcalendar|lineup|store|comics|freetrial|login))(?P<id>[\w\-]+))/?$'
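
The new fplive.net branch rebuilds a direct HTTP URL from the RTMP host and play path via urlparse/urlunparse. A rough standalone sketch of that rewrite (the hostnames and paths are illustrative, and the stdlib urllib.parse stands in for the compat_urlparse shim used in the codebase):

    import re
    from urllib.parse import urlparse, urlunparse

    def direct_url_from_rtmp(video_url, play_path):
        # Swap the rtmp(e) scheme for http, point at the CDN host, and append
        # the play path minus its "mp4:"-style prefix.
        video_url = re.sub(r'^rtmpe?://', 'http://', video_url.strip())
        parsed = urlparse(video_url)
        return urlunparse(parsed._replace(
            netloc='v.lvlt.crcdn.net',
            path='%s/%s' % (parsed.path.rstrip('/'), play_path.split(':')[-1])))

    print(direct_url_from_rtmp(
        'rtmpe://cp12345.fplive.net/cp12345/', 'mp4:video/example_640x360.mp4'))
    # http://v.lvlt.crcdn.net/cp12345/video/example_640x360.mp4
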
diff --git a/youtube_dl/extractor/dailymotion.py b/youtube_dl/extractor/dailymotion.py
index 1a41c0db1..2d90b2224 100644
--- a/youtube_dl/extractor/dailymotion.py
+++ b/youtube_dl/extractor/dailymotion.py
@@ -13,8 +13,9 @@ from ..compat import (
)
from ..utils import (
ExtractorError,
+ determine_ext,
int_or_none,
- orderedSet,
+ parse_iso8601,
str_to_int,
unescapeHTML,
)
@@ -28,10 +29,16 @@ class DailymotionBaseInfoExtractor(InfoExtractor):
request.add_header('Cookie', 'family_filter=off; ff=off')
return request
+ def _download_webpage_handle_no_ff(self, url, *args, **kwargs):
+ request = self._build_request(url)
+ return self._download_webpage_handle(request, *args, **kwargs)
+
+ def _download_webpage_no_ff(self, url, *args, **kwargs):
+ request = self._build_request(url)
+ return self._download_webpage(request, *args, **kwargs)
-class DailymotionIE(DailymotionBaseInfoExtractor):
- """Information Extractor for Dailymotion"""
+class DailymotionIE(DailymotionBaseInfoExtractor):
_VALID_URL = r'(?i)(?:https?://)?(?:(www|touch)\.)?dailymotion\.[a-z]{2,3}/(?:(embed|#)/)?video/(?P<id>[^/?_]+)'
IE_NAME = 'dailymotion'
@@ -50,10 +57,17 @@ class DailymotionIE(DailymotionBaseInfoExtractor):
'info_dict': {
'id': 'x2iuewm',
'ext': 'mp4',
- 'uploader': 'IGN',
'title': 'Steam Machine Models, Pricing Listed on Steam Store - IGN News',
- 'upload_date': '20150306',
+ 'description': 'Several come bundled with the Steam Controller.',
+ 'thumbnail': 're:^https?:.*\.(?:jpg|png)$',
'duration': 74,
+ 'timestamp': 1425657362,
+ 'upload_date': '20150306',
+ 'uploader': 'IGN',
+ 'uploader_id': 'xijv66',
+ 'age_limit': 0,
+ 'view_count': int,
+ 'comment_count': int,
}
},
# Vevo video
@@ -87,38 +101,106 @@ class DailymotionIE(DailymotionBaseInfoExtractor):
def _real_extract(self, url):
video_id = self._match_id(url)
- url = 'https://www.dailymotion.com/video/%s' % video_id
- # Retrieve video webpage to extract further information
- request = self._build_request(url)
- webpage = self._download_webpage(request, video_id)
+ webpage = self._download_webpage_no_ff(
+ 'https://www.dailymotion.com/video/%s' % video_id, video_id)
+
+ age_limit = self._rta_search(webpage)
- # Extract URL, uploader and title from webpage
- self.report_extraction(video_id)
+ description = self._og_search_description(webpage) or self._html_search_meta(
+ 'description', webpage, 'description')
- # It may just embed a vevo video:
- m_vevo = re.search(
+ view_count = str_to_int(self._search_regex(
+ [r'<meta[^>]+itemprop="interactionCount"[^>]+content="UserPlays:(\d+)"',
+ r'video_views_count[^>]+>\s+([\d\.,]+)'],
+ webpage, 'view count', fatal=False))
+ comment_count = int_or_none(self._search_regex(
+ r'<meta[^>]+itemprop="interactionCount"[^>]+content="UserComments:(\d+)"',
+ webpage, 'comment count', fatal=False))
+
+ player_v5 = self._search_regex(
+ r'playerV5\s*=\s*dmp\.create\([^,]+?,\s*({.+?})\);',
+ webpage, 'player v5', default=None)
+ if player_v5:
+ player = self._parse_json(player_v5, video_id)
+ metadata = player['metadata']
+ formats = []
+ for quality, media_list in metadata['qualities'].items():
+ for media in media_list:
+ media_url = media.get('url')
+ if not media_url:
+ continue
+ type_ = media.get('type')
+ if type_ == 'application/vnd.lumberjack.manifest':
+ continue
+ if type_ == 'application/x-mpegURL' or determine_ext(media_url) == 'm3u8':
+ formats.extend(self._extract_m3u8_formats(
+ media_url, video_id, 'mp4', m3u8_id='hls'))
+ else:
+ f = {
+ 'url': media_url,
+ 'format_id': quality,
+ }
+ m = re.search(r'H264-(?P<width>\d+)x(?P<height>\d+)', media_url)
+ if m:
+ f.update({
+ 'width': int(m.group('width')),
+ 'height': int(m.group('height')),
+ })
+ formats.append(f)
+ self._sort_formats(formats)
+
+ title = metadata['title']
+ duration = int_or_none(metadata.get('duration'))
+ timestamp = int_or_none(metadata.get('created_time'))
+ thumbnail = metadata.get('poster_url')
+ uploader = metadata.get('owner', {}).get('screenname')
+ uploader_id = metadata.get('owner', {}).get('id')
+
+ subtitles = {}
+ for subtitle_lang, subtitle in metadata.get('subtitles', {}).get('data', {}).items():
+ subtitles[subtitle_lang] = [{
+ 'ext': determine_ext(subtitle_url),
+ 'url': subtitle_url,
+ } for subtitle_url in subtitle.get('urls', [])]
+
+ return {
+ 'id': video_id,
+ 'title': title,
+ 'description': description,
+ 'thumbnail': thumbnail,
+ 'duration': duration,
+ 'timestamp': timestamp,
+ 'uploader': uploader,
+ 'uploader_id': uploader_id,
+ 'age_limit': age_limit,
+ 'view_count': view_count,
+ 'comment_count': comment_count,
+ 'formats': formats,
+ 'subtitles': subtitles,
+ }
+
+ # vevo embed
+ vevo_id = self._search_regex(
r'<link rel="video_src" href="[^"]*?vevo.com[^"]*?video=(?P<id>[\w]*)',
- webpage)
- if m_vevo is not None:
- vevo_id = m_vevo.group('id')
- self.to_screen('Vevo video detected: %s' % vevo_id)
- return self.url_result('vevo:%s' % vevo_id, ie='Vevo')
+ webpage, 'vevo embed', default=None)
+ if vevo_id:
+ return self.url_result('vevo:%s' % vevo_id, 'Vevo')
- age_limit = self._rta_search(webpage)
+ # fallback old player
+ embed_page = self._download_webpage_no_ff(
+ 'https://www.dailymotion.com/embed/video/%s' % video_id,
+ video_id, 'Downloading embed page')
+
+ timestamp = parse_iso8601(self._html_search_meta(
+ 'video:release_date', webpage, 'upload date'))
+
+ info = self._parse_json(
+ self._search_regex(
+ r'var info = ({.*?}),$', embed_page,
+ 'video info', flags=re.MULTILINE),
+ video_id)
- video_upload_date = None
- mobj = re.search(r'<meta property="video:release_date" content="([0-9]{4})-([0-9]{2})-([0-9]{2}).+?"/>', webpage)
- if mobj is not None:
- video_upload_date = mobj.group(1) + mobj.group(2) + mobj.group(3)
-
- embed_url = 'https://www.dailymotion.com/embed/video/%s' % video_id
- embed_request = self._build_request(embed_url)
- embed_page = self._download_webpage(
- embed_request, video_id, 'Downloading embed page')
- info = self._search_regex(r'var info = ({.*?}),$', embed_page,
- 'video info', flags=re.MULTILINE)
- info = json.loads(info)
if info.get('error') is not None:
msg = 'Couldn\'t get video, Dailymotion says: %s' % info['error']['title']
raise ExtractorError(msg, expected=True)
@@ -139,16 +221,11 @@ class DailymotionIE(DailymotionBaseInfoExtractor):
'width': width,
'height': height,
})
- if not formats:
- raise ExtractorError('Unable to extract video URL')
+ self._sort_formats(formats)
# subtitles
video_subtitles = self.extract_subtitles(video_id, webpage)
- view_count = str_to_int(self._search_regex(
- r'video_views_count[^>]+>\s+([\d\.,]+)',
- webpage, 'view count', fatal=False))
-
title = self._og_search_title(webpage, default=None)
if title is None:
title = self._html_search_regex(
@@ -159,8 +236,9 @@ class DailymotionIE(DailymotionBaseInfoExtractor):
'id': video_id,
'formats': formats,
'uploader': info['owner.screenname'],
- 'upload_date': video_upload_date,
+ 'timestamp': timestamp,
'title': title,
+ 'description': description,
'subtitles': video_subtitles,
'thumbnail': info['thumbnail_url'],
'age_limit': age_limit,
@@ -199,18 +277,26 @@ class DailymotionPlaylistIE(DailymotionBaseInfoExtractor):
}]
def _extract_entries(self, id):
- video_ids = []
+ video_ids = set()
+ processed_urls = set()
for pagenum in itertools.count(1):
- request = self._build_request(self._PAGE_TEMPLATE % (id, pagenum))
- webpage = self._download_webpage(request,
- id, 'Downloading page %s' % pagenum)
+ page_url = self._PAGE_TEMPLATE % (id, pagenum)
+ webpage, urlh = self._download_webpage_handle_no_ff(
+ page_url, id, 'Downloading page %s' % pagenum)
+ if urlh.geturl() in processed_urls:
+ self.report_warning('Stopped at duplicated page %s, which is the same as %s' % (
+ page_url, urlh.geturl()), id)
+ break
- video_ids.extend(re.findall(r'data-xid="(.+?)"', webpage))
+ processed_urls.add(urlh.geturl())
+
+ for video_id in re.findall(r'data-xid="(.+?)"', webpage):
+ if video_id not in video_ids:
+ yield self.url_result('http://www.dailymotion.com/video/%s' % video_id, 'Dailymotion')
+ video_ids.add(video_id)
if re.search(self._MORE_PAGES_INDICATOR, webpage) is None:
break
- return [self.url_result('http://www.dailymotion.com/video/%s' % video_id, 'Dailymotion')
- for video_id in orderedSet(video_ids)]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
@@ -227,7 +313,7 @@ class DailymotionPlaylistIE(DailymotionBaseInfoExtractor):
class DailymotionUserIE(DailymotionPlaylistIE):
IE_NAME = 'dailymotion:user'
- _VALID_URL = r'https?://(?:www\.)?dailymotion\.[a-z]{2,3}/(?:(?:old/)?user/)?(?P<user>[^/]+)$'
+ _VALID_URL = r'https?://(?:www\.)?dailymotion\.[a-z]{2,3}/(?!(?:embed|#|video|playlist)/)(?:(?:old/)?user/)?(?P<user>[^/]+)'
_PAGE_TEMPLATE = 'http://www.dailymotion.com/user/%s/%s'
_TESTS = [{
'url': 'https://www.dailymotion.com/user/nqtv',
@@ -236,6 +322,17 @@ class DailymotionUserIE(DailymotionPlaylistIE):
'title': 'Rémi Gaillard',
},
'playlist_mincount': 100,
+ }, {
+ 'url': 'http://www.dailymotion.com/user/UnderProject',
+ 'info_dict': {
+ 'id': 'UnderProject',
+ 'title': 'UnderProject',
+ },
+ 'playlist_mincount': 1800,
+ 'expected_warnings': [
+ 'Stopped at duplicated page',
+ ],
+ 'skip': 'Takes too long',
}]
def _real_extract(self, url):
@@ -286,8 +383,7 @@ class DailymotionCloudIE(DailymotionBaseInfoExtractor):
def _real_extract(self, url):
video_id = self._match_id(url)
- request = self._build_request(url)
- webpage = self._download_webpage(request, video_id)
+ webpage = self._download_webpage_no_ff(url, video_id)
title = self._html_search_regex(r'<title>([^>]+)</title>', webpage, 'title')
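
_extract_entries now yields results lazily and stops as soon as pagination loops back onto a page it has already fetched (out-of-range page numbers get redirected to the last valid page, hence the duplicated-page warning). The same guard, sketched independently of the extractor with a stubbed fetch function:

    import itertools

    def paginate(fetch_page):
        # fetch_page(n) -> (final_url, ids); stop when a redirect lands on a URL
        # we have already processed, which signals the last real page.
        seen_urls, seen_ids = set(), set()
        for pagenum in itertools.count(1):
            final_url, ids = fetch_page(pagenum)
            if final_url in seen_urls:
                break
            seen_urls.add(final_url)
            for video_id in ids:
                if video_id not in seen_ids:
                    seen_ids.add(video_id)
                    yield video_id

    # Toy fetcher: pages 3 and above "redirect" back to page 2.
    def fake_fetch(n):
        n = min(n, 2)
        return 'page-%d' % n, ['vid%d' % (2 * n), 'vid%d' % (2 * n + 1)]

    print(list(paginate(fake_fetch)))  # ['vid2', 'vid3', 'vid4', 'vid5']
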
diff --git a/youtube_dl/extractor/dcn.py b/youtube_dl/extractor/dcn.py
new file mode 100644
index 000000000..6f2fea5ff
--- /dev/null
+++ b/youtube_dl/extractor/dcn.py
@@ -0,0 +1,84 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..compat import (
+ compat_urllib_parse,
+ compat_urllib_request,
+)
+from ..utils import (
+ int_or_none,
+ parse_iso8601,
+)
+
+
+class DCNIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:www\.)?dcndigital\.ae/(?:#/)?(?:video/.+|show/\d+/.+?)/(?P<id>\d+)'
+ _TEST = {
+ 'url': 'http://www.dcndigital.ae/#/show/199074/%D8%B1%D8%AD%D9%84%D8%A9-%D8%A7%D9%84%D8%B9%D9%85%D8%B1-%D8%A7%D9%84%D8%AD%D9%84%D9%82%D8%A9-1/17375/6887',
+ 'info_dict':
+ {
+ 'id': '17375',
+ 'ext': 'mp4',
+ 'title': 'رحلة العمر : الحلقة 1',
+ 'description': 'md5:0156e935d870acb8ef0a66d24070c6d6',
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ 'duration': 2041,
+ 'timestamp': 1227504126,
+ 'upload_date': '20081124',
+ },
+ 'params': {
+ # m3u8 download
+ 'skip_download': True,
+ },
+ }
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+
+ request = compat_urllib_request.Request(
+ 'http://admin.mangomolo.com/analytics/index.php/plus/video?id=%s' % video_id,
+ headers={'Origin': 'http://www.dcndigital.ae'})
+
+ video = self._download_json(request, video_id)
+ title = video.get('title_en') or video['title_ar']
+
+ webpage = self._download_webpage(
+ 'http://admin.mangomolo.com/analytics/index.php/customers/embed/video?' +
+ compat_urllib_parse.urlencode({
+ 'id': video['id'],
+ 'user_id': video['user_id'],
+ 'signature': video['signature'],
+ 'countries': 'Q0M=',
+ 'filter': 'DENY',
+ }), video_id)
+
+ m3u8_url = self._html_search_regex(r'file:\s*"([^"]+)', webpage, 'm3u8 url')
+ formats = self._extract_m3u8_formats(
+ m3u8_url, video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls')
+
+ rtsp_url = self._search_regex(
+ r'<a[^>]+href="(rtsp://[^"]+)"', webpage, 'rtsp url', fatal=False)
+ if rtsp_url:
+ formats.append({
+ 'url': rtsp_url,
+ 'format_id': 'rtsp',
+ })
+
+ self._sort_formats(formats)
+
+ img = video.get('img')
+ thumbnail = 'http://admin.mangomolo.com/analytics/%s' % img if img else None
+ duration = int_or_none(video.get('duration'))
+ description = video.get('description_en') or video.get('description_ar')
+ timestamp = parse_iso8601(video.get('create_time') or video.get('update_time'), ' ')
+
+ return {
+ 'id': video_id,
+ 'title': title,
+ 'description': description,
+ 'thumbnail': thumbnail,
+ 'duration': duration,
+ 'timestamp': timestamp,
+ 'formats': formats,
+ }
diff --git a/youtube_dl/extractor/dhm.py b/youtube_dl/extractor/dhm.py
index 3ed1f1663..44e0c5d4d 100644
--- a/youtube_dl/extractor/dhm.py
+++ b/youtube_dl/extractor/dhm.py
@@ -1,10 +1,7 @@
from __future__ import unicode_literals
from .common import InfoExtractor
-from ..utils import (
- xpath_text,
- parse_duration,
-)
+from ..utils import parse_duration
class DHMIE(InfoExtractor):
@@ -34,24 +31,14 @@ class DHMIE(InfoExtractor):
}]
def _real_extract(self, url):
- video_id = self._match_id(url)
+ playlist_id = self._match_id(url)
- webpage = self._download_webpage(url, video_id)
+ webpage = self._download_webpage(url, playlist_id)
playlist_url = self._search_regex(
r"file\s*:\s*'([^']+)'", webpage, 'playlist url')
- playlist = self._download_xml(playlist_url, video_id)
-
- track = playlist.find(
- './{http://xspf.org/ns/0/}trackList/{http://xspf.org/ns/0/}track')
-
- video_url = xpath_text(
- track, './{http://xspf.org/ns/0/}location',
- 'video url', fatal=True)
- thumbnail = xpath_text(
- track, './{http://xspf.org/ns/0/}image',
- 'thumbnail')
+ entries = self._extract_xspf_playlist(playlist_url, playlist_id)
title = self._search_regex(
[r'dc:title="([^"]+)"', r'<title> &raquo;([^<]+)</title>'],
@@ -63,11 +50,10 @@ class DHMIE(InfoExtractor):
r'<em>Length\s*</em>\s*:\s*</strong>([^<]+)',
webpage, 'duration', default=None))
- return {
- 'id': video_id,
- 'url': video_url,
+ entries[0].update({
'title': title,
'description': description,
'duration': duration,
- 'thumbnail': thumbnail,
- }
+ })
+
+ return self.playlist_result(entries, playlist_id)
diff --git a/youtube_dl/extractor/dumpert.py b/youtube_dl/extractor/dumpert.py
index 999fb5620..1f00386fe 100644
--- a/youtube_dl/extractor/dumpert.py
+++ b/youtube_dl/extractor/dumpert.py
@@ -9,8 +9,8 @@ from ..utils import qualities
class DumpertIE(InfoExtractor):
- _VALID_URL = r'https?://(?:www\.)?dumpert\.nl/mediabase/(?P<id>[0-9]+/[0-9a-zA-Z]+)'
- _TEST = {
+ _VALID_URL = r'https?://(?:www\.)?dumpert\.nl/(?:mediabase|embed)/(?P<id>[0-9]+/[0-9a-zA-Z]+)'
+ _TESTS = [{
'url': 'http://www.dumpert.nl/mediabase/6646981/951bc60f/',
'md5': '1b9318d7d5054e7dcb9dc7654f21d643',
'info_dict': {
@@ -20,11 +20,15 @@ class DumpertIE(InfoExtractor):
'description': 'Niet schrikken hoor',
'thumbnail': 're:^https?://.*\.jpg$',
}
- }
+ }, {
+ 'url': 'http://www.dumpert.nl/embed/6675421/dc440fe7/',
+ 'only_matching': True,
+ }]
def _real_extract(self, url):
video_id = self._match_id(url)
+ url = 'https://www.dumpert.nl/mediabase/' + video_id
req = compat_urllib_request.Request(url)
req.add_header('Cookie', 'nsfw=1; cpc=10')
webpage = self._download_webpage(req, video_id)
diff --git a/youtube_dl/extractor/eagleplatform.py b/youtube_dl/extractor/eagleplatform.py
index 688dfc2f7..a1ee51568 100644
--- a/youtube_dl/extractor/eagleplatform.py
+++ b/youtube_dl/extractor/eagleplatform.py
@@ -79,7 +79,7 @@ class EaglePlatformIE(InfoExtractor):
age_limit = 0 if age_restriction == 'allow_all' else 18
m3u8_data = self._download_json(
- media['sources']['secure_m3u8']['auto'],
+ self._proto_relative_url(media['sources']['secure_m3u8']['auto'], 'http:'),
video_id, 'Downloading m3u8 JSON')
formats = self._extract_m3u8_formats(
diff --git a/youtube_dl/extractor/eroprofile.py b/youtube_dl/extractor/eroprofile.py
index 316033cf1..7fcd0151d 100644
--- a/youtube_dl/extractor/eroprofile.py
+++ b/youtube_dl/extractor/eroprofile.py
@@ -71,8 +71,7 @@ class EroProfileIE(InfoExtractor):
m = re.search(r'You must be logged in to view this video\.', webpage)
if m:
- raise ExtractorError(
- 'This video requires login. Please specify a username and password and try again.', expected=True)
+ self.raise_login_required('This video requires login')
video_id = self._search_regex(
[r"glbUpdViews\s*\('\d*','(\d+)'", r'p/report/video/(\d+)'],
diff --git a/youtube_dl/extractor/esri.py b/youtube_dl/extractor/esri.py
new file mode 100644
index 000000000..bf5d2019f
--- /dev/null
+++ b/youtube_dl/extractor/esri.py
@@ -0,0 +1,74 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..compat import compat_urlparse
+from ..utils import (
+ int_or_none,
+ parse_filesize,
+ unified_strdate,
+)
+
+
+class EsriVideoIE(InfoExtractor):
+ _VALID_URL = r'https?://video\.esri\.com/watch/(?P<id>[0-9]+)'
+ _TEST = {
+ 'url': 'https://video.esri.com/watch/1124/arcgis-online-_dash_-developing-applications',
+ 'md5': 'd4aaf1408b221f1b38227a9bbaeb95bc',
+ 'info_dict': {
+ 'id': '1124',
+ 'ext': 'mp4',
+ 'title': 'ArcGIS Online - Developing Applications',
+ 'description': 'Jeremy Bartley demonstrates how to develop applications with ArcGIS Online.',
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ 'duration': 185,
+ 'upload_date': '20120419',
+ }
+ }
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+
+ webpage = self._download_webpage(url, video_id)
+
+ formats = []
+ for width, height, content in re.findall(
+ r'(?s)<li><strong>(\d+)x(\d+):</strong>(.+?)</li>', webpage):
+ for video_url, ext, filesize in re.findall(
+ r'<a[^>]+href="([^"]+)">([^<]+)&nbsp;\(([^<]+)\)</a>', content):
+ formats.append({
+ 'url': compat_urlparse.urljoin(url, video_url),
+ 'ext': ext.lower(),
+ 'format_id': '%s-%s' % (ext.lower(), height),
+ 'width': int(width),
+ 'height': int(height),
+ 'filesize_approx': parse_filesize(filesize),
+ })
+ self._sort_formats(formats)
+
+ title = self._html_search_meta('title', webpage, 'title')
+ description = self._html_search_meta(
+ 'description', webpage, 'description', fatal=False)
+
+ thumbnail = self._html_search_meta('thumbnail', webpage, 'thumbnail', fatal=False)
+ if thumbnail:
+ thumbnail = re.sub(r'_[st]\.jpg$', '_x.jpg', thumbnail)
+
+ duration = int_or_none(self._search_regex(
+ [r'var\s+videoSeconds\s*=\s*(\d+)', r"'duration'\s*:\s*(\d+)"],
+ webpage, 'duration', fatal=False))
+
+ upload_date = unified_strdate(self._html_search_meta(
+ 'last-modified', webpage, 'upload date', fatal=False))
+
+ return {
+ 'id': video_id,
+ 'title': title,
+ 'description': description,
+ 'thumbnail': thumbnail,
+ 'duration': duration,
+ 'upload_date': upload_date,
+ 'formats': formats
+ }
diff --git a/youtube_dl/extractor/facebook.py b/youtube_dl/extractor/facebook.py
index e17bb9aea..178a7ca4c 100644
--- a/youtube_dl/extractor/facebook.py
+++ b/youtube_dl/extractor/facebook.py
@@ -17,6 +17,8 @@ from ..utils import (
int_or_none,
limit_length,
urlencode_postdata,
+ get_element_by_id,
+ clean_html,
)
@@ -42,6 +44,7 @@ class FacebookIE(InfoExtractor):
'id': '637842556329505',
'ext': 'mp4',
'title': 're:Did you know Kei Nishikori is the first Asian man to ever reach a Grand Slam',
+ 'uploader': 'Tennis on Facebook',
}
}, {
'note': 'Video without discernible title',
@@ -50,6 +53,7 @@ class FacebookIE(InfoExtractor):
'id': '274175099429670',
'ext': 'mp4',
'title': 'Facebook video #274175099429670',
+ 'uploader': 'Asif Nawab Butt',
},
'expected_warnings': [
'title'
@@ -161,6 +165,7 @@ class FacebookIE(InfoExtractor):
video_title = limit_length(video_title, 80)
if not video_title:
video_title = 'Facebook video #%s' % video_id
+ uploader = clean_html(get_element_by_id('fbPhotoPageAuthorName', webpage))
return {
'id': video_id,
@@ -168,4 +173,5 @@ class FacebookIE(InfoExtractor):
'formats': formats,
'duration': int_or_none(video_data.get('video_duration')),
'thumbnail': video_data.get('thumbnail_src'),
+ 'uploader': uploader,
}
diff --git a/youtube_dl/extractor/fc2.py b/youtube_dl/extractor/fc2.py
index 1ccc1a964..a406945e8 100644
--- a/youtube_dl/extractor/fc2.py
+++ b/youtube_dl/extractor/fc2.py
@@ -10,12 +10,13 @@ from ..compat import (
compat_urlparse,
)
from ..utils import (
+ encode_dict,
ExtractorError,
)
class FC2IE(InfoExtractor):
- _VALID_URL = r'^http://video\.fc2\.com/(?:[^/]+/)?content/(?P<id>[^/]+)'
+ _VALID_URL = r'^http://video\.fc2\.com/(?:[^/]+/)*content/(?P<id>[^/]+)'
IE_NAME = 'fc2'
_NETRC_MACHINE = 'fc2'
_TESTS = [{
@@ -37,6 +38,9 @@ class FC2IE(InfoExtractor):
'password': '(snip)',
'skip': 'requires actual password'
}
+ }, {
+ 'url': 'http://video.fc2.com/en/a/content/20130926eZpARwsF',
+ 'only_matching': True,
}]
def _login(self):
@@ -52,10 +56,7 @@ class FC2IE(InfoExtractor):
'Submit': ' Login ',
}
- # Convert to UTF-8 *before* urlencode because Python 2.x's urlencode
- # chokes on unicode
- login_form = dict((k.encode('utf-8'), v.encode('utf-8')) for k, v in login_form_strs.items())
- login_data = compat_urllib_parse.urlencode(login_form).encode('utf-8')
+ login_data = compat_urllib_parse.urlencode(encode_dict(login_form_strs)).encode('utf-8')
request = compat_urllib_request.Request(
'https://secure.id.fc2.com/index.php?mode=login&switch_language=en', login_data)
@@ -80,13 +81,13 @@ class FC2IE(InfoExtractor):
title = self._og_search_title(webpage)
thumbnail = self._og_search_thumbnail(webpage)
- refer = url.replace('/content/', '/a/content/')
+ refer = url.replace('/content/', '/a/content/') if '/a/content/' not in url else url
mimi = hashlib.md5((video_id + '_gGddgPfeaf_gzyr').encode('utf-8')).hexdigest()
info_url = (
"http://video.fc2.com/ginfo.php?mimi={1:s}&href={2:s}&v={0:s}&fversion=WIN%2011%2C6%2C602%2C180&from=2&otag=0&upid={0:s}&tk=null&".
- format(video_id, mimi, compat_urllib_request.quote(refer, safe='').replace('.', '%2E')))
+ format(video_id, mimi, compat_urllib_request.quote(refer, safe=b'').replace('.', '%2E')))
info_webpage = self._download_webpage(
info_url, video_id, note='Downloading info page')
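
encode_dict replaces the inline dict comprehension that UTF-8-encoded every key and value before urlencode (Python 2's urlencode raises on non-ASCII unicode). A sketch of what the helper amounts to, assuming it matches the old inline code it replaces; the credentials are made up:

    from urllib.parse import urlencode  # compat_urllib_parse in the codebase

    # Rough equivalent of the helper in youtube_dl/utils.py.
    def encode_dict(d, encoding='utf-8'):
        return dict((k.encode(encoding), v.encode(encoding)) for k, v in d.items())

    login_form_strs = {'email': 'user@example.com', 'password': 'pässwörd'}
    # Byte-encode first so non-ASCII values survive urlencode on Python 2 as well.
    login_data = urlencode(encode_dict(login_form_strs)).encode('utf-8')
    print(login_data)  # b'email=user%40example.com&password=p%C3%A4ssw%C3%B6rd'
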
diff --git a/youtube_dl/extractor/folketinget.py b/youtube_dl/extractor/folketinget.py
index 0fb29de75..75399fa7d 100644
--- a/youtube_dl/extractor/folketinget.py
+++ b/youtube_dl/extractor/folketinget.py
@@ -30,6 +30,10 @@ class FolketingetIE(InfoExtractor):
'upload_date': '20141120',
'duration': 3960,
},
+ 'params': {
+ # rtmp download
+ 'skip_download': True,
+ },
}
def _real_extract(self, url):
diff --git a/youtube_dl/extractor/fourtube.py b/youtube_dl/extractor/fourtube.py
index b2284ab01..3bb4f6239 100644
--- a/youtube_dl/extractor/fourtube.py
+++ b/youtube_dl/extractor/fourtube.py
@@ -32,6 +32,7 @@ class FourTubeIE(InfoExtractor):
'view_count': int,
'like_count': int,
'categories': list,
+ 'age_limit': 18,
}
}
diff --git a/youtube_dl/extractor/foxnews.py b/youtube_dl/extractor/foxnews.py
index 917f76b1e..3a4a59135 100644
--- a/youtube_dl/extractor/foxnews.py
+++ b/youtube_dl/extractor/foxnews.py
@@ -1,5 +1,7 @@
from __future__ import unicode_literals
+import re
+
from .common import InfoExtractor
from ..utils import (
parse_iso8601,
@@ -8,7 +10,8 @@ from ..utils import (
class FoxNewsIE(InfoExtractor):
- _VALID_URL = r'https?://video\.foxnews\.com/v/(?:video-embed\.html\?video_id=)?(?P<id>\d+)'
+ IE_DESC = 'Fox News and Fox Business Video'
+ _VALID_URL = r'https?://(?P<host>video\.fox(?:news|business)\.com)/v/(?:video-embed\.html\?video_id=)?(?P<id>\d+)'
_TESTS = [
{
'url': 'http://video.foxnews.com/v/3937480/frozen-in-time/#sp=show-clips',
@@ -42,13 +45,19 @@ class FoxNewsIE(InfoExtractor):
'url': 'http://video.foxnews.com/v/video-embed.html?video_id=3937480&d=video.foxnews.com',
'only_matching': True,
},
+ {
+ 'url': 'http://video.foxbusiness.com/v/4442309889001',
+ 'only_matching': True,
+ },
]
def _real_extract(self, url):
- video_id = self._match_id(url)
+ mobj = re.match(self._VALID_URL, url)
+ video_id = mobj.group('id')
+ host = mobj.group('host')
video = self._download_json(
- 'http://video.foxnews.com/v/feed/video/%s.js?template=fox' % video_id, video_id)
+ 'http://%s/v/feed/video/%s.js?template=fox' % (host, video_id), video_id)
item = video['channel']['item']
title = item['title']
diff --git a/youtube_dl/extractor/francetv.py b/youtube_dl/extractor/francetv.py
index b2c984bf2..129984a5f 100644
--- a/youtube_dl/extractor/francetv.py
+++ b/youtube_dl/extractor/francetv.py
@@ -6,15 +6,11 @@ import re
import json
from .common import InfoExtractor
-from ..compat import (
- compat_urllib_parse_urlparse,
- compat_urlparse,
-)
+from ..compat import compat_urlparse
from ..utils import (
clean_html,
ExtractorError,
int_or_none,
- float_or_none,
parse_duration,
determine_ext,
)
@@ -59,12 +55,12 @@ class FranceTVBaseInfoExtractor(InfoExtractor):
# See https://github.com/rg3/youtube-dl/issues/3963
# m3u8 urls work fine
continue
- video_url_parsed = compat_urllib_parse_urlparse(video_url)
f4m_url = self._download_webpage(
- 'http://hdfauth.francetv.fr/esi/TA?url=%s' % video_url_parsed.path,
+ 'http://hdfauth.francetv.fr/esi/TA?url=%s' % video_url,
video_id, 'Downloading f4m manifest token', fatal=False)
if f4m_url:
- formats.extend(self._extract_f4m_formats(f4m_url, video_id, 1, format_id))
+ formats.extend(self._extract_f4m_formats(
+ f4m_url + '&hdcore=3.7.0&plugin=aasp-3.7.0.39.44', video_id, 1, format_id))
elif ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(video_url, video_id, 'mp4', m3u8_id=format_id))
elif video_url.startswith('rtmp'):
@@ -82,12 +78,17 @@ class FranceTVBaseInfoExtractor(InfoExtractor):
})
self._sort_formats(formats)
+ title = info['titre']
+ subtitle = info.get('sous_titre')
+ if subtitle:
+ title += ' - %s' % subtitle
+
return {
'id': video_id,
- 'title': info['titre'],
+ 'title': title,
'description': clean_html(info['synopsis']),
'thumbnail': compat_urlparse.urljoin('http://pluzz.francetv.fr', info['image']),
- 'duration': float_or_none(info.get('real_duration'), 1000) or parse_duration(info['duree']),
+ 'duration': int_or_none(info.get('real_duration')) or parse_duration(info['duree']),
'timestamp': int_or_none(info['diffusion']['timestamp']),
'formats': formats,
}
@@ -160,11 +161,21 @@ class FranceTvInfoIE(FranceTVBaseInfoExtractor):
class FranceTVIE(FranceTVBaseInfoExtractor):
IE_NAME = 'francetv'
IE_DESC = 'France 2, 3, 4, 5 and Ô'
- _VALID_URL = r'''(?x)https?://www\.france[2345o]\.fr/
- (?:
- emissions/.*?/(videos|emissions)/(?P<id>[^/?]+)
- | (emissions?|jt)/(?P<key>[^/?]+)
- )'''
+ _VALID_URL = r'''(?x)
+ https?://
+ (?:
+ (?:www\.)?france[2345o]\.fr/
+ (?:
+ emissions/[^/]+/(?:videos|diffusions)|
+ emission/[^/]+|
+ videos|
+ jt
+ )
+ /|
+ embed\.francetv\.fr/\?ue=
+ )
+ (?P<id>[^/?]+)
+ '''
_TESTS = [
# france2
@@ -208,37 +219,59 @@ class FranceTVIE(FranceTVBaseInfoExtractor):
},
# france5
{
- 'url': 'http://www.france5.fr/emissions/c-a-dire/videos/92837968',
- 'md5': '78f0f4064f9074438e660785bbf2c5d9',
+ 'url': 'http://www.france5.fr/emissions/c-a-dire/videos/quels_sont_les_enjeux_de_cette_rentree_politique__31-08-2015_908948?onglet=tous&page=1',
+ 'md5': 'f6c577df3806e26471b3d21631241fd0',
'info_dict': {
- 'id': '108961659',
+ 'id': '123327454',
'ext': 'flv',
- 'title': 'C à dire ?!',
- 'description': 'md5:1a4aeab476eb657bf57c4ff122129f81',
- 'upload_date': '20140915',
- 'timestamp': 1410795000,
+ 'title': 'C à dire ?! - Quels sont les enjeux de cette rentrée politique ?',
+ 'description': 'md5:4a0d5cb5dce89d353522a84462bae5a4',
+ 'upload_date': '20150831',
+ 'timestamp': 1441035120,
},
},
# franceo
{
- 'url': 'http://www.franceo.fr/jt/info-afrique/04-12-2013',
- 'md5': '52f0bfe202848b15915a2f39aaa8981b',
+ 'url': 'http://www.franceo.fr/jt/info-soir/18-07-2015',
+ 'md5': '47d5816d3b24351cdce512ad7ab31da8',
+ 'info_dict': {
+ 'id': '125377621',
+ 'ext': 'flv',
+ 'title': 'Infô soir',
+ 'description': 'md5:01b8c6915a3d93d8bbbd692651714309',
+ 'upload_date': '20150718',
+ 'timestamp': 1437241200,
+ 'duration': 414,
+ },
+ },
+ {
+ # francetv embed
+ 'url': 'http://embed.francetv.fr/?ue=8d7d3da1e3047c42ade5a5d7dfd3fc87',
'info_dict': {
- 'id': '108634970',
+ 'id': 'EV_30231',
'ext': 'flv',
- 'title': 'Infô Afrique',
- 'description': 'md5:ebf346da789428841bee0fd2a935ea55',
- 'upload_date': '20140915',
- 'timestamp': 1410822000,
+ 'title': 'Alcaline, le concert avec Calogero',
+ 'description': 'md5:61f08036dcc8f47e9cfc33aed08ffaff',
+ 'upload_date': '20150226',
+ 'timestamp': 1424989860,
+ 'duration': 5400,
},
},
+ {
+ 'url': 'http://www.france4.fr/emission/highlander/diffusion-du-17-07-2015-04h05',
+ 'only_matching': True,
+ },
+ {
+ 'url': 'http://www.franceo.fr/videos/125377617',
+ 'only_matching': True,
+ }
]
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- webpage = self._download_webpage(url, mobj.group('key') or mobj.group('id'))
+ video_id = self._match_id(url)
+ webpage = self._download_webpage(url, video_id)
video_id, catalogue = self._html_search_regex(
- r'href="http://videos\.francetv\.fr/video/([^@]+@[^"]+)"',
+ r'href="http://videos?\.francetv\.fr/video/([^@]+@[^"]+)"',
webpage, 'video ID').split('@')
return self._extract_video(video_id, catalogue)
diff --git a/youtube_dl/extractor/funnyordie.py b/youtube_dl/extractor/funnyordie.py
index dd87257c4..f5f13689c 100644
--- a/youtube_dl/extractor/funnyordie.py
+++ b/youtube_dl/extractor/funnyordie.py
@@ -53,7 +53,7 @@ class FunnyOrDieIE(InfoExtractor):
for bitrate in bitrates:
for link in links:
formats.append({
- 'url': '%s%d.%s' % (link[0], bitrate, link[1]),
+ 'url': self._proto_relative_url('%s%d.%s' % (link[0], bitrate, link[1])),
'format_id': '%s-%d' % (link[1], bitrate),
'vbr': bitrate,
})
diff --git a/youtube_dl/extractor/gdcvault.py b/youtube_dl/extractor/gdcvault.py
index 43f916412..a6834db43 100644
--- a/youtube_dl/extractor/gdcvault.py
+++ b/youtube_dl/extractor/gdcvault.py
@@ -7,7 +7,10 @@ from ..compat import (
compat_urllib_parse,
compat_urllib_request,
)
-from ..utils import remove_end
+from ..utils import (
+ remove_end,
+ HEADRequest,
+)
class GDCVaultIE(InfoExtractor):
@@ -73,10 +76,20 @@ class GDCVaultIE(InfoExtractor):
return video_formats
def _parse_flv(self, xml_description):
- video_formats = []
+ formats = []
akamai_url = xml_description.find('./metadata/akamaiHost').text
+ audios = xml_description.find('./metadata/audios')
+ if audios is not None:
+ for audio in audios:
+ formats.append({
+ 'url': 'rtmp://%s/ondemand?ovpfv=1.1' % akamai_url,
+ 'play_path': remove_end(audio.get('url'), '.flv'),
+ 'ext': 'flv',
+ 'vcodec': 'none',
+ 'format_id': audio.get('code'),
+ })
slide_video_path = xml_description.find('./metadata/slideVideo').text
- video_formats.append({
+ formats.append({
'url': 'rtmp://%s/ondemand?ovpfv=1.1' % akamai_url,
'play_path': remove_end(slide_video_path, '.flv'),
'ext': 'flv',
@@ -86,7 +99,7 @@ class GDCVaultIE(InfoExtractor):
'format_id': 'slides',
})
speaker_video_path = xml_description.find('./metadata/speakerVideo').text
- video_formats.append({
+ formats.append({
'url': 'rtmp://%s/ondemand?ovpfv=1.1' % akamai_url,
'play_path': remove_end(speaker_video_path, '.flv'),
'ext': 'flv',
@@ -95,7 +108,7 @@ class GDCVaultIE(InfoExtractor):
'preference': -1,
'format_id': 'speaker',
})
- return video_formats
+ return formats
def _login(self, webpage_url, display_id):
(username, password) = self._get_login_info()
@@ -133,16 +146,18 @@ class GDCVaultIE(InfoExtractor):
r's1\.addVariable\("file",\s*encodeURIComponent\("(/[^"]+)"\)\);',
start_page, 'url', default=None)
if direct_url:
- video_url = 'http://www.gdcvault.com/' + direct_url
title = self._html_search_regex(
r'<td><strong>Session Name</strong></td>\s*<td>(.*?)</td>',
start_page, 'title')
+ video_url = 'http://www.gdcvault.com' + direct_url
+ # resolve the url so that we can detect the correct extension
+ head = self._request_webpage(HEADRequest(video_url), video_id)
+ video_url = head.geturl()
return {
'id': video_id,
'display_id': display_id,
'url': video_url,
- 'ext': 'flv',
'title': title,
}
@@ -168,8 +183,8 @@ class GDCVaultIE(InfoExtractor):
# Fallback to the older format
xml_name = self._html_search_regex(r'<iframe src=".*?\?xmlURL=xml/(?P<xml_file>.+?\.xml).*?".*?</iframe>', start_page, 'xml filename')
- xml_decription_url = xml_root + 'xml/' + xml_name
- xml_description = self._download_xml(xml_decription_url, display_id)
+ xml_description_url = xml_root + 'xml/' + xml_name
+ xml_description = self._download_xml(xml_description_url, display_id)
video_title = xml_description.find('./metadata/title').text
video_formats = self._parse_mp4(xml_description)
diff --git a/youtube_dl/extractor/generic.py b/youtube_dl/extractor/generic.py
index a62287e50..ec748ed9f 100644
--- a/youtube_dl/extractor/generic.py
+++ b/youtube_dl/extractor/generic.py
@@ -48,6 +48,7 @@ from .vimeo import VimeoIE
from .dailymotion import DailymotionCloudIE
from .onionstudios import OnionStudiosIE
from .snagfilms import SnagFilmsEmbedIE
+from .screenwavemedia import ScreenwaveMediaIE
class GenericIE(InfoExtractor):
@@ -130,6 +131,89 @@ class GenericIE(InfoExtractor):
'title': 'pdv_maddow_netcast_m4v-02-27-2015-201624',
}
},
+ # SMIL from http://videolectures.net/promogram_igor_mekjavic_eng
+ {
+ 'url': 'http://videolectures.net/promogram_igor_mekjavic_eng/video/1/smil.xml',
+ 'info_dict': {
+ 'id': 'smil',
+ 'ext': 'mp4',
+ 'title': 'Automatics, robotics and biocybernetics',
+ 'description': 'md5:815fc1deb6b3a2bff99de2d5325be482',
+ 'formats': 'mincount:16',
+ 'subtitles': 'mincount:1',
+ },
+ 'params': {
+ 'force_generic_extractor': True,
+ 'skip_download': True,
+ },
+ },
+ # SMIL from http://www1.wdr.de/mediathek/video/livestream/index.html
+ {
+ 'url': 'http://metafilegenerator.de/WDR/WDR_FS/hds/hds.smil',
+ 'info_dict': {
+ 'id': 'hds',
+ 'ext': 'flv',
+ 'title': 'hds',
+ 'formats': 'mincount:1',
+ },
+ 'params': {
+ 'skip_download': True,
+ },
+ },
+ # SMIL from https://www.restudy.dk/video/play/id/1637
+ {
+ 'url': 'https://www.restudy.dk/awsmedia/SmilDirectory/video_1637.xml',
+ 'info_dict': {
+ 'id': 'video_1637',
+ 'ext': 'flv',
+ 'title': 'video_1637',
+ 'formats': 'mincount:3',
+ },
+ 'params': {
+ 'skip_download': True,
+ },
+ },
+ # SMIL from http://adventure.howstuffworks.com/5266-cool-jobs-iditarod-musher-video.htm
+ {
+ 'url': 'http://services.media.howstuffworks.com/videos/450221/smil-service.smil',
+ 'info_dict': {
+ 'id': 'smil-service',
+ 'ext': 'flv',
+ 'title': 'smil-service',
+ 'formats': 'mincount:1',
+ },
+ 'params': {
+ 'skip_download': True,
+ },
+ },
+ # SMIL from http://new.livestream.com/CoheedandCambria/WebsterHall/videos/4719370
+ {
+ 'url': 'http://api.new.livestream.com/accounts/1570303/events/1585861/videos/4719370.smil',
+ 'info_dict': {
+ 'id': '4719370',
+ 'ext': 'mp4',
+ 'title': '571de1fd-47bc-48db-abf9-238872a58d1f',
+ 'formats': 'mincount:3',
+ },
+ 'params': {
+ 'skip_download': True,
+ },
+ },
+ # XSPF playlist from http://www.telegraaf.nl/tv/nieuws/binnenland/24353229/__Tikibad_ontruimd_wegens_brand__.html
+ {
+ 'url': 'http://www.telegraaf.nl/xml/playlist/2015/8/7/mZlp2ctYIUEB.xspf',
+ 'info_dict': {
+ 'id': 'mZlp2ctYIUEB',
+ 'ext': 'mp4',
+ 'title': 'Tikibad ontruimd wegens brand',
+ 'description': 'md5:05ca046ff47b931f9b04855015e163a4',
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ 'duration': 33,
+ },
+ 'params': {
+ 'skip_download': True,
+ },
+ },
# google redirect
{
'url': 'http://www.google.com/url?sa=t&rct=j&q=&esrc=s&source=web&cd=1&cad=rja&ved=0CCUQtwIwAA&url=http%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DcmQHVoWB5FY&ei=F-sNU-LLCaXk4QT52ICQBQ&usg=AFQjCNEw4hL29zgOohLXvpJ-Bdh2bils1Q&bvm=bv.61965928,d.bGE',
@@ -236,6 +320,19 @@ class GenericIE(InfoExtractor):
},
'add_ie': ['Ooyala'],
},
+ {
+ # ooyala video embedded with http://player.ooyala.com/iframe.js
+ 'url': 'http://www.macrumors.com/2015/07/24/steve-jobs-the-man-in-the-machine-first-trailer/',
+ 'info_dict': {
+ 'id': 'p0MGJndjoG5SOKqO_hZJuZFPB-Tr5VgB',
+ 'ext': 'mp4',
+ 'title': '"Steve Jobs: Man in the Machine" trailer',
+ 'description': 'The first trailer for the Alex Gibney documentary "Steve Jobs: Man in the Machine."',
+ },
+ 'params': {
+ 'skip_download': True,
+ },
+ },
# multiple ooyala embeds on SBN network websites
{
'url': 'http://www.sbnation.com/college-football-recruiting/2015/2/3/7970291/national-signing-day-rationalizations-itll-be-ok-itll-be-ok',
@@ -276,14 +373,6 @@ class GenericIE(InfoExtractor):
'description': 'Episode 18: President Barack Obama sits down with Zach Galifianakis for his most memorable interview yet.',
},
},
- # BBC iPlayer embeds
- {
- 'url': 'http://www.bbc.co.uk/blogs/adamcurtis/posts/BUGGER',
- 'info_dict': {
- 'title': 'BBC - Blogs - Adam Curtis - BUGGER',
- },
- 'playlist_mincount': 18,
- },
# RUTV embed
{
'url': 'http://www.rg.ru/2014/03/15/reg-dfo/anklav-anons.html',
@@ -407,6 +496,26 @@ class GenericIE(InfoExtractor):
'skip_download': 'Requires rtmpdump'
}
},
+ # francetv embed
+ {
+ 'url': 'http://www.tsprod.com/replay-du-concert-alcaline-de-calogero',
+ 'info_dict': {
+ 'id': 'EV_30231',
+ 'ext': 'mp4',
+ 'title': 'Alcaline, le concert avec Calogero',
+ 'description': 'md5:61f08036dcc8f47e9cfc33aed08ffaff',
+ 'upload_date': '20150226',
+ 'timestamp': 1424989860,
+ 'duration': 5400,
+ },
+ 'params': {
+ # m3u8 downloads
+ 'skip_download': True,
+ },
+ 'expected_warnings': [
+ 'Forbidden'
+ ]
+ },
# Condé Nast embed
{
'url': 'http://www.wired.com/2014/04/honda-asimo/',
@@ -893,6 +1002,16 @@ class GenericIE(InfoExtractor):
'description': 'New experience with Acrobat DC',
'duration': 248.667,
},
+ },
+ # ScreenwaveMedia embed
+ {
+ 'url': 'http://www.thecinemasnob.com/the-cinema-snob/a-nightmare-on-elm-street-2-freddys-revenge1',
+ 'md5': '24ace5baba0d35d55c6810b51f34e9e0',
+ 'info_dict': {
+ 'id': 'cinemasnob-55d26273809dd',
+ 'ext': 'mp4',
+ 'title': 'cinemasnob',
+ },
}
]
@@ -1098,11 +1217,15 @@ class GenericIE(InfoExtractor):
self.report_extraction(video_id)
- # Is it an RSS feed?
+ # Is it an RSS feed, a SMIL file or a XSPF playlist?
try:
doc = parse_xml(webpage)
if doc.tag == 'rss':
return self._extract_rss(url, video_id, doc)
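+ # the SMIL root tag may carry an XML namespace, hence the optional {...} prefix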
+ elif re.match(r'^(?:{[^}]+})?smil$', doc.tag):
+ return self._parse_smil(doc, url, video_id)
+ elif doc.tag == '{http://xspf.org/ns/0/}playlist':
+ return self.playlist_result(self._parse_xspf(doc, video_id), video_id)
except compat_xml_parse_error:
pass
@@ -1176,6 +1299,12 @@ class GenericIE(InfoExtractor):
if vimeo_url is not None:
return self.url_result(vimeo_url)
+ vid_me_embed_url = self._search_regex(
+ r'src=[\'"](https?://vid\.me/[^\'"]+)[\'"]',
+ webpage, 'vid.me embed', default=None)
+ if vid_me_embed_url is not None:
+ return self.url_result(vid_me_embed_url, 'Vidme')
+
# Look for embedded YouTube player
matches = re.findall(r'''(?x)
(?:
@@ -1302,7 +1431,7 @@ class GenericIE(InfoExtractor):
return self.url_result(mobj.group('url'))
# Look for Ooyala videos
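+ # the embed code may follow either a query string or a fragment in the player URL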
- mobj = (re.search(r'player\.ooyala\.com/[^"?]+\?[^"]*?(?:embedCode|ec)=(?P<ec>[^"&]+)', webpage) or
+ mobj = (re.search(r'player\.ooyala\.com/[^"?]+[?#][^"]*?(?:embedCode|ec)=(?P<ec>[^"&]+)', webpage) or
re.search(r'OO\.Player\.create\([\'"].*?[\'"],\s*[\'"](?P<ec>.{32})[\'"]', webpage) or
re.search(r'SBN\.VideoLinkset\.ooyala\([\'"](?P<ec>.{32})[\'"]\)', webpage) or
re.search(r'data-ooyala-video-id\s*=\s*[\'"](?P<ec>.{32})[\'"]', webpage))
@@ -1431,6 +1560,13 @@ class GenericIE(InfoExtractor):
if mobj is not None:
return self.url_result(mobj.group('url'), 'ArteTVEmbed')
+ # Look for embedded francetv player
+ mobj = re.search(
+ r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?://)?embed\.francetv\.fr/\?ue=.+?)\1',
+ webpage)
+ if mobj is not None:
+ return self.url_result(mobj.group('url'))
+
# Look for embedded smotri.com player
smotri_url = SmotriIE._extract_url(webpage)
if smotri_url:
@@ -1593,6 +1729,11 @@ class GenericIE(InfoExtractor):
if snagfilms_url:
return self.url_result(snagfilms_url)
+ # Look for ScreenwaveMedia embeds
+ mobj = re.search(ScreenwaveMediaIE.EMBED_PATTERN, webpage)
+ if mobj is not None:
+ return self.url_result(unescapeHTML(mobj.group('url')), 'ScreenwaveMedia')
+
# Look for AdobeTVVideo embeds
mobj = re.search(
r'<iframe[^>]+src=[\'"]((?:https?:)?//video\.tv\.adobe\.com/v/\d+[^"]+)[\'"]',
@@ -1630,7 +1771,7 @@ class GenericIE(InfoExtractor):
if not found:
# Broaden the findall a little bit: JWPlayer JS loader
found = filter_video(re.findall(
- r'[^A-Za-z0-9]?file["\']?:\s*["\'](http(?![^\'"]+\.[0-9]+[\'"])[^\'"]+)["\']', webpage))
+ r'[^A-Za-z0-9]?(?:file|video_url)["\']?:\s*["\'](http(?![^\'"]+\.[0-9]+[\'"])[^\'"]+)["\']', webpage))
if not found:
# Flow player
found = filter_video(re.findall(r'''(?xs)
@@ -1656,7 +1797,7 @@ class GenericIE(InfoExtractor):
found = filter_video(re.findall(r'<meta.*?property="og:video".*?content="(.*?)"', webpage))
if not found:
# HTML5 video
- found = re.findall(r'(?s)<video[^<]*(?:>.*?<source[^>]*)?\s+src=["\'](.*?)["\']', webpage)
+ found = re.findall(r'(?s)<(?:video|audio)[^<]*(?:>.*?<source[^>]*)?\s+src=["\'](.*?)["\']', webpage)
if not found:
REDIRECT_REGEX = r'[0-9]{,2};\s*(?:URL|url)=\'?([^\'"]+)'
found = re.search(
@@ -1669,7 +1810,7 @@ class GenericIE(InfoExtractor):
if refresh_header:
found = re.search(REDIRECT_REGEX, refresh_header)
if found:
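+ # the redirect target may be HTML-escaped (e.g. contain &amp;)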
- new_url = compat_urlparse.urljoin(url, found.group(1))
+ new_url = compat_urlparse.urljoin(url, unescapeHTML(found.group(1)))
self.report_following_redirect(new_url)
return {
'_type': 'url',
@@ -1691,7 +1832,8 @@ class GenericIE(InfoExtractor):
# here's a fun little line of code for you:
video_id = os.path.splitext(video_id)[0]
- if determine_ext(video_url) == 'smil':
+ ext = determine_ext(video_url)
+ if ext == 'smil':
entries.append({
'id': video_id,
'formats': self._extract_smil_formats(video_url, video_id),
@@ -1699,6 +1841,8 @@ class GenericIE(InfoExtractor):
'title': video_title,
'age_limit': age_limit,
})
+ elif ext == 'xspf':
+ return self.playlist_result(self._extract_xspf_playlist(video_url, video_id), video_id)
else:
entries.append({
'id': video_id,
diff --git a/youtube_dl/extractor/globo.py b/youtube_dl/extractor/globo.py
index 8a95793ca..33d6432a6 100644
--- a/youtube_dl/extractor/globo.py
+++ b/youtube_dl/extractor/globo.py
@@ -13,6 +13,7 @@ from ..compat import (
from ..utils import (
ExtractorError,
float_or_none,
+ int_or_none,
)
@@ -359,13 +360,8 @@ class GloboIE(InfoExtractor):
self._API_URL_TEMPLATE % video_id, video_id)['videos'][0]
title = video['title']
- duration = float_or_none(video['duration'], 1000)
- like_count = video['likes']
- uploader = video['channel']
- uploader_id = video['channel_id']
formats = []
-
for resource in video['resources']:
resource_id = resource.get('_id')
if not resource_id:
@@ -407,6 +403,11 @@ class GloboIE(InfoExtractor):
self._sort_formats(formats)
+ duration = float_or_none(video.get('duration'), 1000)
+ like_count = int_or_none(video.get('likes'))
+ uploader = video.get('channel')
+ uploader_id = video.get('channel_id')
+
return {
'id': video_id,
'title': title,
diff --git a/youtube_dl/extractor/gorillavid.py b/youtube_dl/extractor/gorillavid.py
index f006f0cb1..d23e3eac1 100644
--- a/youtube_dl/extractor/gorillavid.py
+++ b/youtube_dl/extractor/gorillavid.py
@@ -10,15 +10,16 @@ from ..compat import (
)
from ..utils import (
ExtractorError,
+ encode_dict,
int_or_none,
)
class GorillaVidIE(InfoExtractor):
- IE_DESC = 'GorillaVid.in, daclips.in, movpod.in, fastvideo.in and realvid.net'
+ IE_DESC = 'GorillaVid.in, daclips.in, movpod.in, fastvideo.in, realvid.net and filehoot.com'
_VALID_URL = r'''(?x)
https?://(?P<host>(?:www\.)?
- (?:daclips\.in|gorillavid\.in|movpod\.in|fastvideo\.in|realvid\.net))/
+ (?:daclips\.in|gorillavid\.in|movpod\.in|fastvideo\.in|realvid\.net|filehoot\.com))/
(?:embed-)?(?P<id>[0-9a-zA-Z]+)(?:-[0-9]+x[0-9]+\.html)?
'''
@@ -67,13 +68,22 @@ class GorillaVidIE(InfoExtractor):
}, {
'url': 'http://movpod.in/0wguyyxi1yca',
'only_matching': True,
+ }, {
+ 'url': 'http://filehoot.com/3ivfabn7573c.html',
+ 'info_dict': {
+ 'id': '3ivfabn7573c',
+ 'ext': 'mp4',
+ 'title': 'youtube-dl test video \'äBaW_jenozKc.mp4.mp4',
+ 'thumbnail': 're:http://.*\.jpg',
+ }
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
- webpage = self._download_webpage('http://%s/%s' % (mobj.group('host'), video_id), video_id)
+ url = 'http://%s/%s' % (mobj.group('host'), video_id)
+ webpage = self._download_webpage(url, video_id)
if re.search(self._FILE_NOT_FOUND_REGEX, webpage) is not None:
raise ExtractorError('Video %s does not exist' % video_id, expected=True)
@@ -87,7 +97,7 @@ class GorillaVidIE(InfoExtractor):
if countdown:
self._sleep(countdown, video_id)
- post = compat_urllib_parse.urlencode(fields)
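+ # encode_dict converts values to UTF-8 first; Python 2's urlencode chokes on unicode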
+ post = compat_urllib_parse.urlencode(encode_dict(fields))
req = compat_urllib_request.Request(url, post)
req.add_header('Content-type', 'application/x-www-form-urlencoded')
@@ -95,7 +105,7 @@ class GorillaVidIE(InfoExtractor):
webpage = self._download_webpage(req, video_id, 'Downloading video page')
title = self._search_regex(
- [r'style="z-index: [0-9]+;">([^<]+)</span>', r'>Watch (.+) '],
+ [r'style="z-index: [0-9]+;">([^<]+)</span>', r'<td nowrap>([^<]+)</td>', r'>Watch (.+) '],
webpage, 'title', default=None) or self._og_search_title(webpage)
video_url = self._search_regex(
r'file\s*:\s*["\'](http[^"\']+)["\'],', webpage, 'file url')
diff --git a/youtube_dl/extractor/imgur.py b/youtube_dl/extractor/imgur.py
index d692ea79a..70c8ca64e 100644
--- a/youtube_dl/extractor/imgur.py
+++ b/youtube_dl/extractor/imgur.py
@@ -13,7 +13,7 @@ from ..utils import (
class ImgurIE(InfoExtractor):
- _VALID_URL = r'https?://(?:i\.)?imgur\.com/(?P<id>[a-zA-Z0-9]+)'
+ _VALID_URL = r'https?://(?:i\.)?imgur\.com/(?!gallery)(?P<id>[a-zA-Z0-9]+)'
_TESTS = [{
'url': 'https://i.imgur.com/A61SaA1.gifv',
@@ -97,3 +97,28 @@ class ImgurIE(InfoExtractor):
'description': self._og_search_description(webpage),
'title': self._og_search_title(webpage),
}
+
+
+class ImgurAlbumIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:i\.)?imgur\.com/gallery/(?P<id>[a-zA-Z0-9]+)'
+
+ _TEST = {
+ 'url': 'http://imgur.com/gallery/Q95ko',
+ 'info_dict': {
+ 'id': 'Q95ko',
+ },
+ 'playlist_count': 25,
+ }
+
+ def _real_extract(self, url):
+ album_id = self._match_id(url)
+
+ album_images = self._download_json(
+ 'http://imgur.com/gallery/%s/album_images/hit.json?all=true' % album_id,
+ album_id)['data']['images']
+
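+ # entries without a hash cannot be mapped to a canonical imgur URL, so skip them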
+ entries = [
+ self.url_result('http://imgur.com/%s' % image['hash'])
+ for image in album_images if image.get('hash')]
+
+ return self.playlist_result(entries, album_id)
diff --git a/youtube_dl/extractor/indavideo.py b/youtube_dl/extractor/indavideo.py
new file mode 100644
index 000000000..12fb5e8e1
--- /dev/null
+++ b/youtube_dl/extractor/indavideo.py
@@ -0,0 +1,142 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..utils import (
+ int_or_none,
+ parse_age_limit,
+ parse_iso8601,
+)
+
+
+class IndavideoEmbedIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:(?:embed\.)?indavideo\.hu/player/video/|assets\.indavideo\.hu/swf/player\.swf\?.*\b(?:v(?:ID|id))=)(?P<id>[\da-f]+)'
+ _TESTS = [{
+ 'url': 'http://indavideo.hu/player/video/1bdc3c6d80/',
+ 'md5': 'f79b009c66194acacd40712a6778acfa',
+ 'info_dict': {
+ 'id': '1837039',
+ 'ext': 'mp4',
+ 'title': 'Cicatánc',
+ 'description': '',
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ 'uploader': 'cukiajanlo',
+ 'uploader_id': '83729',
+ 'timestamp': 1439193826,
+ 'upload_date': '20150810',
+ 'duration': 72,
+ 'age_limit': 0,
+ 'tags': ['tánc', 'cica', 'cuki', 'cukiajanlo', 'newsroom'],
+ },
+ }, {
+ 'url': 'http://embed.indavideo.hu/player/video/1bdc3c6d80?autostart=1&hide=1',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://assets.indavideo.hu/swf/player.swf?v=fe25e500&vID=1bdc3c6d80&autostart=1&hide=1&i=1',
+ 'only_matching': True,
+ }]
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+
+ video = self._download_json(
+ 'http://amfphp.indavideo.hu/SYm0json.php/player.playerHandler.getVideoData/%s' % video_id,
+ video_id)['data']
+
+ title = video['title']
+
+ video_urls = video.get('video_files', [])
+ video_file = video.get('video_file')
+ if video_file:
+ video_urls.append(video_file)
+ video_urls = list(set(video_urls))
+
+ video_prefix = video_urls[0].rsplit('/', 1)[0]
+
+ for flv_file in video.get('flv_files', []):
+ flv_url = '%s/%s' % (video_prefix, flv_file)
+ if flv_url not in video_urls:
+ video_urls.append(flv_url)
+
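+ # the height is only available embedded in the file name (e.g. "....720.mp4")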
+ formats = [{
+ 'url': video_url,
+ 'height': int_or_none(self._search_regex(r'\.(\d{3,4})\.mp4$', video_url, 'height', default=None)),
+ } for video_url in video_urls]
+ self._sort_formats(formats)
+
+ timestamp = video.get('date')
+ if timestamp:
+ # upload date is in CEST
+ timestamp = parse_iso8601(timestamp + ' +0200', ' ')
+
+ thumbnails = [{
+ 'url': self._proto_relative_url(thumbnail)
+ } for thumbnail in video.get('thumbnails', [])]
+
+ tags = [tag['title'] for tag in video.get('tags', [])]
+
+ return {
+ 'id': video.get('id') or video_id,
+ 'title': title,
+ 'description': video.get('description'),
+ 'thumbnails': thumbnails,
+ 'uploader': video.get('user_name'),
+ 'uploader_id': video.get('user_id'),
+ 'timestamp': timestamp,
+ 'duration': int_or_none(video.get('length')),
+ 'age_limit': parse_age_limit(video.get('age_limit')),
+ 'tags': tags,
+ 'formats': formats,
+ }
+
+
+class IndavideoIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:.+?\.)?indavideo\.hu/video/(?P<id>[^/#?]+)'
+ _TESTS = [{
+ 'url': 'http://indavideo.hu/video/Vicces_cica_1',
+ 'md5': '8c82244ba85d2a2310275b318eb51eac',
+ 'info_dict': {
+ 'id': '1335611',
+ 'display_id': 'Vicces_cica_1',
+ 'ext': 'mp4',
+ 'title': 'Vicces cica',
+ 'description': 'Játszik a tablettel. :D',
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ 'uploader': 'Jet_Pack',
+ 'uploader_id': '491217',
+ 'timestamp': 1390821212,
+ 'upload_date': '20140127',
+ 'duration': 7,
+ 'age_limit': 0,
+ 'tags': ['vicces', 'macska', 'cica', 'ügyes', 'nevetés', 'játszik', 'Cukiság', 'Jet_Pack'],
+ },
+ }, {
+ 'url': 'http://index.indavideo.hu/video/2015_0728_beregszasz',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://auto.indavideo.hu/video/Sajat_utanfutoban_a_kis_tacsko',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://erotika.indavideo.hu/video/Amator_tini_punci',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://film.indavideo.hu/video/f_hrom_nagymamm_volt',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://palyazat.indavideo.hu/video/Embertelen_dal_Dodgem_egyuttes',
+ 'only_matching': True,
+ }]
+
+ def _real_extract(self, url):
+ display_id = self._match_id(url)
+
+ webpage = self._download_webpage(url, display_id)
+ embed_url = self._search_regex(
+ r'<link[^>]+rel="video_src"[^>]+href="(.+?)"', webpage, 'embed url')
+
+ return {
+ '_type': 'url_transparent',
+ 'ie_key': 'IndavideoEmbed',
+ 'url': embed_url,
+ 'display_id': display_id,
+ }
diff --git a/youtube_dl/extractor/iqiyi.py b/youtube_dl/extractor/iqiyi.py
index 0f6707d7c..393e67e35 100644
--- a/youtube_dl/extractor/iqiyi.py
+++ b/youtube_dl/extractor/iqiyi.py
@@ -3,19 +3,13 @@ from __future__ import unicode_literals
import hashlib
import math
-import os.path
import random
-import re
import time
import uuid
-import zlib
from .common import InfoExtractor
from ..compat import compat_urllib_parse
-from ..utils import (
- ExtractorError,
- url_basename,
-)
+from ..utils import ExtractorError
class IqiyiIE(InfoExtractor):
@@ -39,62 +33,57 @@ class IqiyiIE(InfoExtractor):
'title': '名侦探柯南第752集',
},
'playlist': [{
- 'md5': '7e49376fecaffa115d951634917fe105',
'info_dict': {
'id': 'e3f585b550a280af23c98b6cb2be19fb_part1',
'ext': 'f4v',
'title': '名侦探柯南第752集',
},
}, {
- 'md5': '41b75ba13bb7ac0e411131f92bc4f6ca',
'info_dict': {
'id': 'e3f585b550a280af23c98b6cb2be19fb_part2',
'ext': 'f4v',
'title': '名侦探柯南第752集',
},
}, {
- 'md5': '0cee1dd0a3d46a83e71e2badeae2aab0',
'info_dict': {
'id': 'e3f585b550a280af23c98b6cb2be19fb_part3',
'ext': 'f4v',
'title': '名侦探柯南第752集',
},
}, {
- 'md5': '4f8ad72373b0c491b582e7c196b0b1f9',
'info_dict': {
'id': 'e3f585b550a280af23c98b6cb2be19fb_part4',
'ext': 'f4v',
'title': '名侦探柯南第752集',
},
}, {
- 'md5': 'd89ad028bcfad282918e8098e811711d',
'info_dict': {
'id': 'e3f585b550a280af23c98b6cb2be19fb_part5',
'ext': 'f4v',
'title': '名侦探柯南第752集',
},
}, {
- 'md5': '9cb1e5c95da25dff0660c32ae50903b7',
'info_dict': {
'id': 'e3f585b550a280af23c98b6cb2be19fb_part6',
'ext': 'f4v',
'title': '名侦探柯南第752集',
},
}, {
- 'md5': '155116e0ff1867bbc9b98df294faabc9',
'info_dict': {
'id': 'e3f585b550a280af23c98b6cb2be19fb_part7',
'ext': 'f4v',
'title': '名侦探柯南第752集',
},
}, {
- 'md5': '53f5db77622ae14fa493ed2a278a082b',
'info_dict': {
'id': 'e3f585b550a280af23c98b6cb2be19fb_part8',
'ext': 'f4v',
'title': '名侦探柯南第752集',
},
}],
+ 'params': {
+ 'skip_download': True,
+ },
}]
_FORMATS_MAP = [
@@ -212,20 +201,7 @@ class IqiyiIE(InfoExtractor):
return raw_data
def get_enc_key(self, swf_url, video_id):
- filename, _ = os.path.splitext(url_basename(swf_url))
- enc_key_json = self._downloader.cache.load('iqiyi-enc-key', filename)
- if enc_key_json is not None:
- return enc_key_json[0]
-
- req = self._request_webpage(
- swf_url, video_id, note='download swf content')
- cn = req.read()
- cn = zlib.decompress(cn[8:])
- pt = re.compile(b'MixerRemote\x08(?P<enc_key>.+?)\$&vv')
- enc_key = self._search_regex(pt, cn, 'enc_key').decode('utf8')
-
- self._downloader.cache.store('iqiyi-enc-key', filename, [enc_key])
-
+ enc_key = '3601ba290e4f4662848c710e2122007e' # last update at 2015-08-10 for Zombie
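+ # static key extracted from the player SWF; it has to be refreshed by hand whenever the player is updated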
return enc_key
def _real_extract(self, url):
diff --git a/youtube_dl/extractor/ir90tv.py b/youtube_dl/extractor/ir90tv.py
new file mode 100644
index 000000000..214bcd5b5
--- /dev/null
+++ b/youtube_dl/extractor/ir90tv.py
@@ -0,0 +1,42 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..utils import remove_start
+
+
+class Ir90TvIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:www\.)?90tv\.ir/video/(?P<id>[0-9]+)/.*'
+ _TESTS = [{
+ 'url': 'http://90tv.ir/video/95719/%D8%B4%D8%A7%DB%8C%D8%B9%D8%A7%D8%AA-%D9%86%D9%82%D9%84-%D9%88-%D8%A7%D9%86%D8%AA%D9%82%D8%A7%D9%84%D8%A7%D8%AA-%D9%85%D9%87%D9%85-%D9%81%D9%88%D8%AA%D8%A8%D8%A7%D9%84-%D8%A7%D8%B1%D9%88%D9%BE%D8%A7-940218',
+ 'md5': '411dbd94891381960cb9e13daa47a869',
+ 'info_dict': {
+ 'id': '95719',
+ 'ext': 'mp4',
+ 'title': 'شایعات نقل و انتقالات مهم فوتبال اروپا 94/02/18',
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ }
+ }, {
+ 'url': 'http://www.90tv.ir/video/95719/%D8%B4%D8%A7%DB%8C%D8%B9%D8%A7%D8%AA-%D9%86%D9%82%D9%84-%D9%88-%D8%A7%D9%86%D8%AA%D9%82%D8%A7%D9%84%D8%A7%D8%AA-%D9%85%D9%87%D9%85-%D9%81%D9%88%D8%AA%D8%A8%D8%A7%D9%84-%D8%A7%D8%B1%D9%88%D9%BE%D8%A7-940218',
+ 'only_matching': True,
+ }]
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+ webpage = self._download_webpage(url, video_id)
+
+ title = remove_start(self._html_search_regex(
+ r'<title>([^<]+)</title>', webpage, 'title'), '90tv.ir :: ')
+
+ video_url = self._search_regex(
+ r'<source[^>]+src="([^"]+)"', webpage, 'video url')
+
+ thumbnail = self._search_regex(r'poster="([^"]+)"', webpage, 'thumbnail url', fatal=False)
+
+ return {
+ 'url': video_url,
+ 'id': video_id,
+ 'title': title,
+ 'thumbnail': thumbnail,
+ }
diff --git a/youtube_dl/extractor/kaltura.py b/youtube_dl/extractor/kaltura.py
index d28730492..3dca0e566 100644
--- a/youtube_dl/extractor/kaltura.py
+++ b/youtube_dl/extractor/kaltura.py
@@ -13,12 +13,24 @@ from ..utils import (
class KalturaIE(InfoExtractor):
_VALID_URL = r'''(?x)
- (?:kaltura:|
- https?://(:?(?:www|cdnapisec)\.)?kaltura\.com/index\.php/kwidget/(?:[^/]+/)*?wid/_
- )(?P<partner_id>\d+)
- (?::|
- /(?:[^/]+/)*?entry_id/
- )(?P<id>[0-9a-z_]+)'''
+ (?:
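+ # internal kaltura:<partner_id>:<entry_id> references (as produced by other extractors)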
+ kaltura:(?P<partner_id_s>\d+):(?P<id_s>[0-9a-z_]+)|
+ https?://
+ (?:(?:www|cdnapisec)\.)?kaltura\.com/
+ (?:
+ (?:
+ # flash player
+ index\.php/kwidget/
+ (?:[^/]+/)*?wid/_(?P<partner_id>\d+)/
+ (?:[^/]+/)*?entry_id/(?P<id>[0-9a-z_]+)|
+ # html5 player
+ html5/html5lib/
+ (?:[^/]+/)*?entry_id/(?P<id_html5>[0-9a-z_]+)
+ .*\?.*\bwid=_(?P<partner_id_html5>\d+)
+ )
+ )
+ )
+ '''
_API_BASE = 'http://cdnapi.kaltura.com/api_v3/index.php?'
_TESTS = [
{
@@ -43,6 +55,10 @@ class KalturaIE(InfoExtractor):
'url': 'https://cdnapisec.kaltura.com/index.php/kwidget/wid/_557781/uiconf_id/22845202/entry_id/1_plr1syf3',
'only_matching': True,
},
+ {
+ 'url': 'https://cdnapisec.kaltura.com/html5/html5lib/v2.30.2/mwEmbedFrame.php/p/1337/uiconf_id/20540612/entry_id/1_sf5ovm7u?wid=_243342',
+ 'only_matching': True,
+ }
]
def _kaltura_api_call(self, video_id, actions, *args, **kwargs):
@@ -105,9 +121,9 @@ class KalturaIE(InfoExtractor):
video_id, actions, note='Downloading video info JSON')
def _real_extract(self, url):
- video_id = self._match_id(url)
mobj = re.match(self._VALID_URL, url)
- partner_id, entry_id = mobj.group('partner_id'), mobj.group('id')
+ partner_id = mobj.group('partner_id_s') or mobj.group('partner_id') or mobj.group('partner_id_html5')
+ entry_id = mobj.group('id_s') or mobj.group('id') or mobj.group('id_html5')
info, source_data = self._get_video_info(entry_id, partner_id)
@@ -126,7 +142,7 @@ class KalturaIE(InfoExtractor):
self._sort_formats(formats)
return {
- 'id': video_id,
+ 'id': entry_id,
'title': info['name'],
'formats': formats,
'description': info.get('description'),
diff --git a/youtube_dl/extractor/kontrtube.py b/youtube_dl/extractor/kontrtube.py
index 720bc939b..a59c529f4 100644
--- a/youtube_dl/extractor/kontrtube.py
+++ b/youtube_dl/extractor/kontrtube.py
@@ -4,7 +4,10 @@ from __future__ import unicode_literals
import re
from .common import InfoExtractor
-from ..utils import int_or_none
+from ..utils import (
+ int_or_none,
+ parse_duration,
+)
class KontrTubeIE(InfoExtractor):
@@ -34,33 +37,28 @@ class KontrTubeIE(InfoExtractor):
webpage = self._download_webpage(
url, display_id, 'Downloading page')
- video_url = self._html_search_regex(
+ video_url = self._search_regex(
r"video_url\s*:\s*'(.+?)/?',", webpage, 'video URL')
- thumbnail = self._html_search_regex(
- r"preview_url\s*:\s*'(.+?)/?',", webpage, 'video thumbnail', fatal=False)
+ thumbnail = self._search_regex(
+ r"preview_url\s*:\s*'(.+?)/?',", webpage, 'thumbnail', fatal=False)
title = self._html_search_regex(
- r'<title>(.+?)</title>', webpage, 'video title')
+ r'(?s)<h2>(.+?)</h2>', webpage, 'title')
description = self._html_search_meta(
- 'description', webpage, 'video description')
+ 'description', webpage, 'description')
- mobj = re.search(
- r'<div class="col_2">Длительность: <span>(?P<minutes>\d+)м:(?P<seconds>\d+)с</span></div>',
- webpage)
- duration = int(mobj.group('minutes')) * 60 + int(mobj.group('seconds')) if mobj else None
+ duration = self._search_regex(
+ r'Длительность: <em>([^<]+)</em>', webpage, 'duration', fatal=False)
+ if duration:
+ duration = parse_duration(duration.replace('мин', 'min').replace('сек', 'sec'))
- view_count = self._html_search_regex(
- r'<div class="col_2">Просмотров: <span>(\d+)</span></div>',
+ view_count = self._search_regex(
+ r'Просмотров: <em>([^<]+)</em>',
webpage, 'view count', fatal=False)
+ if view_count:
+ view_count = int_or_none(view_count.replace(' ', ''))
- comment_count = None
- comment_str = self._html_search_regex(
- r'Комментарии: <span>([^<]+)</span>', webpage, 'comment count', fatal=False)
- if comment_str.startswith('комментариев нет'):
- comment_count = 0
- else:
- mobj = re.search(r'\d+ из (?P<total>\d+) комментариев', comment_str)
- if mobj:
- comment_count = mobj.group('total')
+ comment_count = int_or_none(self._search_regex(
+ r'Комментарии \((\d+)\)<', webpage, 'comment count', fatal=False))
return {
'id': video_id,
diff --git a/youtube_dl/extractor/krasview.py b/youtube_dl/extractor/krasview.py
index 96f95979a..0ae8ebd68 100644
--- a/youtube_dl/extractor/krasview.py
+++ b/youtube_dl/extractor/krasview.py
@@ -25,6 +25,9 @@ class KrasViewIE(InfoExtractor):
'duration': 27,
'thumbnail': 're:^https?://.*\.jpg',
},
+ 'params': {
+ 'skip_download': 'Not accessible from Travis CI server',
+ },
}
def _real_extract(self, url):
diff --git a/youtube_dl/extractor/kuwo.py b/youtube_dl/extractor/kuwo.py
index 1077846f2..fa233377d 100644
--- a/youtube_dl/extractor/kuwo.py
+++ b/youtube_dl/extractor/kuwo.py
@@ -202,6 +202,7 @@ class KuwoSingerIE(InfoExtractor):
'title': 'Ali',
},
'playlist_mincount': 95,
+ 'skip': 'Regularly stalls travis build', # See https://travis-ci.org/rg3/youtube-dl/jobs/78878540
}]
def _real_extract(self, url):
diff --git a/youtube_dl/extractor/lecture2go.py b/youtube_dl/extractor/lecture2go.py
new file mode 100644
index 000000000..40a3d2346
--- /dev/null
+++ b/youtube_dl/extractor/lecture2go.py
@@ -0,0 +1,62 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+ determine_ext,
+ parse_duration,
+ int_or_none,
+)
+
+
+class Lecture2GoIE(InfoExtractor):
+ _VALID_URL = r'https?://lecture2go\.uni-hamburg\.de/veranstaltungen/-/v/(?P<id>\d+)'
+ _TEST = {
+ 'url': 'https://lecture2go.uni-hamburg.de/veranstaltungen/-/v/17473',
+ 'md5': 'ac02b570883020d208d405d5a3fd2f7f',
+ 'info_dict': {
+ 'id': '17473',
+ 'ext': 'flv',
+ 'title': '2 - Endliche Automaten und reguläre Sprachen',
+ 'creator': 'Frank Heitmann',
+ 'duration': 5220,
+ }
+ }
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+ webpage = self._download_webpage(url, video_id)
+
+ title = self._html_search_regex(r'<em[^>]+class="title">(.+)</em>', webpage, 'title')
+
+ formats = []
+ for video_url in set(re.findall(r'"src","([^"]+)"', webpage)):
+ ext = determine_ext(video_url)
+ if ext == 'f4m':
+ formats.extend(self._extract_f4m_formats(video_url, video_id))
+ elif ext == 'm3u8':
+ formats.extend(self._extract_m3u8_formats(video_url, video_id))
+ else:
+ formats.append({
+ 'url': video_url,
+ })
+
+ self._sort_formats(formats)
+
+ creator = self._html_search_regex(
+ r'<div[^>]+id="description">([^<]+)</div>', webpage, 'creator', fatal=False)
+ duration = parse_duration(self._html_search_regex(
+ r'Duration:\s*</em>\s*<em[^>]*>([^<]+)</em>', webpage, 'duration', fatal=False))
+ view_count = int_or_none(self._html_search_regex(
+ r'Views:\s*</em>\s*<em[^>]+>(\d+)</em>', webpage, 'view count', fatal=False))
+
+ return {
+ 'id': video_id,
+ 'title': title,
+ 'formats': formats,
+ 'creator': creator,
+ 'duration': duration,
+ 'view_count': view_count,
+ }
diff --git a/youtube_dl/extractor/letv.py b/youtube_dl/extractor/letv.py
index ba2ae8085..a28abb0f0 100644
--- a/youtube_dl/extractor/letv.py
+++ b/youtube_dl/extractor/letv.py
@@ -15,6 +15,7 @@ from ..utils import (
determine_ext,
ExtractorError,
parse_iso8601,
+ int_or_none,
)
@@ -134,7 +135,7 @@ class LetvIE(InfoExtractor):
}
if format_id[-1:] == 'p':
- url_info_dict['height'] = format_id[:-1]
+ url_info_dict['height'] = int_or_none(format_id[:-1])
urls.append(url_info_dict)
diff --git a/youtube_dl/extractor/libsyn.py b/youtube_dl/extractor/libsyn.py
index 9ab1416f5..d375695f5 100644
--- a/youtube_dl/extractor/libsyn.py
+++ b/youtube_dl/extractor/libsyn.py
@@ -8,9 +8,9 @@ from ..utils import unified_strdate
class LibsynIE(InfoExtractor):
- _VALID_URL = r'https?://html5-player\.libsyn\.com/embed/episode/id/(?P<id>[0-9]+)'
+ _VALID_URL = r'(?P<mainurl>https?://html5-player\.libsyn\.com/embed/episode/id/(?P<id>[0-9]+))'
- _TEST = {
+ _TESTS = [{
'url': 'http://html5-player.libsyn.com/embed/episode/id/3377616/',
'md5': '443360ee1b58007bc3dcf09b41d093bb',
'info_dict': {
@@ -19,12 +19,24 @@ class LibsynIE(InfoExtractor):
'title': "The Daily Show Podcast without Jon Stewart - Episode 12: Bassem Youssef: Egypt's Jon Stewart",
'description': 'md5:601cb790edd05908957dae8aaa866465',
'upload_date': '20150220',
+ 'thumbnail': 're:^https?://.*',
},
- }
+ }, {
+ 'url': 'https://html5-player.libsyn.com/embed/episode/id/3727166/height/75/width/200/theme/standard/direction/no/autoplay/no/autonext/no/thumbnail/no/preload/no/no_addthis/no/',
+ 'md5': '6c5cb21acd622d754d3b1a92b582ce42',
+ 'info_dict': {
+ 'id': '3727166',
+ 'ext': 'mp3',
+ 'title': 'Clients From Hell Podcast - How a Sex Toy Company Kickstarted my Freelance Career',
+ 'upload_date': '20150818',
+ 'thumbnail': 're:^https?://.*',
+ }
+ }]
def _real_extract(self, url):
- video_id = self._match_id(url)
-
+ m = re.match(self._VALID_URL, url)
+ video_id = m.group('id')
+ url = m.group('mainurl')
webpage = self._download_webpage(url, video_id)
formats = [{
@@ -32,20 +44,18 @@ class LibsynIE(InfoExtractor):
} for media_url in set(re.findall('var\s+mediaURL(?:Libsyn)?\s*=\s*"([^"]+)"', webpage))]
podcast_title = self._search_regex(
- r'<h2>([^<]+)</h2>', webpage, 'title')
+ r'<h2>([^<]+)</h2>', webpage, 'podcast title', default=None)
episode_title = self._search_regex(
- r'<h3>([^<]+)</h3>', webpage, 'title', default=None)
+ r'(?:<div class="episode-title">|<h3>)([^<]+)</', webpage, 'episode title')
title = '%s - %s' % (podcast_title, episode_title) if podcast_title else episode_title
description = self._html_search_regex(
r'<div id="info_text_body">(.+?)</div>', webpage,
- 'description', fatal=False)
-
+ 'description', default=None)
thumbnail = self._search_regex(
r'<img[^>]+class="info-show-icon"[^>]+src="([^"]+)"',
webpage, 'thumbnail', fatal=False)
-
release_date = unified_strdate(self._search_regex(
r'<div class="release_date">Released: ([^<]+)<', webpage, 'release date', fatal=False))
diff --git a/youtube_dl/extractor/lynda.py b/youtube_dl/extractor/lynda.py
index a00f6e5e5..378117270 100644
--- a/youtube_dl/extractor/lynda.py
+++ b/youtube_dl/extractor/lynda.py
@@ -11,13 +11,13 @@ from ..compat import (
)
from ..utils import (
ExtractorError,
+ clean_html,
int_or_none,
)
class LyndaBaseIE(InfoExtractor):
_LOGIN_URL = 'https://www.lynda.com/login/login.aspx'
- _SUCCESSFUL_LOGIN_REGEX = r'isLoggedIn: true'
_ACCOUNT_CREDENTIALS_HINT = 'Use --username and --password options to provide lynda.com account credentials.'
_NETRC_MACHINE = 'lynda'
@@ -41,7 +41,7 @@ class LyndaBaseIE(InfoExtractor):
request, None, 'Logging in as %s' % username)
# Not (yet) logged in
- m = re.search(r'loginResultJson = \'(?P<json>[^\']+)\';', login_page)
+ m = re.search(r'loginResultJson\s*=\s*\'(?P<json>[^\']+)\';', login_page)
if m is not None:
response = m.group('json')
response_json = json.loads(response)
@@ -70,7 +70,16 @@ class LyndaBaseIE(InfoExtractor):
request, None,
'Confirming log in and log out from another device')
- if re.search(self._SUCCESSFUL_LOGIN_REGEX, login_page) is None:
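+ # successful login pages do not always contain the same marker, so probe several known ones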
+ if all(not re.search(p, login_page) for p in (r'isLoggedIn\s*:\s*true', r'logout\.aspx', r'>Log out<')):
+ if 'login error' in login_page:
+ mobj = re.search(
+ r'(?s)<h1[^>]+class="topmost">(?P<title>[^<]+)</h1>\s*<div>(?P<description>.+?)</div>',
+ login_page)
+ if mobj:
+ raise ExtractorError(
+ 'lynda returned error: %s - %s'
+ % (mobj.group('title'), clean_html(mobj.group('description'))),
+ expected=True)
raise ExtractorError('Unable to log in')
@@ -109,9 +118,7 @@ class LyndaIE(LyndaBaseIE):
'lynda returned error: %s' % video_json['Message'], expected=True)
if video_json['HasAccess'] is False:
- raise ExtractorError(
- 'Video %s is only available for members. '
- % video_id + self._ACCOUNT_CREDENTIALS_HINT, expected=True)
+ self.raise_login_required('Video %s is only available for members' % video_id)
video_id = compat_str(video_json['ID'])
duration = video_json['DurationInSeconds']
diff --git a/youtube_dl/extractor/mailru.py b/youtube_dl/extractor/mailru.py
index 54a14cb94..ab1300185 100644
--- a/youtube_dl/extractor/mailru.py
+++ b/youtube_dl/extractor/mailru.py
@@ -25,6 +25,7 @@ class MailRuIE(InfoExtractor):
'uploader_id': 'sonypicturesrus@mail.ru',
'duration': 184,
},
+ 'skip': 'Not accessible from Travis CI server',
},
{
'url': 'http://my.mail.ru/corp/hitech/video/news_hi-tech_mail_ru/1263.html',
@@ -39,6 +40,7 @@ class MailRuIE(InfoExtractor):
'uploader_id': 'hitech@corp.mail.ru',
'duration': 245,
},
+ 'skip': 'Not accessible from Travis CI server',
},
]
diff --git a/youtube_dl/extractor/mdr.py b/youtube_dl/extractor/mdr.py
index 5fdd19027..fc7499958 100644
--- a/youtube_dl/extractor/mdr.py
+++ b/youtube_dl/extractor/mdr.py
@@ -29,7 +29,7 @@ class MDRIE(InfoExtractor):
doc = self._download_xml(domain + xmlurl, video_id)
formats = []
for a in doc.findall('./assets/asset'):
- url_el = a.find('.//progressiveDownloadUrl')
+ url_el = a.find('./progressiveDownloadUrl')
if url_el is None:
continue
abr = int(a.find('bitrateAudio').text) // 1000
diff --git a/youtube_dl/extractor/mit.py b/youtube_dl/extractor/mit.py
index d7ab6a9ae..f088ab9e2 100644
--- a/youtube_dl/extractor/mit.py
+++ b/youtube_dl/extractor/mit.py
@@ -18,12 +18,12 @@ class TechTVMITIE(InfoExtractor):
_TEST = {
'url': 'http://techtv.mit.edu/videos/25418-mit-dna-learning-center-set',
- 'md5': '1f8cb3e170d41fd74add04d3c9330e5f',
+ 'md5': '00a3a27ee20d44bcaa0933ccec4a2cf7',
'info_dict': {
'id': '25418',
'ext': 'mp4',
- 'title': 'MIT DNA Learning Center Set',
- 'description': 'md5:82313335e8a8a3f243351ba55bc1b474',
+ 'title': 'MIT DNA and Protein Sets',
+ 'description': 'md5:46f5c69ce434f0a97e7c628cc142802d',
},
}
@@ -33,8 +33,8 @@ class TechTVMITIE(InfoExtractor):
'http://techtv.mit.edu/videos/%s' % video_id, video_id)
clean_page = re.compile(r'<!--.*?-->', re.S).sub('', raw_page)
- base_url = self._search_regex(
- r'ipadUrl: \'(.+?cloudfront.net/)', raw_page, 'base url')
+ base_url = self._proto_relative_url(self._search_regex(
+ r'ipadUrl: \'(.+?cloudfront.net/)', raw_page, 'base url'), 'http:')
formats_json = self._search_regex(
r'bitrates: (\[.+?\])', raw_page, 'video formats')
formats_mit = json.loads(formats_json)
diff --git a/youtube_dl/extractor/moniker.py b/youtube_dl/extractor/moniker.py
index 88dcd4f73..69e4bcd1a 100644
--- a/youtube_dl/extractor/moniker.py
+++ b/youtube_dl/extractor/moniker.py
@@ -9,7 +9,10 @@ from ..compat import (
compat_urllib_parse,
compat_urllib_request,
)
-from ..utils import ExtractorError
+from ..utils import (
+ ExtractorError,
+ remove_start,
+)
class MonikerIE(InfoExtractor):
@@ -25,6 +28,14 @@ class MonikerIE(InfoExtractor):
'title': 'youtube-dl test video',
},
}, {
+ 'url': 'http://allmyvideos.net/embed-jih3nce3x6wn',
+ 'md5': '710883dee1bfc370ecf9fa6a89307c88',
+ 'info_dict': {
+ 'id': 'jih3nce3x6wn',
+ 'ext': 'mp4',
+ 'title': 'youtube-dl test video',
+ },
+ }, {
'url': 'http://vidspot.net/l2ngsmhs8ci5',
'md5': '710883dee1bfc370ecf9fa6a89307c88',
'info_dict': {
@@ -38,7 +49,10 @@ class MonikerIE(InfoExtractor):
}]
def _real_extract(self, url):
- video_id = self._match_id(url)
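+ # strip the "embed-" prefix so that the canonical video page is fetched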
+ orig_video_id = self._match_id(url)
+ video_id = remove_start(orig_video_id, 'embed-')
+ url = url.replace(orig_video_id, video_id)
+ assert re.match(self._VALID_URL, url) is not None
orig_webpage = self._download_webpage(url, video_id)
if '>File Not Found<' in orig_webpage:
diff --git a/youtube_dl/extractor/mtv.py b/youtube_dl/extractor/mtv.py
index b48fac5e3..a597714e9 100644
--- a/youtube_dl/extractor/mtv.py
+++ b/youtube_dl/extractor/mtv.py
@@ -67,7 +67,7 @@ class MTVServicesInfoExtractor(InfoExtractor):
return [{'url': url, 'ext': 'mp4'}]
def _extract_video_formats(self, mdoc, mtvn_id):
- if re.match(r'.*/(error_country_block\.swf|geoblock\.mp4)$', mdoc.find('.//src').text) is not None:
+ if re.match(r'.*/(error_country_block\.swf|geoblock\.mp4|copyright_error\.flv(?:\?geo\b.+?)?)$', mdoc.find('.//src').text) is not None:
if mtvn_id is not None and self._MOBILE_TEMPLATE is not None:
self.to_screen('The normal version is not available from your '
'country, trying with the mobile version')
@@ -114,7 +114,8 @@ class MTVServicesInfoExtractor(InfoExtractor):
# Remove the templates, like &device={device}
mediagen_url = re.sub(r'&[^=]*?={.*?}(?=(&|$))', '', mediagen_url)
if 'acceptMethods' not in mediagen_url:
- mediagen_url += '&acceptMethods=fms'
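+ # some mediagen URLs come without a query string (see the MTVDEIE test below), so pick the proper separator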
+ mediagen_url += '&' if '?' in mediagen_url else '?'
+ mediagen_url += 'acceptMethods=fms'
mediagen_doc = self._download_xml(mediagen_url, video_id,
'Downloading video urls')
@@ -141,7 +142,7 @@ class MTVServicesInfoExtractor(InfoExtractor):
if title_el is None:
title_el = itemdoc.find('.//{http://search.yahoo.com/mrss/}title')
if title_el is None:
- title_el = itemdoc.find('.//title')
+ title_el = itemdoc.find('.//title') or itemdoc.find('./title')
if title_el.text is None:
title_el = None
@@ -174,8 +175,11 @@ class MTVServicesInfoExtractor(InfoExtractor):
if self._LANG:
info_url += 'lang=%s&' % self._LANG
info_url += data
+ return self._get_videos_info_from_url(info_url, video_id)
+
+ def _get_videos_info_from_url(self, url, video_id):
idoc = self._download_xml(
- info_url, video_id,
+ url, video_id,
'Downloading info', transform_source=fix_xml_ampersands)
return self.playlist_result(
[self._get_video_info(item) for item in idoc.findall('.//item')])
@@ -288,3 +292,65 @@ class MTVIggyIE(MTVServicesInfoExtractor):
}
}
_FEED_URL = 'http://all.mtvworldverticals.com/feed-xml/'
+
+
+class MTVDEIE(MTVServicesInfoExtractor):
+ IE_NAME = 'mtv.de'
+ _VALID_URL = r'https?://(?:www\.)?mtv\.de/(?:artists|shows|news)/(?:[^/]+/)*(?P<id>\d+)-[^/#?]+/*(?:[#?].*)?$'
+ _TESTS = [{
+ 'url': 'http://www.mtv.de/artists/10571-cro/videos/61131-traum',
+ 'info_dict': {
+ 'id': 'music_video-a50bc5f0b3aa4b3190aa',
+ 'ext': 'mp4',
+ 'title': 'MusicVideo_cro-traum',
+ 'description': 'Cro - Traum',
+ },
+ 'params': {
+ # rtmp download
+ 'skip_download': True,
+ },
+ }, {
+ # mediagen URL without query (e.g. http://videos.mtvnn.com/mediagen/e865da714c166d18d6f80893195fcb97)
+ 'url': 'http://www.mtv.de/shows/933-teen-mom-2/staffeln/5353/folgen/63565-enthullungen',
+ 'info_dict': {
+ 'id': 'local_playlist-f5ae778b9832cc837189',
+ 'ext': 'mp4',
+ 'title': 'Episode_teen-mom-2_shows_season-5_episode-1_full-episode_part1',
+ },
+ 'params': {
+ # rtmp download
+ 'skip_download': True,
+ },
+ }, {
+ # single video in pagePlaylist with different id
+ 'url': 'http://www.mtv.de/news/77491-mtv-movies-spotlight-pixels-teil-3',
+ 'info_dict': {
+ 'id': 'local_playlist-4e760566473c4c8c5344',
+ 'ext': 'mp4',
+ 'title': 'Article_mtv-movies-spotlight-pixels-teil-3_short-clips_part1',
+ 'description': 'MTV Movies Supercut',
+ },
+ 'params': {
+ # rtmp download
+ 'skip_download': True,
+ },
+ }]
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+
+ webpage = self._download_webpage(url, video_id)
+
+ playlist = self._parse_json(
+ self._search_regex(
+ r'window\.pagePlaylist\s*=\s*(\[.+?\]);\n', webpage, 'page playlist'),
+ video_id)
+
+ # news pages contain single video in playlist with different id
+ if len(playlist) == 1:
+ return self._get_videos_info_from_url(playlist[0]['mrss'], video_id)
+
+ for item in playlist:
+ item_id = item.get('id')
+ if item_id and compat_str(item_id) == video_id:
+ return self._get_videos_info_from_url(item['mrss'], video_id)
diff --git a/youtube_dl/extractor/musicvault.py b/youtube_dl/extractor/musicvault.py
deleted file mode 100644
index 0e46ac7c1..000000000
--- a/youtube_dl/extractor/musicvault.py
+++ /dev/null
@@ -1,63 +0,0 @@
-from __future__ import unicode_literals
-
-import re
-
-from .common import InfoExtractor
-
-
-class MusicVaultIE(InfoExtractor):
- _VALID_URL = r'https?://www\.musicvault\.com/(?P<uploader_id>[^/?#]*)/video/(?P<display_id>[^/?#]*)_(?P<id>[0-9]+)\.html'
- _TEST = {
- 'url': 'http://www.musicvault.com/the-allman-brothers-band/video/straight-from-the-heart_1010863.html',
- 'md5': '3adcbdb3dcc02d647539e53f284ba171',
- 'info_dict': {
- 'id': '1010863',
- 'ext': 'mp4',
- 'uploader_id': 'the-allman-brothers-band',
- 'title': 'Straight from the Heart',
- 'duration': 244,
- 'uploader': 'The Allman Brothers Band',
- 'thumbnail': 're:^https?://.*/thumbnail/.*',
- 'upload_date': '20131219',
- 'location': 'Capitol Theatre (Passaic, NJ)',
- 'description': 'Listen to The Allman Brothers Band perform Straight from the Heart at Capitol Theatre (Passaic, NJ) on Dec 16, 1981',
- 'timestamp': int,
- }
- }
-
- def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- display_id = mobj.group('display_id')
- webpage = self._download_webpage(url, display_id)
-
- thumbnail = self._search_regex(
- r'<meta itemprop="thumbnail" content="([^"]+)"',
- webpage, 'thumbnail', fatal=False)
-
- data_div = self._search_regex(
- r'(?s)<div class="data">(.*?)</div>', webpage, 'data fields')
- uploader = self._html_search_regex(
- r'<h1.*?>(.*?)</h1>', data_div, 'uploader', fatal=False)
- title = self._html_search_regex(
- r'<h2.*?>(.*?)</h2>', data_div, 'title')
- location = self._html_search_regex(
- r'<h4.*?>(.*?)</h4>', data_div, 'location', fatal=False)
-
- kaltura_id = self._search_regex(
- r'<div id="video-detail-player" data-kaltura-id="([^"]+)"',
- webpage, 'kaltura ID')
- wid = self._search_regex(r'/wid/_([0-9]+)/', webpage, 'wid')
-
- return {
- 'id': mobj.group('id'),
- '_type': 'url_transparent',
- 'url': 'kaltura:%s:%s' % (wid, kaltura_id),
- 'ie_key': 'Kaltura',
- 'display_id': display_id,
- 'uploader_id': mobj.group('uploader_id'),
- 'thumbnail': thumbnail,
- 'description': self._html_search_meta('description', webpage),
- 'location': location,
- 'title': title,
- 'uploader': uploader,
- }
diff --git a/youtube_dl/extractor/mwave.py b/youtube_dl/extractor/mwave.py
new file mode 100644
index 000000000..66b523197
--- /dev/null
+++ b/youtube_dl/extractor/mwave.py
@@ -0,0 +1,58 @@
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..compat import compat_str
+from ..utils import (
+ int_or_none,
+ parse_duration,
+)
+
+
+class MwaveIE(InfoExtractor):
+ _VALID_URL = r'https?://mwave\.interest\.me/mnettv/videodetail\.m\?searchVideoDetailVO\.clip_id=(?P<id>[0-9]+)'
+ _TEST = {
+ 'url': 'http://mwave.interest.me/mnettv/videodetail.m?searchVideoDetailVO.clip_id=168859',
+ 'md5': 'c930e27b7720aaa3c9d0018dfc8ff6cc',
+ 'info_dict': {
+ 'id': '168859',
+ 'ext': 'flv',
+ 'title': '[M COUNTDOWN] SISTAR - SHAKE IT',
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ 'uploader': 'M COUNTDOWN',
+ 'duration': 206,
+ 'view_count': int,
+ }
+ }
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+
+ vod_info = self._download_json(
+ 'http://mwave.interest.me/onair/vod_info.m?vodtype=CL&sectorid=&endinfo=Y&id=%s' % video_id,
+ video_id, 'Downloading vod JSON')
+
+ formats = []
+ for num, cdn_info in enumerate(vod_info['cdn']):
+ stream_url = cdn_info.get('url')
+ if not stream_url:
+ continue
+ stream_name = cdn_info.get('name') or compat_str(num)
+ f4m_stream = self._download_json(
+ stream_url, video_id,
+ 'Downloading %s stream JSON' % stream_name)
+ f4m_url = f4m_stream.get('fileurl')
+ if not f4m_url:
+ continue
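+ # the hdcore parameter appears to be required for the server to return a usable f4m manifest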
+ formats.extend(
+ self._extract_f4m_formats(f4m_url + '&hdcore=3.0.3', video_id, f4m_id=stream_name))
+ self._sort_formats(formats)
+
+ return {
+ 'id': video_id,
+ 'title': vod_info['title'],
+ 'thumbnail': vod_info.get('cover'),
+ 'uploader': vod_info.get('program_title'),
+ 'duration': parse_duration(vod_info.get('time')),
+ 'view_count': int_or_none(vod_info.get('hit')),
+ 'formats': formats,
+ }
diff --git a/youtube_dl/extractor/nationalgeographic.py b/youtube_dl/extractor/nationalgeographic.py
index f793b72f5..6fc9e7b05 100644
--- a/youtube_dl/extractor/nationalgeographic.py
+++ b/youtube_dl/extractor/nationalgeographic.py
@@ -8,18 +8,30 @@ from ..utils import (
class NationalGeographicIE(InfoExtractor):
- _VALID_URL = r'http://video\.nationalgeographic\.com/video/.*?'
-
- _TEST = {
- 'url': 'http://video.nationalgeographic.com/video/news/150210-news-crab-mating-vin?source=featuredvideo',
- 'info_dict': {
- 'id': '4DmDACA6Qtk_',
- 'ext': 'flv',
- 'title': 'Mating Crabs Busted by Sharks',
- 'description': 'md5:16f25aeffdeba55aaa8ec37e093ad8b3',
+ _VALID_URL = r'http://video\.nationalgeographic\.com/.*?'
+
+ _TESTS = [
+ {
+ 'url': 'http://video.nationalgeographic.com/video/news/150210-news-crab-mating-vin?source=featuredvideo',
+ 'info_dict': {
+ 'id': '4DmDACA6Qtk_',
+ 'ext': 'flv',
+ 'title': 'Mating Crabs Busted by Sharks',
+ 'description': 'md5:16f25aeffdeba55aaa8ec37e093ad8b3',
+ },
+ 'add_ie': ['ThePlatform'],
},
- 'add_ie': ['ThePlatform'],
- }
+ {
+ 'url': 'http://video.nationalgeographic.com/wild/when-sharks-attack/the-real-jaws',
+ 'info_dict': {
+ 'id': '_JeBD_D7PlS5',
+ 'ext': 'flv',
+ 'title': 'The Real Jaws',
+ 'description': 'md5:8d3e09d9d53a85cd397b4b21b2c77be6',
+ },
+ 'add_ie': ['ThePlatform'],
+ },
+ ]
def _real_extract(self, url):
name = url_basename(url)
@@ -37,5 +49,6 @@ class NationalGeographicIE(InfoExtractor):
return self.url_result(smuggle_url(
'http://link.theplatform.com/s/ngs/%s?format=SMIL&formats=MPEG4&manifest=f4m' % theplatform_id,
- # For some reason, the normal links don't work and we must force the use of f4m
+ # For some reason, the normal links don't work and we must force
+ # the use of f4m
{'force_smil_url': True}))
diff --git a/youtube_dl/extractor/nbc.py b/youtube_dl/extractor/nbc.py
index dc2091be0..e683d24c4 100644
--- a/youtube_dl/extractor/nbc.py
+++ b/youtube_dl/extractor/nbc.py
@@ -124,7 +124,7 @@ class NBCSportsIE(InfoExtractor):
class NBCNewsIE(InfoExtractor):
_VALID_URL = r'''(?x)https?://(?:www\.)?nbcnews\.com/
(?:video/.+?/(?P<id>\d+)|
- (?:feature|nightly-news)/[^/]+/(?P<title>.+))
+ (?:watch|feature|nightly-news)/[^/]+/(?P<title>.+))
'''
_TESTS = [
@@ -169,6 +169,10 @@ class NBCNewsIE(InfoExtractor):
'description': 'md5:1c10c1eccbe84a26e5debb4381e2d3c5',
},
},
+ {
+ 'url': 'http://www.nbcnews.com/watch/dateline/full-episode--deadly-betrayal-386250819952',
+ 'only_matching': True,
+ },
]
def _real_extract(self, url):
@@ -232,3 +236,28 @@ class NBCNewsIE(InfoExtractor):
'url': info['videoAssets'][-1]['publicUrl'],
'ie_key': 'ThePlatform',
}
+
+
+class MSNBCIE(InfoExtractor):
+ # https URLs redirect to corresponding http ones
+ _VALID_URL = r'http://www\.msnbc\.com/[^/]+/watch/(?P<id>[^/]+)'
+ _TEST = {
+ 'url': 'http://www.msnbc.com/all-in-with-chris-hayes/watch/the-chaotic-gop-immigration-vote-314487875924',
+ 'md5': '6d236bf4f3dddc226633ce6e2c3f814d',
+ 'info_dict': {
+ 'id': 'n_hayes_Aimm_140801_272214',
+ 'ext': 'mp4',
+ 'title': 'The chaotic GOP immigration vote',
+ 'description': 'The Republican House votes on a border bill that has no chance of getting through the Senate or signed by the President and is drawing criticism from all sides.',
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ 'timestamp': 1406937606,
+ 'upload_date': '20140802',
+ 'categories': ['MSNBC/Topics/Franchise/Best of last night', 'MSNBC/Topics/General/Congress'],
+ },
+ }
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+ webpage = self._download_webpage(url, video_id)
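+ # the actual player page is referenced from an embedURL meta tag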
+ embed_url = self._html_search_meta('embedURL', webpage)
+ return self.url_result(embed_url)
diff --git a/youtube_dl/extractor/niconico.py b/youtube_dl/extractor/niconico.py
index 0f8aa5ada..bda1cff05 100644
--- a/youtube_dl/extractor/niconico.py
+++ b/youtube_dl/extractor/niconico.py
@@ -12,6 +12,7 @@ from ..compat import (
compat_urlparse,
)
from ..utils import (
+ encode_dict,
ExtractorError,
int_or_none,
parse_duration,
@@ -100,10 +101,7 @@ class NiconicoIE(InfoExtractor):
'mail': username,
'password': password,
}
- # Convert to UTF-8 *before* urlencode because Python 2.x's urlencode
- # chokes on unicode
- login_form = dict((k.encode('utf-8'), v.encode('utf-8')) for k, v in login_form_strs.items())
- login_data = compat_urllib_parse.urlencode(login_form).encode('utf-8')
+ login_data = compat_urllib_parse.urlencode(encode_dict(login_form_strs)).encode('utf-8')
request = compat_urllib_request.Request(
'https://secure.nicovideo.jp/secure/login', login_data)
login_results = self._download_webpage(
diff --git a/youtube_dl/extractor/nowtv.py b/youtube_dl/extractor/nowtv.py
index 0b5ff4760..c8257719f 100644
--- a/youtube_dl/extractor/nowtv.py
+++ b/youtube_dl/extractor/nowtv.py
@@ -1,12 +1,11 @@
# coding: utf-8
from __future__ import unicode_literals
-import re
-
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
ExtractorError,
+ determine_ext,
int_or_none,
parse_iso8601,
parse_duration,
@@ -15,7 +14,7 @@ from ..utils import (
class NowTVIE(InfoExtractor):
- _VALID_URL = r'https?://(?:www\.)?nowtv\.de/(?P<station>rtl|rtl2|rtlnitro|superrtl|ntv|vox)/(?P<id>.+?)/player'
+ _VALID_URL = r'https?://(?:www\.)?nowtv\.(?:de|at|ch)/(?:rtl|rtl2|rtlnitro|superrtl|ntv|vox)/(?P<id>.+?)/(?:player|preview)'
_TESTS = [{
# rtl
@@ -23,7 +22,7 @@ class NowTVIE(InfoExtractor):
'info_dict': {
'id': '203519',
'display_id': 'bauer-sucht-frau/die-neuen-bauern-und-eine-hochzeit',
- 'ext': 'mp4',
+ 'ext': 'flv',
'title': 'Die neuen Bauern und eine Hochzeit',
'description': 'md5:e234e1ed6d63cf06be5c070442612e7e',
'thumbnail': 're:^https?://.*\.jpg$',
@@ -32,7 +31,7 @@ class NowTVIE(InfoExtractor):
'duration': 2786,
},
'params': {
- # m3u8 download
+ # rtmp download
'skip_download': True,
},
}, {
@@ -41,7 +40,7 @@ class NowTVIE(InfoExtractor):
'info_dict': {
'id': '203481',
'display_id': 'berlin-tag-nacht/berlin-tag-nacht-folge-934',
- 'ext': 'mp4',
+ 'ext': 'flv',
'title': 'Berlin - Tag & Nacht (Folge 934)',
'description': 'md5:c85e88c2e36c552dfe63433bc9506dd0',
'thumbnail': 're:^https?://.*\.jpg$',
@@ -50,7 +49,7 @@ class NowTVIE(InfoExtractor):
'duration': 2641,
},
'params': {
- # m3u8 download
+ # rtmp download
'skip_download': True,
},
}, {
@@ -59,7 +58,7 @@ class NowTVIE(InfoExtractor):
'info_dict': {
'id': '165780',
'display_id': 'alarm-fuer-cobra-11-die-autobahnpolizei/hals-und-beinbruch-2014-08-23-21-10-00',
- 'ext': 'mp4',
+ 'ext': 'flv',
'title': 'Hals- und Beinbruch',
'description': 'md5:b50d248efffe244e6f56737f0911ca57',
'thumbnail': 're:^https?://.*\.jpg$',
@@ -68,7 +67,7 @@ class NowTVIE(InfoExtractor):
'duration': 2742,
},
'params': {
- # m3u8 download
+ # rtmp download
'skip_download': True,
},
}, {
@@ -77,7 +76,7 @@ class NowTVIE(InfoExtractor):
'info_dict': {
'id': '99205',
'display_id': 'medicopter-117/angst',
- 'ext': 'mp4',
+ 'ext': 'flv',
'title': 'Angst!',
'description': 'md5:30cbc4c0b73ec98bcd73c9f2a8c17c4e',
'thumbnail': 're:^https?://.*\.jpg$',
@@ -86,7 +85,7 @@ class NowTVIE(InfoExtractor):
'duration': 3025,
},
'params': {
- # m3u8 download
+ # rtmp download
'skip_download': True,
},
}, {
@@ -95,7 +94,7 @@ class NowTVIE(InfoExtractor):
'info_dict': {
'id': '203521',
'display_id': 'ratgeber-geld/thema-ua-der-erste-blick-die-apple-watch',
- 'ext': 'mp4',
+ 'ext': 'flv',
'title': 'Thema u.a.: Der erste Blick: Die Apple Watch',
'description': 'md5:4312b6c9d839ffe7d8caf03865a531af',
'thumbnail': 're:^https?://.*\.jpg$',
@@ -104,7 +103,7 @@ class NowTVIE(InfoExtractor):
'duration': 1083,
},
'params': {
- # m3u8 download
+ # rtmp download
'skip_download': True,
},
}, {
@@ -113,7 +112,7 @@ class NowTVIE(InfoExtractor):
'info_dict': {
'id': '128953',
'display_id': 'der-hundeprofi/buero-fall-chihuahua-joel',
- 'ext': 'mp4',
+ 'ext': 'flv',
'title': "Büro-Fall / Chihuahua 'Joel'",
'description': 'md5:e62cb6bf7c3cc669179d4f1eb279ad8d',
'thumbnail': 're:^https?://.*\.jpg$',
@@ -122,15 +121,25 @@ class NowTVIE(InfoExtractor):
'duration': 3092,
},
'params': {
- # m3u8 download
+ # rtmp download
'skip_download': True,
},
+ }, {
+ 'url': 'http://www.nowtv.de/rtl/bauer-sucht-frau/die-neuen-bauern-und-eine-hochzeit/preview',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://www.nowtv.at/rtl/bauer-sucht-frau/die-neuen-bauern-und-eine-hochzeit/preview?return=/rtl/bauer-sucht-frau/die-neuen-bauern-und-eine-hochzeit',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://www.nowtv.de/rtl2/echtzeit/list/aktuell/schnelles-geld-am-ende-der-welt/player',
+ 'only_matching': True,
}]
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- display_id = mobj.group('id')
- station = mobj.group('station')
+ display_id = self._match_id(url)
+ display_id_split = display_id.split('/')
+ if len(display_id_split) > 2:
+ display_id = '/'.join((display_id_split[0], display_id_split[-1]))
info = self._download_json(
'https://api.nowtv.de/v3/movies/%s?fields=id,title,free,geoblocked,articleLong,articleShort,broadcastStartDate,seoUrl,duration,format,files' % display_id,
@@ -148,29 +157,19 @@ class NowTVIE(InfoExtractor):
raise ExtractorError(
'Video %s is not available for free' % video_id, expected=True)
- f = info.get('format', {})
- station = f.get('station') or station
-
- STATIONS = {
- 'rtl': 'rtlnow',
- 'rtl2': 'rtl2now',
- 'vox': 'voxnow',
- 'nitro': 'rtlnitronow',
- 'ntv': 'n-tvnow',
- 'superrtl': 'superrtlnow'
- }
-
formats = []
for item in files['items']:
- item_path = remove_start(item['path'], '/')
- tbr = int_or_none(item['bitrate'])
- m3u8_url = 'http://hls.fra.%s.de/hls-vod-enc/%s.m3u8' % (STATIONS[station], item_path)
- m3u8_url = m3u8_url.replace('now/', 'now/videos/')
+ if determine_ext(item['path']) != 'f4v':
+ continue
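+ # the first path component is the RTMP application, the rest the play path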
+ app, play_path = remove_start(item['path'], '/').split('/', 1)
formats.append({
- 'url': m3u8_url,
- 'format_id': '%s-%sk' % (item['id'], tbr),
- 'ext': 'mp4',
- 'tbr': tbr,
+ 'url': 'rtmpe://fms.rtl.de',
+ 'app': app,
+ 'play_path': 'mp4:%s' % play_path,
+ 'ext': 'flv',
+ 'page_url': url,
+ 'player_url': 'http://rtl-now.rtl.de/includes/nc_player.swf',
+ 'tbr': int_or_none(item.get('bitrate')),
})
self._sort_formats(formats)
@@ -178,6 +177,8 @@ class NowTVIE(InfoExtractor):
description = info.get('articleLong') or info.get('articleShort')
timestamp = parse_iso8601(info.get('broadcastStartDate'), ' ')
duration = parse_duration(info.get('duration'))
+
+ f = info.get('format', {})
thumbnail = f.get('defaultImage169Format') or f.get('defaultImage169Logo')
return {
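The RTMP parameters above are derived purely from the f4v item path. A minimal sketch of the mapping, using the same remove_start helper the extractor imports (the sample path is illustrative, not a real CDN path):

from youtube_dl.utils import remove_start

def rtmp_params(item_path):
    # e.g. '/rtlnow/videos/episode.f4v' -> app='rtlnow',
    # play_path='mp4:videos/episode.f4v'
    app, play_path = remove_start(item_path, '/').split('/', 1)
    return {
        'url': 'rtmpe://fms.rtl.de',  # RTMPE server used by the extractor above
        'app': app,
        'play_path': 'mp4:%s' % play_path,
    }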
diff --git a/youtube_dl/extractor/nowvideo.py b/youtube_dl/extractor/nowvideo.py
index dec09cdfe..17baa9679 100644
--- a/youtube_dl/extractor/nowvideo.py
+++ b/youtube_dl/extractor/nowvideo.py
@@ -7,7 +7,7 @@ class NowVideoIE(NovaMovIE):
IE_NAME = 'nowvideo'
IE_DESC = 'NowVideo'
- _VALID_URL = NovaMovIE._VALID_URL_TEMPLATE % {'host': 'nowvideo\.(?:ch|sx|eu|at|ag|co|li)'}
+ _VALID_URL = NovaMovIE._VALID_URL_TEMPLATE % {'host': 'nowvideo\.(?:ch|ec|sx|eu|at|ag|co|li)'}
_HOST = 'www.nowvideo.ch'
diff --git a/youtube_dl/extractor/npo.py b/youtube_dl/extractor/npo.py
index 0c2d02c10..eb12fb810 100644
--- a/youtube_dl/extractor/npo.py
+++ b/youtube_dl/extractor/npo.py
@@ -407,6 +407,7 @@ class NPORadioFragmentIE(InfoExtractor):
class VPROIE(NPOIE):
+ IE_NAME = 'vpro'
_VALID_URL = r'https?://(?:www\.)?(?:tegenlicht\.)?vpro\.nl/(?:[^/]+/){2,}(?P<id>[^/]+)\.html'
_TESTS = [
diff --git a/youtube_dl/extractor/odnoklassniki.py b/youtube_dl/extractor/odnoklassniki.py
index 215ffe87b..66520c2c5 100644
--- a/youtube_dl/extractor/odnoklassniki.py
+++ b/youtube_dl/extractor/odnoklassniki.py
@@ -12,19 +12,21 @@ from ..utils import (
class OdnoklassnikiIE(InfoExtractor):
- _VALID_URL = r'https?://(?:odnoklassniki|ok)\.ru/(?:video|web-api/video/moviePlayer)/(?P<id>[\d-]+)'
+ _VALID_URL = r'https?://(?:www\.)?(?:odnoklassniki|ok)\.ru/(?:video|web-api/video/moviePlayer)/(?P<id>[\d-]+)'
_TESTS = [{
# metadata in JSON
'url': 'http://ok.ru/video/20079905452',
- 'md5': '8e24ad2da6f387948e7a7d44eb8668fe',
+ 'md5': '6ba728d85d60aa2e6dd37c9e70fdc6bc',
'info_dict': {
'id': '20079905452',
'ext': 'mp4',
'title': 'Культура меняет нас (прекрасный ролик!))',
'duration': 100,
+ 'upload_date': '20141207',
'uploader_id': '330537914540',
'uploader': 'Виталий Добровольский',
'like_count': int,
+ 'age_limit': 0,
},
}, {
# metadataUrl
@@ -35,13 +37,33 @@ class OdnoklassnikiIE(InfoExtractor):
'ext': 'mp4',
'title': 'Девушка без комплексов ...',
'duration': 191,
+ 'upload_date': '20150518',
'uploader_id': '534380003155',
- 'uploader': 'Андрей Мещанинов',
+ 'uploader': '☭ Андрей Мещанинов ☭',
'like_count': int,
+ 'age_limit': 0,
+ },
+ }, {
+ # YouTube embed (metadataUrl, provider == USER_YOUTUBE)
+ 'url': 'http://ok.ru/video/64211978996595-1',
+ 'md5': '5d7475d428845cd2e13bae6f1a992278',
+ 'info_dict': {
+ 'id': '64211978996595-1',
+ 'ext': 'mp4',
+ 'title': 'Космическая среда от 26 августа 2015',
+ 'description': 'md5:848eb8b85e5e3471a3a803dae1343ed0',
+ 'duration': 440,
+ 'upload_date': '20150826',
+ 'uploader_id': '750099571',
+ 'uploader': 'Алина П',
+ 'age_limit': 0,
},
}, {
'url': 'http://ok.ru/web-api/video/moviePlayer/20079905452',
'only_matching': True,
+ }, {
+ 'url': 'http://www.ok.ru/video/20648036891',
+ 'only_matching': True,
}]
def _real_extract(self, url):
@@ -52,7 +74,8 @@ class OdnoklassnikiIE(InfoExtractor):
player = self._parse_json(
unescapeHTML(self._search_regex(
- r'data-attributes="([^"]+)"', webpage, 'player')),
+ r'data-options=(?P<quote>["\'])(?P<player>{.+?%s.+?})(?P=quote)' % video_id,
+ webpage, 'player', group='player')),
video_id)
flashvars = player['flashvars']
@@ -85,16 +108,7 @@ class OdnoklassnikiIE(InfoExtractor):
like_count = int_or_none(metadata.get('likeCount'))
- quality = qualities(('mobile', 'lowest', 'low', 'sd', 'hd'))
-
- formats = [{
- 'url': f['url'],
- 'ext': 'mp4',
- 'format_id': f['name'],
- 'quality': quality(f['name']),
- } for f in metadata['videos']]
-
- return {
+ info = {
'id': video_id,
'title': title,
'thumbnail': thumbnail,
@@ -104,5 +118,24 @@ class OdnoklassnikiIE(InfoExtractor):
'uploader_id': uploader_id,
'like_count': like_count,
'age_limit': age_limit,
- 'formats': formats,
}
+
+ if metadata.get('provider') == 'USER_YOUTUBE':
+ info.update({
+ '_type': 'url_transparent',
+ 'url': movie['contentId'],
+ })
+ return info
+
+ quality = qualities(('mobile', 'lowest', 'low', 'sd', 'hd'))
+
+ formats = [{
+ 'url': f['url'],
+ 'ext': 'mp4',
+ 'format_id': f['name'],
+ 'quality': quality(f['name']),
+ } for f in metadata['videos']]
+ self._sort_formats(formats)
+
+ info['formats'] = formats
+ return info
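For reference, the qualities helper used above simply ranks a format name by its position in the given tuple; a minimal sketch of what utils.qualities does:

def qualities(quality_ids):
    # Returns a callable: known ids get their index ('mobile' -> 0,
    # 'hd' -> 4 in the tuple above), unknown ids get -1 and sort last.
    def q(qid):
        try:
            return quality_ids.index(qid)
        except ValueError:
            return -1
    return q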
diff --git a/youtube_dl/extractor/pbs.py b/youtube_dl/extractor/pbs.py
index fec5d65ad..683c81de3 100644
--- a/youtube_dl/extractor/pbs.py
+++ b/youtube_dl/extractor/pbs.py
@@ -32,7 +32,7 @@ class PBSIE(InfoExtractor):
'info_dict': {
'id': '2365006249',
'ext': 'mp4',
- 'title': 'A More Perfect Union',
+ 'title': 'Constitution USA with Peter Sagal - A More Perfect Union',
'description': 'md5:ba0c207295339c8d6eced00b7c363c6a',
'duration': 3190,
},
@@ -46,7 +46,7 @@ class PBSIE(InfoExtractor):
'info_dict': {
'id': '2365297690',
'ext': 'mp4',
- 'title': 'Losing Iraq',
+ 'title': 'FRONTLINE - Losing Iraq',
'description': 'md5:f5bfbefadf421e8bb8647602011caf8e',
'duration': 5050,
},
@@ -60,7 +60,7 @@ class PBSIE(InfoExtractor):
'info_dict': {
'id': '2201174722',
'ext': 'mp4',
- 'title': 'Cyber Schools Gain Popularity, but Quality Questions Persist',
+ 'title': 'PBS NewsHour - Cyber Schools Gain Popularity, but Quality Questions Persist',
'description': 'md5:5871c15cba347c1b3d28ac47a73c7c28',
'duration': 801,
},
@@ -72,7 +72,7 @@ class PBSIE(InfoExtractor):
'id': '2365297708',
'ext': 'mp4',
'description': 'md5:68d87ef760660eb564455eb30ca464fe',
- 'title': 'Dudamel Conducts Verdi Requiem at the Hollywood Bowl - Full',
+ 'title': 'Great Performances - Dudamel Conducts Verdi Requiem at the Hollywood Bowl - Full',
'duration': 6559,
'thumbnail': 're:^https?://.*\.jpg$',
},
@@ -88,10 +88,11 @@ class PBSIE(InfoExtractor):
'display_id': 'killer-typhoon',
'ext': 'mp4',
'description': 'md5:c741d14e979fc53228c575894094f157',
- 'title': 'Killer Typhoon',
+ 'title': 'NOVA - Killer Typhoon',
'duration': 3172,
'thumbnail': 're:^https?://.*\.jpg$',
'upload_date': '20140122',
+ 'age_limit': 10,
},
'params': {
'skip_download': True, # requires ffmpeg
@@ -110,7 +111,7 @@ class PBSIE(InfoExtractor):
'id': '2280706814',
'display_id': 'player',
'ext': 'mp4',
- 'title': 'Death and the Civil War',
+ 'title': 'American Experience - Death and the Civil War',
'description': 'American Experience, TV’s most-watched history series, brings to life the compelling stories from our past that inform our understanding of the world today.',
'duration': 6705,
'thumbnail': 're:^https?://.*\.jpg$',
@@ -118,6 +119,21 @@ class PBSIE(InfoExtractor):
'params': {
'skip_download': True, # requires ffmpeg
},
+ },
+ {
+ 'url': 'http://video.pbs.org/video/2365367186/',
+ 'info_dict': {
+ 'id': '2365367186',
+ 'display_id': '2365367186',
+ 'ext': 'mp4',
+ 'title': 'To Catch A Comet - Full Episode',
+ 'description': 'On November 12, 2014, billions of kilometers from Earth, spacecraft orbiter Rosetta and lander Philae did what no other had dared to attempt \u2014 land on the volatile surface of a comet as it zooms around the sun at 67,000 km/hr. The European Space Agency hopes this mission can help peer into our past and unlock secrets of our origins.',
+ 'duration': 3342,
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ },
+ 'params': {
+ 'skip_download': True, # requires ffmpeg
+ },
}
]
@@ -232,6 +248,12 @@ class PBSIE(InfoExtractor):
'url': closed_captions_url,
}]
+ # info['title'] is often incomplete (e.g. 'Full Episode', 'Episode 5', etc.)
+ # Try turning it into the 'program - title' naming scheme if possible
+ alt_title = info.get('program', {}).get('title')
+ if alt_title:
+ info['title'] = alt_title + ' - ' + re.sub(r'^' + re.escape(alt_title) + r'[\s\-:]+', '', info['title'])
+
return {
'id': video_id,
'display_id': display_id,
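A worked example of the renaming above: stripping any existing program prefix first keeps titles from doubling up, and re.escape guards against regex metacharacters in program names:

import re

def program_title(program, title):
    return program + ' - ' + re.sub(
        r'^' + re.escape(program) + r'[\s\-:]+', '', title)

assert program_title('FRONTLINE', 'Losing Iraq') == 'FRONTLINE - Losing Iraq'
assert program_title('FRONTLINE', 'FRONTLINE - Losing Iraq') == 'FRONTLINE - Losing Iraq'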
diff --git a/youtube_dl/extractor/periscope.py b/youtube_dl/extractor/periscope.py
new file mode 100644
index 000000000..8ad936758
--- /dev/null
+++ b/youtube_dl/extractor/periscope.py
@@ -0,0 +1,99 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..compat import (
+ compat_urllib_parse,
+ compat_urllib_request,
+)
+from ..utils import parse_iso8601
+
+
+class PeriscopeIE(InfoExtractor):
+ IE_DESC = 'Periscope'
+ _VALID_URL = r'https?://(?:www\.)?periscope\.tv/w/(?P<id>[^/?#]+)'
+ _TEST = {
+ 'url': 'https://www.periscope.tv/w/aJUQnjY3MjA3ODF8NTYxMDIyMDl2zCg2pECBgwTqRpQuQD352EMPTKQjT4uqlM3cgWFA-g==',
+ 'md5': '65b57957972e503fcbbaeed8f4fa04ca',
+ 'info_dict': {
+ 'id': '56102209',
+ 'ext': 'mp4',
+ 'title': 'Bec Boop - 🚠✈️🇬🇧 Fly above #London in Emirates Air Line cable car at night 🇬🇧✈️🚠 #BoopScope 🎀💗',
+ 'timestamp': 1438978559,
+ 'upload_date': '20150807',
+ 'uploader': 'Bec Boop',
+ 'uploader_id': '1465763',
+ },
+ 'skip': 'Expires in 24 hours',
+ }
+
+ def _call_api(self, method, token):
+ return self._download_json(
+ 'https://api.periscope.tv/api/v2/%s?token=%s' % (method, token), token)
+
+ def _real_extract(self, url):
+ token = self._match_id(url)
+
+ broadcast_data = self._call_api('getBroadcastPublic', token)
+ broadcast = broadcast_data['broadcast']
+ status = broadcast['status']
+
+ uploader = broadcast.get('user_display_name') or broadcast_data.get('user', {}).get('display_name')
+ uploader_id = broadcast.get('user_id') or broadcast_data.get('user', {}).get('id')
+
+ title = '%s - %s' % (uploader, status) if uploader else status
+ state = (broadcast.get('state') or '').lower()
+ if state == 'running':
+ title = self._live_title(title)
+ timestamp = parse_iso8601(broadcast.get('created_at'))
+
+ thumbnails = [{
+ 'url': broadcast[image],
+ } for image in ('image_url', 'image_url_small') if broadcast.get(image)]
+
+ stream = self._call_api('getAccessPublic', token)
+
+ formats = []
+ for format_id in ('replay', 'rtmp', 'hls', 'https_hls'):
+ video_url = stream.get(format_id + '_url')
+ if not video_url:
+ continue
+ f = {
+ 'url': video_url,
+ 'ext': 'flv' if format_id == 'rtmp' else 'mp4',
+ }
+ if format_id != 'rtmp':
+ f['protocol'] = 'm3u8_native' if state == 'ended' else 'm3u8'
+ formats.append(f)
+ self._sort_formats(formats)
+
+ return {
+ 'id': broadcast.get('id') or token,
+ 'title': title,
+ 'timestamp': timestamp,
+ 'uploader': uploader,
+ 'uploader_id': uploader_id,
+ 'thumbnails': thumbnails,
+ 'formats': formats,
+ }
+
+
+class QuickscopeIE(InfoExtractor):
+ IE_DESC = 'Quick Scope'
+ _VALID_URL = r'https?://watchonperiscope\.com/broadcast/(?P<id>\d+)'
+ _TEST = {
+ 'url': 'https://watchonperiscope.com/broadcast/56180087',
+ 'only_matching': True,
+ }
+
+ def _real_extract(self, url):
+ broadcast_id = self._match_id(url)
+ request = compat_urllib_request.Request(
+ 'https://watchonperiscope.com/api/accessChannel', compat_urllib_parse.urlencode({
+ 'broadcast_id': broadcast_id,
+ 'entry_ticket': '',
+ 'from_push': 'false',
+ 'uses_sessions': 'true',
+ }).encode('utf-8'))
+ return self.url_result(
+ self._download_json(request, broadcast_id)['share_url'], 'Periscope')
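The PeriscopeIE format loop above distinguishes three delivery modes; a compact sketch of the protocol choice, assuming the same format ids ('rtmp' is handed to rtmpdump, finished replays to the native HLS downloader, live streams to ffmpeg):

def pick_protocol(format_id, state):
    if format_id == 'rtmp':
        return 'rtmp'  # flv over RTMP
    # HLS: replays of ended broadcasts are static segment lists the
    # native downloader can fetch; live ones are left to ffmpeg.
    return 'm3u8_native' if state == 'ended' else 'm3u8'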
diff --git a/youtube_dl/extractor/playtvak.py b/youtube_dl/extractor/playtvak.py
new file mode 100644
index 000000000..e360404f7
--- /dev/null
+++ b/youtube_dl/extractor/playtvak.py
@@ -0,0 +1,181 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..compat import (
+ compat_urlparse,
+ compat_urllib_parse,
+)
+from ..utils import (
+ ExtractorError,
+ int_or_none,
+ parse_iso8601,
+ qualities,
+)
+
+
+class PlaytvakIE(InfoExtractor):
+ IE_DESC = 'Playtvak.cz, iDNES.cz, Lidovky.cz and Metro.cz'
+ _VALID_URL = r'https?://(?:.+?\.)?(?:playtvak|idnes|lidovky|metro)\.cz/.*\?(?:c|idvideo)=(?P<id>[^&]+)'
+ _TESTS = [{
+ 'url': 'http://www.playtvak.cz/vyzente-vosy-a-srsne-ze-zahrady-dn5-/hodinovy-manzel.aspx?c=A150730_150323_hodinovy-manzel_kuko',
+ 'md5': '4525ae312c324b4be2f4603cc78ceb4a',
+ 'info_dict': {
+ 'id': 'A150730_150323_hodinovy-manzel_kuko',
+ 'ext': 'mp4',
+ 'title': 'Vyžeňte vosy a sršně ze zahrady',
+ 'description': 'md5:f93d398691044d303bc4a3de62f3e976',
+ 'thumbnail': 're:(?i)^https?://.*\.(?:jpg|png)$',
+ 'duration': 279,
+ 'timestamp': 1438732860,
+ 'upload_date': '20150805',
+ 'is_live': False,
+ }
+ }, { # live video test
+ 'url': 'http://slowtv.playtvak.cz/planespotting-0pr-/planespotting.aspx?c=A150624_164934_planespotting_cat',
+ 'info_dict': {
+ 'id': 'A150624_164934_planespotting_cat',
+ 'ext': 'flv',
+ 'title': 're:^Přímý přenos iDNES.cz [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
+ 'description': 'Sledujte provoz na ranveji Letiště Václava Havla v Praze',
+ 'thumbnail': 're:(?i)^https?://.*\.(?:jpg|png)$',
+ 'is_live': True,
+ },
+ 'params': {
+ 'skip_download': True, # requires rtmpdump
+ },
+ }, { # idnes.cz
+ 'url': 'http://zpravy.idnes.cz/pes-zavreny-v-aute-rozbijeni-okynek-v-aute-fj5-/domaci.aspx?c=A150809_104116_domaci_pku',
+ 'md5': '819832ba33cd7016e58a6658577fe289',
+ 'info_dict': {
+ 'id': 'A150809_104116_domaci_pku',
+ 'ext': 'mp4',
+ 'title': 'Zavřeli jsme mraženou pizzu do auta. Upekla se',
+ 'description': 'md5:01e73f02329e2e5760bd5eed4d42e3c2',
+ 'thumbnail': 're:(?i)^https?://.*\.(?:jpg|png)$',
+ 'duration': 39,
+ 'timestamp': 1438969140,
+ 'upload_date': '20150807',
+ 'is_live': False,
+ }
+ }, { # lidovky.cz
+ 'url': 'http://www.lidovky.cz/dalsi-demonstrace-v-praze-o-migraci-duq-/video.aspx?c=A150808_214044_ln-video_ELE',
+ 'md5': 'c7209ac4ba9d234d4ad5bab7485bcee8',
+ 'info_dict': {
+ 'id': 'A150808_214044_ln-video_ELE',
+ 'ext': 'mp4',
+ 'title': 'Táhni! Demonstrace proti imigrantům budila emoce',
+ 'description': 'md5:97c81d589a9491fbfa323c9fa3cca72c',
+ 'thumbnail': 're:(?i)^https?://.*\.(?:jpg|png)$',
+ 'timestamp': 1439052180,
+ 'upload_date': '20150808',
+ 'is_live': False,
+ }
+ }, { # metro.cz
+ 'url': 'http://www.metro.cz/video-pod-billboardem-se-na-vltavske-roztocil-kolotoc-deti-vozil-jen-par-hodin-1hx-/metro-extra.aspx?c=A141111_173251_metro-extra_row',
+ 'md5': '84fc1deedcac37b7d4a6ccae7c716668',
+ 'info_dict': {
+ 'id': 'A141111_173251_metro-extra_row',
+ 'ext': 'mp4',
+ 'title': 'Recesisté udělali z billboardu kolotoč',
+ 'description': 'md5:7369926049588c3989a66c9c1a043c4c',
+ 'thumbnail': 're:(?i)^https?://.*\.(?:jpg|png)$',
+ 'timestamp': 1415725500,
+ 'upload_date': '20141111',
+ 'is_live': False,
+ }
+ }, {
+ 'url': 'http://www.playtvak.cz/embed.aspx?idvideo=V150729_141549_play-porad_kuko',
+ 'only_matching': True,
+ }]
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+
+ webpage = self._download_webpage(url, video_id)
+
+ info_url = self._html_search_regex(
+ r'Misc\.videoFLV\(\s*{\s*data\s*:\s*"([^"]+)"', webpage, 'info url')
+
+ parsed_url = compat_urlparse.urlparse(info_url)
+
+ qs = compat_urlparse.parse_qs(parsed_url.query)
+ qs.update({
+ 'reklama': ['0'],
+ 'type': ['js'],
+ })
+
+ info_url = compat_urlparse.urlunparse(
+ parsed_url._replace(query=compat_urllib_parse.urlencode(qs, True)))
+
+ json_info = self._download_json(
+ info_url, video_id,
+ transform_source=lambda s: s[s.index('{'):s.rindex('}') + 1])
+
+ item = None
+ for i in json_info['items']:
+ if i.get('type') in ('video', 'stream'):
+ item = i
+ break
+ if not item:
+ raise ExtractorError('No suitable stream found')
+
+ quality = qualities(('low', 'middle', 'high'))
+
+ formats = []
+ for fmt in item['video']:
+ video_url = fmt.get('file')
+ if not video_url:
+ continue
+
+ format_ = fmt['format']
+ format_id = '%s_%s' % (format_, fmt['quality'])
+ preference = None
+
+ if format_ in ('mp4', 'webm'):
+ ext = format_
+ elif format_ == 'rtmp':
+ ext = 'flv'
+ elif format_ == 'apple':
+ ext = 'mp4'
+ # Some streams have mp3 audio which does not play
+ # well with ffmpeg filter aac_adtstoasc
+ preference = -1
+ elif format_ == 'adobe': # f4m manifest fails with 404 in 80% of requests
+ continue
+ else: # Other formats not supported yet
+ continue
+
+ formats.append({
+ 'url': video_url,
+ 'ext': ext,
+ 'format_id': format_id,
+ 'quality': quality(fmt.get('quality')),
+ 'preference': preference,
+ })
+ self._sort_formats(formats)
+
+ title = item['title']
+ is_live = item['type'] == 'stream'
+ if is_live:
+ title = self._live_title(title)
+ description = self._og_search_description(webpage, default=None) or self._html_search_meta(
+ 'description', webpage, 'description')
+ timestamp = None
+ duration = None
+ if not is_live:
+ duration = int_or_none(item.get('length'))
+ timestamp = item.get('published')
+ if timestamp:
+ timestamp = parse_iso8601(timestamp[:-5])
+
+ return {
+ 'id': video_id,
+ 'title': title,
+ 'description': description,
+ 'thumbnail': item.get('image'),
+ 'duration': duration,
+ 'timestamp': timestamp,
+ 'is_live': is_live,
+ 'formats': formats,
+ }
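The info URL manipulation above is plain query-string surgery; a standalone sketch with the Python 3 stdlib names (the extractor uses the compat_* aliases, and the host below is illustrative):

from urllib.parse import parse_qs, urlencode, urlparse, urlunparse

def with_params(url, extra):
    parsed = urlparse(url)
    qs = parse_qs(parsed.query)
    qs.update(extra)  # parse_qs values are lists, e.g. {'reklama': ['0']}
    return urlunparse(parsed._replace(query=urlencode(qs, doseq=True)))

print(with_params('http://servix.example.cz/info?c=X',
                  {'reklama': ['0'], 'type': ['js']}))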
diff --git a/youtube_dl/extractor/pluralsight.py b/youtube_dl/extractor/pluralsight.py
new file mode 100644
index 000000000..fd32836cc
--- /dev/null
+++ b/youtube_dl/extractor/pluralsight.py
@@ -0,0 +1,207 @@
+from __future__ import unicode_literals
+
+import re
+import json
+
+from .common import InfoExtractor
+from ..compat import (
+ compat_str,
+ compat_urllib_parse,
+ compat_urllib_request,
+ compat_urlparse,
+)
+from ..utils import (
+ ExtractorError,
+ int_or_none,
+ parse_duration,
+)
+
+
+class PluralsightIE(InfoExtractor):
+ IE_NAME = 'pluralsight'
+ _VALID_URL = r'https?://(?:www\.)?pluralsight\.com/training/player\?author=(?P<author>[^&]+)&name=(?P<name>[^&]+)(?:&mode=live)?&clip=(?P<clip>\d+)&course=(?P<course>[^&]+)'
+ _LOGIN_URL = 'https://www.pluralsight.com/id/'
+ _NETRC_MACHINE = 'pluralsight'
+
+ _TEST = {
+ 'url': 'http://www.pluralsight.com/training/player?author=mike-mckeown&name=hosting-sql-server-windows-azure-iaas-m7-mgmt&mode=live&clip=3&course=hosting-sql-server-windows-azure-iaas',
+ 'md5': '4d458cf5cf4c593788672419a8dd4cf8',
+ 'info_dict': {
+ 'id': 'hosting-sql-server-windows-azure-iaas-m7-mgmt-04',
+ 'ext': 'mp4',
+ 'title': 'Management of SQL Server - Demo Monitoring',
+ 'duration': 338,
+ },
+ 'skip': 'Requires pluralsight account credentials',
+ }
+
+ def _real_initialize(self):
+ self._login()
+
+ def _login(self):
+ (username, password) = self._get_login_info()
+ if username is None:
+ self.raise_login_required('Pluralsight account is required')
+
+ login_page = self._download_webpage(
+ self._LOGIN_URL, None, 'Downloading login page')
+
+ login_form = self._hidden_inputs(login_page)
+
+ login_form.update({
+ 'Username': username.encode('utf-8'),
+ 'Password': password.encode('utf-8'),
+ })
+
+ post_url = self._search_regex(
+ r'<form[^>]+action=(["\'])(?P<url>.+?)\1', login_page,
+ 'post url', default=self._LOGIN_URL, group='url')
+
+ if not post_url.startswith('http'):
+ post_url = compat_urlparse.urljoin(self._LOGIN_URL, post_url)
+
+ request = compat_urllib_request.Request(
+ post_url, compat_urllib_parse.urlencode(login_form).encode('utf-8'))
+ request.add_header('Content-Type', 'application/x-www-form-urlencoded')
+
+ response = self._download_webpage(
+ request, None, 'Logging in as %s' % username)
+
+ error = self._search_regex(
+ r'<span[^>]+class="field-validation-error"[^>]*>([^<]+)</span>',
+ response, 'error message', default=None)
+ if error:
+ raise ExtractorError('Unable to login: %s' % error, expected=True)
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ author = mobj.group('author')
+ name = mobj.group('name')
+ clip_id = mobj.group('clip')
+ course = mobj.group('course')
+
+ display_id = '%s-%s' % (name, clip_id)
+
+ webpage = self._download_webpage(url, display_id)
+
+ collection = self._parse_json(
+ self._search_regex(
+ r'moduleCollection\s*:\s*new\s+ModuleCollection\((\[.+?\])\s*,\s*\$rootScope\)',
+ webpage, 'modules'),
+ display_id)
+
+ module, clip = None, None
+
+ for module_ in collection:
+ if module_.get('moduleName') == name:
+ module = module_
+ for clip_ in module_.get('clips', []):
+ clip_index = clip_.get('clipIndex')
+ if clip_index is None:
+ continue
+ if compat_str(clip_index) == clip_id:
+ clip = clip_
+ break
+
+ if not clip:
+ raise ExtractorError('Unable to resolve clip')
+
+ QUALITIES = {
+ 'low': {'width': 640, 'height': 480},
+ 'medium': {'width': 848, 'height': 640},
+ 'high': {'width': 1024, 'height': 768},
+ }
+
+ ALLOWED_QUALITIES = (
+ ('webm', ('high',)),
+ ('mp4', ('low', 'medium', 'high',)),
+ )
+
+ formats = []
+ for ext, qualities in ALLOWED_QUALITIES:
+ for quality in qualities:
+ f = QUALITIES[quality].copy()
+ clip_post = {
+ 'a': author,
+ 'cap': 'false',
+ 'cn': clip_id,
+ 'course': course,
+ 'lc': 'en',
+ 'm': name,
+ 'mt': ext,
+ 'q': '%dx%d' % (f['width'], f['height']),
+ }
+ request = compat_urllib_request.Request(
+ 'http://www.pluralsight.com/training/Player/ViewClip',
+ json.dumps(clip_post).encode('utf-8'))
+ request.add_header('Content-Type', 'application/json;charset=utf-8')
+ format_id = '%s-%s' % (ext, quality)
+ clip_url = self._download_webpage(
+ request, display_id, 'Downloading %s URL' % format_id, fatal=False)
+ if not clip_url:
+ continue
+ f.update({
+ 'url': clip_url,
+ 'ext': ext,
+ 'format_id': format_id,
+ })
+ formats.append(f)
+ self._sort_formats(formats)
+
+ # TODO: captions
+ # http://www.pluralsight.com/training/Player/ViewClip + cap = true
+ # or
+ # http://www.pluralsight.com/training/Player/Captions
+ # { a = author, cn = clip_id, lc = end, m = name }
+
+ return {
+ 'id': clip['clipName'],
+ 'title': '%s - %s' % (module['title'], clip['title']),
+ 'duration': int_or_none(clip.get('duration')) or parse_duration(clip.get('formattedDuration')),
+ 'creator': author,
+ 'formats': formats
+ }
+
+
+class PluralsightCourseIE(InfoExtractor):
+ IE_NAME = 'pluralsight:course'
+ _VALID_URL = r'https?://(?:www\.)?pluralsight\.com/courses/(?P<id>[^/]+)'
+ _TEST = {
+ # Free course from Pluralsight Starter Subscription for Microsoft TechNet
+ # https://offers.pluralsight.com/technet?loc=zTS3z&prod=zOTprodz&tech=zOttechz&prog=zOTprogz&type=zSOz&media=zOTmediaz&country=zUSz
+ 'url': 'http://www.pluralsight.com/courses/hosting-sql-server-windows-azure-iaas',
+ 'info_dict': {
+ 'id': 'hosting-sql-server-windows-azure-iaas',
+ 'title': 'Hosting SQL Server in Microsoft Azure IaaS Fundamentals',
+ 'description': 'md5:61b37e60f21c4b2f91dc621a977d0986',
+ },
+ 'playlist_count': 31,
+ }
+
+ def _real_extract(self, url):
+ course_id = self._match_id(url)
+
+ # TODO: PSM cookie
+
+ course = self._download_json(
+ 'http://www.pluralsight.com/data/course/%s' % course_id,
+ course_id, 'Downloading course JSON')
+
+ title = course['title']
+ description = course.get('description') or course.get('shortDescription')
+
+ course_data = self._download_json(
+ 'http://www.pluralsight.com/data/course/content/%s' % course_id,
+ course_id, 'Downloading course data JSON')
+
+ entries = []
+ for module in course_data:
+ for clip in module.get('clips', []):
+ player_parameters = clip.get('playerParameters')
+ if not player_parameters:
+ continue
+ entries.append(self.url_result(
+ 'http://www.pluralsight.com/training/player?%s' % player_parameters,
+ 'Pluralsight'))
+
+ return self.playlist_result(entries, course_id, title, description)
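Each Pluralsight format above costs one authenticated POST to ViewClip; a sketch of that exchange for a single quality, with the field values taken from the test URL in the diff (session cookies from the login flow are assumed to be in place, which plain urlopen does not provide):

import json
from urllib.request import Request, urlopen

clip_post = {
    'a': 'mike-mckeown',  # author
    'cap': 'false',       # captions
    'cn': '3',            # clip number
    'course': 'hosting-sql-server-windows-azure-iaas',
    'lc': 'en',
    'm': 'hosting-sql-server-windows-azure-iaas-m7-mgmt',  # module name
    'mt': 'mp4',          # container
    'q': '848x640',       # resolution from the QUALITIES table above
}
req = Request(
    'http://www.pluralsight.com/training/Player/ViewClip',
    json.dumps(clip_post).encode('utf-8'),
    {'Content-Type': 'application/json;charset=utf-8'})
# The response body is the signed clip URL for that quality.
clip_url = urlopen(req).read().decode('utf-8')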
diff --git a/youtube_dl/extractor/porn91.py b/youtube_dl/extractor/porn91.py
index 72d1b2718..3e15533e9 100644
--- a/youtube_dl/extractor/porn91.py
+++ b/youtube_dl/extractor/porn91.py
@@ -22,6 +22,7 @@ class Porn91IE(InfoExtractor):
'title': '18岁大一漂亮学妹,水嫩性感,再爽一次!',
'ext': 'mp4',
'duration': 431,
+ 'age_limit': 18,
}
}
@@ -68,4 +69,5 @@ class Porn91IE(InfoExtractor):
'url': video_url,
'duration': duration,
'comment_count': comment_count,
+ 'age_limit': self._rta_search(webpage),
}
diff --git a/youtube_dl/extractor/pornhub.py b/youtube_dl/extractor/pornhub.py
index 0b7886840..7b0cdc41a 100644
--- a/youtube_dl/extractor/pornhub.py
+++ b/youtube_dl/extractor/pornhub.py
@@ -81,7 +81,7 @@ class PornHubIE(InfoExtractor):
comment_count = self._extract_count(
r'All Comments\s*<span>\(([\d,.]+)\)', webpage, 'comment')
- video_urls = list(map(compat_urllib_parse_unquote, re.findall(r'"quality_[0-9]{3}p":"([^"]+)', webpage)))
+ video_urls = list(map(compat_urllib_parse_unquote, re.findall(r"player_quality_[0-9]{3}p\s*=\s*'([^']+)'", webpage)))
if webpage.find('"encrypted":true') != -1:
password = compat_urllib_parse_unquote_plus(
self._search_regex(r'"video_title":"([^"]+)', webpage, 'password'))
@@ -94,7 +94,7 @@ class PornHubIE(InfoExtractor):
format = path.split('/')[5].split('_')[:2]
format = "-".join(format)
- m = re.match(r'^(?P<height>[0-9]+)P-(?P<tbr>[0-9]+)K$', format)
+ m = re.match(r'^(?P<height>[0-9]+)[pP]-(?P<tbr>[0-9]+)[kK]$', format)
if m is None:
height = None
tbr = None
diff --git a/youtube_dl/extractor/prosiebensat1.py b/youtube_dl/extractor/prosiebensat1.py
index fec008ce7..effcf1db3 100644
--- a/youtube_dl/extractor/prosiebensat1.py
+++ b/youtube_dl/extractor/prosiebensat1.py
@@ -9,7 +9,9 @@ from ..compat import (
compat_urllib_parse,
)
from ..utils import (
+ ExtractorError,
determine_ext,
+ float_or_none,
int_or_none,
unified_strdate,
)
@@ -224,10 +226,13 @@ class ProSiebenSat1IE(InfoExtractor):
'ids': clip_id,
})
- videos = self._download_json(videos_api_url, clip_id, 'Downloading videos JSON')
+ video = self._download_json(videos_api_url, clip_id, 'Downloading videos JSON')[0]
- duration = float(videos[0]['duration'])
- source_ids = [source['id'] for source in videos[0]['sources']]
+ if video.get('is_protected') is True:
+ raise ExtractorError('This video is DRM protected.', expected=True)
+
+ duration = float_or_none(video.get('duration'))
+ source_ids = [source['id'] for source in video['sources']]
source_ids_str = ','.join(map(str, source_ids))
g = '01!8d8F_)r9]4s[qeuXfP%'
diff --git a/youtube_dl/extractor/rtl2.py b/youtube_dl/extractor/rtl2.py
index 72cd80498..25f7faf76 100644
--- a/youtube_dl/extractor/rtl2.py
+++ b/youtube_dl/extractor/rtl2.py
@@ -1,6 +1,7 @@
# encoding: utf-8
from __future__ import unicode_literals
+import re
from .common import InfoExtractor
@@ -8,22 +9,28 @@ class RTL2IE(InfoExtractor):
_VALID_URL = r'http?://(?:www\.)?rtl2\.de/[^?#]*?/(?P<id>[^?#/]*?)(?:$|/(?:$|[?#]))'
_TESTS = [{
'url': 'http://www.rtl2.de/sendung/grip-das-motormagazin/folge/folge-203-0',
- 'md5': 'bfcc179030535b08dc2b36b469b5adc7',
'info_dict': {
'id': 'folge-203-0',
'ext': 'f4v',
'title': 'GRIP sucht den Sommerkönig',
'description': 'Matthias, Det und Helge treten gegeneinander an.'
},
+ 'params': {
+ # rtmp download
+ 'skip_download': True,
+ },
}, {
'url': 'http://www.rtl2.de/sendung/koeln-50667/video/5512-anna/21040-anna-erwischt-alex/',
- 'md5': 'ffcd517d2805b57ce11a58a2980c2b02',
'info_dict': {
'id': '21040-anna-erwischt-alex',
'ext': 'mp4',
'title': 'Anna erwischt Alex!',
'description': 'Anna ist Alex\' Tochter bei Köln 50667.'
},
+ 'params': {
+ # rtmp download
+ 'skip_download': True,
+ },
}]
def _real_extract(self, url):
@@ -34,12 +41,18 @@ class RTL2IE(InfoExtractor):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
- vico_id = self._html_search_regex(
- r'vico_id\s*:\s*([0-9]+)', webpage, 'vico_id')
- vivi_id = self._html_search_regex(
- r'vivi_id\s*:\s*([0-9]+)', webpage, 'vivi_id')
+ mobj = re.search(
+ r'<div[^>]+data-collection="(?P<vico_id>\d+)"[^>]+data-video="(?P<vivi_id>\d+)"',
+ webpage)
+ if mobj:
+ vico_id = mobj.group('vico_id')
+ vivi_id = mobj.group('vivi_id')
+ else:
+ vico_id = self._html_search_regex(
+ r'vico_id\s*:\s*([0-9]+)', webpage, 'vico_id')
+ vivi_id = self._html_search_regex(
+ r'vivi_id\s*:\s*([0-9]+)', webpage, 'vivi_id')
info_url = 'http://www.rtl2.de/video/php/get_video.php?vico_id=' + vico_id + '&vivi_id=' + vivi_id
- webpage = self._download_webpage(info_url, '')
info = self._download_json(info_url, video_id)
video_info = info['video']
diff --git a/youtube_dl/extractor/rtlnl.py b/youtube_dl/extractor/rtlnl.py
index a4d3d73ff..543d94417 100644
--- a/youtube_dl/extractor/rtlnl.py
+++ b/youtube_dl/extractor/rtlnl.py
@@ -44,6 +44,21 @@ class RtlNlIE(InfoExtractor):
'description': 'Er zijn nieuwe beelden vrijgegeven die vlak na de aanslag in Kopenhagen zijn gemaakt. Op de video is goed te zien hoe omstanders zich bekommeren om één van de slachtoffers, terwijl de eerste agenten ter plaatse komen.',
}
}, {
+ # empty synopsis and missing episodes (see https://github.com/rg3/youtube-dl/issues/6275)
+ 'url': 'http://www.rtl.nl/system/videoplayer/derden/rtlnieuws/video_embed.html#uuid=f536aac0-1dc3-4314-920e-3bd1c5b3811a/autoplay=false',
+ 'info_dict': {
+ 'id': 'f536aac0-1dc3-4314-920e-3bd1c5b3811a',
+ 'ext': 'mp4',
+ 'title': 'RTL Nieuws - Meer beelden van overval juwelier',
+ 'thumbnail': 're:^https?://screenshots\.rtl\.nl/system/thumb/sz=[0-9]+x[0-9]+/uuid=f536aac0-1dc3-4314-920e-3bd1c5b3811a$',
+ 'timestamp': 1437233400,
+ 'upload_date': '20150718',
+ 'duration': 30.474,
+ },
+ 'params': {
+ 'skip_download': True,
+ },
+ }, {
# encrypted m3u8 streams, georestricted
'url': 'http://www.rtlxl.nl/#!/afl-2-257632/52a74543-c504-4cde-8aa8-ec66fe8d68a7',
'only_matching': True,
@@ -59,22 +74,29 @@ class RtlNlIE(InfoExtractor):
uuid)
material = info['material'][0]
- progname = info['abstracts'][0]['name']
- subtitle = material['title'] or info['episodes'][0]['name']
- description = material.get('synopsis') or info['episodes'][0]['synopsis']
+ title = info['abstracts'][0]['name']
+ subtitle = material.get('title')
+ if subtitle:
+ title += ' - %s' % subtitle
+ description = material.get('synopsis')
meta = info.get('meta', {})
- # Use unencrypted m3u8 streams (See https://github.com/rg3/youtube-dl/issues/4118)
- # NB: nowadays, recent ffmpeg and avconv can handle these encrypted streams, so
- # this adaptive -> flash workaround is not required in general, but it also
- # allows bypassing georestriction therefore is retained for now.
- videopath = material['videopath'].replace('/adaptive/', '/flash/')
+ # m3u8 streams are encrypted and may not be handled properly by older ffmpeg/avconv.
+ # To work around this, an adaptive -> flash trick was previously used to obtain
+ # unencrypted m3u8 streams (see https://github.com/rg3/youtube-dl/issues/4118)
+ # and to bypass georestrictions as well.
+ # Currently, unencrypted m3u8 playlists are (intentionally?) invalid and therefore
+ # unusable, although they can be fixed by a simple string replacement (see
+ # https://github.com/rg3/youtube-dl/pull/6337).
+ # Since recent ffmpeg and avconv handle encrypted streams just fine, encrypted
+ # streams are used now.
+ videopath = material['videopath']
m3u8_url = meta.get('videohost', 'http://manifest.us.rtl.nl') + videopath
formats = self._extract_m3u8_formats(m3u8_url, uuid, ext='mp4')
- video_urlpart = videopath.split('/flash/')[1][:-5]
+ video_urlpart = videopath.split('/adaptive/')[1][:-5]
PG_URL_TEMPLATE = 'http://pg.us.rtl.nl/rtlxl/network/%s/progressive/%s.mp4'
formats.extend([
@@ -107,7 +129,7 @@ class RtlNlIE(InfoExtractor):
return {
'id': uuid,
- 'title': '%s - %s' % (progname, subtitle),
+ 'title': title,
'formats': formats,
'timestamp': material['original_date'],
'description': description,
diff --git a/youtube_dl/extractor/rtp.py b/youtube_dl/extractor/rtp.py
index ecf4939cd..82b323cdd 100644
--- a/youtube_dl/extractor/rtp.py
+++ b/youtube_dl/extractor/rtp.py
@@ -18,6 +18,10 @@ class RTPIE(InfoExtractor):
'description': 'As paixões musicais de António Cartaxo e António Macedo',
'thumbnail': 're:^https?://.*\.jpg',
},
+ 'params': {
+ # rtmp download
+ 'skip_download': True,
+ },
}, {
'url': 'http://www.rtp.pt/play/p831/a-quimica-das-coisas',
'only_matching': True,
diff --git a/youtube_dl/extractor/rts.py b/youtube_dl/extractor/rts.py
index 9fbe239d8..12639f08b 100644
--- a/youtube_dl/extractor/rts.py
+++ b/youtube_dl/extractor/rts.py
@@ -19,7 +19,16 @@ from ..utils import (
class RTSIE(InfoExtractor):
IE_DESC = 'RTS.ch'
- _VALID_URL = r'https?://(?:www\.)?rts\.ch/(?:(?:[^/]+/){2,}(?P<id>[0-9]+)-(?P<display_id>.+?)\.html|play/tv/[^/]+/video/(?P<display_id_new>.+?)\?id=(?P<id_new>[0-9]+))'
+ _VALID_URL = r'''(?x)
+ (?:
+ rts:(?P<rts_id>\d+)|
+ https?://
+ (?:www\.)?rts\.ch/
+ (?:
+ (?:[^/]+/){2,}(?P<id>[0-9]+)-(?P<display_id>.+?)\.html|
+ play/tv/[^/]+/video/(?P<display_id_new>.+?)\?id=(?P<id_new>[0-9]+)
+ )
+ )'''
_TESTS = [
{
@@ -123,6 +132,15 @@ class RTSIE(InfoExtractor):
},
},
{
+ # article with videos on rhs
+ 'url': 'http://www.rts.ch/sport/hockey/6693917-hockey-davos-decroche-son-31e-titre-de-champion-de-suisse.html',
+ 'info_dict': {
+ 'id': '6693917',
+ 'title': 'Hockey: Davos décroche son 31e titre de champion de Suisse',
+ },
+ 'playlist_mincount': 5,
+ },
+ {
'url': 'http://www.rts.ch/play/tv/le-19h30/video/le-chantier-du-nouveau-parlement-vaudois-a-permis-une-trouvaille-historique?id=6348280',
'only_matching': True,
}
@@ -130,7 +148,7 @@ class RTSIE(InfoExtractor):
def _real_extract(self, url):
m = re.match(self._VALID_URL, url)
- video_id = m.group('id') or m.group('id_new')
+ video_id = m.group('rts_id') or m.group('id') or m.group('id_new')
display_id = m.group('display_id') or m.group('display_id_new')
def download_json(internal_id):
@@ -143,6 +161,15 @@ class RTSIE(InfoExtractor):
# video_id extracted out of URL is not always a real id
if 'video' not in all_info and 'audio' not in all_info:
page = self._download_webpage(url, display_id)
+
+ # article with videos on rhs
+ videos = re.findall(
+ r'<article[^>]+class="content-item"[^>]*>\s*<a[^>]+data-video-urn="urn:rts:video:(\d+)"',
+ page)
+ if videos:
+ entries = [self.url_result('rts:%s' % video_urn, 'RTS') for video_urn in videos]
+ return self.playlist_result(entries, video_id, self._og_search_title(page))
+
internal_id = self._html_search_regex(
r'<(?:video|audio) data-id="([0-9]+)"', page,
'internal video id')
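The RTS article handling above re-dispatches each sidebar video through the new internal rts:<id> URL scheme; a sketch of that scan, reusing the exact pattern from the diff (url_result is passed in to keep the sketch self-contained):

import re

def article_entries(page, url_result):
    # Sidebar videos are announced via data-video-urn="urn:rts:video:<id>"
    ids = re.findall(
        r'<article[^>]+class="content-item"[^>]*>\s*<a[^>]+data-video-urn="urn:rts:video:(\d+)"',
        page)
    return [url_result('rts:%s' % video_id, 'RTS') for video_id in ids]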
diff --git a/youtube_dl/extractor/rtve.py b/youtube_dl/extractor/rtve.py
index 82cd98ac7..5b97d33ca 100644
--- a/youtube_dl/extractor/rtve.py
+++ b/youtube_dl/extractor/rtve.py
@@ -6,7 +6,7 @@ import re
import time
from .common import InfoExtractor
-from ..compat import compat_urlparse
+from ..compat import compat_urllib_request, compat_urlparse
from ..utils import (
ExtractorError,
float_or_none,
@@ -102,7 +102,9 @@ class RTVEALaCartaIE(InfoExtractor):
if info['state'] == 'DESPU':
raise ExtractorError('The video is no longer available', expected=True)
png_url = 'http://www.rtve.es/ztnr/movil/thumbnail/%s/videos/%s.png' % (self._manager, video_id)
- png = self._download_webpage(png_url, video_id, 'Downloading url information')
+ png_request = compat_urllib_request.Request(png_url)
+ png_request.add_header('Referer', url)
+ png = self._download_webpage(png_request, video_id, 'Downloading url information')
video_url = _decrypt_url(png)
if not video_url.endswith('.f4m'):
auth_url = video_url.replace(
diff --git a/youtube_dl/extractor/rtvnh.py b/youtube_dl/extractor/rtvnh.py
new file mode 100644
index 000000000..7c9d4b0cd
--- /dev/null
+++ b/youtube_dl/extractor/rtvnh.py
@@ -0,0 +1,47 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..utils import ExtractorError
+
+
+class RTVNHIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:www\.)?rtvnh\.nl/video/(?P<id>[0-9]+)'
+ _TEST = {
+ 'url': 'http://www.rtvnh.nl/video/131946',
+ 'md5': '6e1d0ab079e2a00b6161442d3ceacfc1',
+ 'info_dict': {
+ 'id': '131946',
+ 'ext': 'mp4',
+ 'title': 'Grote zoektocht in zee bij Zandvoort naar vermiste vrouw',
+ 'thumbnail': 're:^https?:.*\.jpg$'
+ }
+ }
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+
+ meta = self._parse_json(self._download_webpage(
+ 'http://www.rtvnh.nl/video/json?m=' + video_id, video_id), video_id)
+
+ status = meta.get('status')
+ if status != 200:
+ raise ExtractorError(
+ '%s returned error code %d' % (self.IE_NAME, status), expected=True)
+
+ formats = self._extract_smil_formats(
+ 'http://www.rtvnh.nl/video/smil?m=' + video_id, video_id, fatal=False)
+
+ for item in meta['source']['fb']:
+ if item.get('type') == 'hls':
+ formats.extend(self._extract_m3u8_formats(
+ item['file'], video_id, ext='mp4', entry_protocol='m3u8_native'))
+ elif item.get('type') == '':
+ formats.append({'url': item['file']})
+
+ return {
+ 'id': video_id,
+ 'title': meta['title'].strip(),
+ 'thumbnail': meta.get('image'),
+ 'formats': formats
+ }
diff --git a/youtube_dl/extractor/rutube.py b/youtube_dl/extractor/rutube.py
index 5b1c3577a..d94dc7399 100644
--- a/youtube_dl/extractor/rutube.py
+++ b/youtube_dl/extractor/rutube.py
@@ -30,6 +30,7 @@ class RutubeIE(InfoExtractor):
'uploader': 'NTDRussian',
'uploader_id': '29790',
'upload_date': '20131016',
+ 'age_limit': 0,
},
'params': {
# It requires ffmpeg (m3u8 download)
diff --git a/youtube_dl/extractor/ruutu.py b/youtube_dl/extractor/ruutu.py
index 4e22628d0..c67ad25ce 100644
--- a/youtube_dl/extractor/ruutu.py
+++ b/youtube_dl/extractor/ruutu.py
@@ -6,19 +6,19 @@ from ..compat import compat_urllib_parse_urlparse
from ..utils import (
determine_ext,
int_or_none,
+ xpath_attr,
xpath_text,
)
class RuutuIE(InfoExtractor):
- _VALID_URL = r'http://(?:www\.)?ruutu\.fi/ohjelmat/(?:[^/?#]+/)*(?P<id>[^/?#]+)'
+ _VALID_URL = r'https?://(?:www\.)?ruutu\.fi/video/(?P<id>\d+)'
_TESTS = [
{
- 'url': 'http://www.ruutu.fi/ohjelmat/oletko-aina-halunnut-tietaa-mita-tapahtuu-vain-hetki-ennen-lahetysta-nyt-se-selvisi',
+ 'url': 'http://www.ruutu.fi/video/2058907',
'md5': 'ab2093f39be1ca8581963451b3c0234f',
'info_dict': {
'id': '2058907',
- 'display_id': 'oletko-aina-halunnut-tietaa-mita-tapahtuu-vain-hetki-ennen-lahetysta-nyt-se-selvisi',
'ext': 'mp4',
'title': 'Oletko aina halunnut tietää mitä tapahtuu vain hetki ennen lähetystä? - Nyt se selvisi!',
'description': 'md5:cfc6ccf0e57a814360df464a91ff67d6',
@@ -28,14 +28,13 @@ class RuutuIE(InfoExtractor):
},
},
{
- 'url': 'http://www.ruutu.fi/ohjelmat/superpesis/superpesis-katso-koko-kausi-ruudussa',
+ 'url': 'http://www.ruutu.fi/video/2057306',
'md5': '065a10ae4d5b8cfd9d0c3d332465e3d9',
'info_dict': {
'id': '2057306',
- 'display_id': 'superpesis-katso-koko-kausi-ruudussa',
'ext': 'mp4',
'title': 'Superpesis: katso koko kausi Ruudussa',
- 'description': 'md5:44c44a99fdbe5b380ab74ebd75f0af77',
+ 'description': 'md5:da2736052fef3b2bd5e0005e63c25eac',
'thumbnail': 're:^https?://.*\.jpg$',
'duration': 40,
'age_limit': 0,
@@ -44,29 +43,10 @@ class RuutuIE(InfoExtractor):
]
def _real_extract(self, url):
- display_id = self._match_id(url)
+ video_id = self._match_id(url)
- webpage = self._download_webpage(url, display_id)
-
- video_id = self._search_regex(
- r'data-media-id="(\d+)"', webpage, 'media id')
-
- video_xml_url = None
-
- media_data = self._search_regex(
- r'jQuery\.extend\([^,]+,\s*(.+?)\);', webpage,
- 'media data', default=None)
- if media_data:
- media_json = self._parse_json(media_data, display_id, fatal=False)
- if media_json:
- xml_url = media_json.get('ruutuplayer', {}).get('xmlUrl')
- if xml_url:
- video_xml_url = xml_url.replace('{ID}', video_id)
-
- if not video_xml_url:
- video_xml_url = 'http://gatling.ruutu.fi/media-xml-cache?id=%s' % video_id
-
- video_xml = self._download_xml(video_xml_url, video_id)
+ video_xml = self._download_xml(
+ 'http://gatling.ruutu.fi/media-xml-cache?id=%s' % video_id, video_id)
formats = []
processed_urls = []
@@ -109,10 +89,9 @@ class RuutuIE(InfoExtractor):
return {
'id': video_id,
- 'display_id': display_id,
- 'title': self._og_search_title(webpage),
- 'description': self._og_search_description(webpage),
- 'thumbnail': self._og_search_thumbnail(webpage),
+ 'title': xpath_attr(video_xml, './/Behavior/Program', 'program_name', 'title', fatal=True),
+ 'description': xpath_attr(video_xml, './/Behavior/Program', 'description', 'description'),
+ 'thumbnail': xpath_attr(video_xml, './/Behavior/Startpicture', 'href', 'thumbnail'),
'duration': int_or_none(xpath_text(video_xml, './/Runtime', 'duration')),
'age_limit': int_or_none(xpath_text(video_xml, './/AgeLimit', 'age limit')),
'formats': formats,
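The Ruutu rewrite above reads title, description and thumbnail straight from the media XML; a minimal sketch of roughly what the utils.xpath_attr helper does (the error type is illustrative, the real helper raises an ExtractorError):

def xpath_attr(node, xpath, key, name=None, fatal=False):
    # Grab an attribute off the first element matching xpath,
    # or None / an error when it is missing.
    elem = node.find(xpath)
    value = elem.get(key) if elem is not None else None
    if value is None and fatal:
        raise RuntimeError('Could not find XML attribute %s' % (name or key))
    return value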
diff --git a/youtube_dl/extractor/safari.py b/youtube_dl/extractor/safari.py
index f3c80708c..a602af692 100644
--- a/youtube_dl/extractor/safari.py
+++ b/youtube_dl/extractor/safari.py
@@ -20,7 +20,6 @@ from ..utils import (
class SafariBaseIE(InfoExtractor):
_LOGIN_URL = 'https://www.safaribooksonline.com/accounts/login/'
_SUCCESSFUL_LOGIN_REGEX = r'<a href="/accounts/logout/"[^>]*>Sign Out</a>'
- _ACCOUNT_CREDENTIALS_HINT = 'Use --username and --password options to supply credentials for safaribooksonline.com'
_NETRC_MACHINE = 'safari'
_API_BASE = 'https://www.safaribooksonline.com/api/v1/book'
@@ -37,9 +36,7 @@ class SafariBaseIE(InfoExtractor):
def _login(self):
(username, password) = self._get_login_info()
if username is None:
- raise ExtractorError(
- self._ACCOUNT_CREDENTIALS_HINT,
- expected=True)
+ self.raise_login_required('safaribooksonline.com account is required')
headers = std_headers
if 'Referer' not in headers:
diff --git a/youtube_dl/extractor/screenwavemedia.py b/youtube_dl/extractor/screenwavemedia.py
index d1ab66b32..05f93904c 100644
--- a/youtube_dl/extractor/screenwavemedia.py
+++ b/youtube_dl/extractor/screenwavemedia.py
@@ -7,12 +7,13 @@ from .common import InfoExtractor
from ..utils import (
int_or_none,
unified_strdate,
+ js_to_json,
)
class ScreenwaveMediaIE(InfoExtractor):
- _VALID_URL = r'http://player\d?\.screenwavemedia\.com/(?:play/)?[a-zA-Z]+\.php\?[^"]*\bid=(?P<id>.+)'
-
+ _VALID_URL = r'https?://player\d?\.screenwavemedia\.com/(?:play/)?[a-zA-Z]+\.php\?.*\bid=(?P<id>[A-Za-z0-9-]+)'
+ EMBED_PATTERN = r'src=(["\'])(?P<url>(?:https?:)?//player\d?\.screenwavemedia\.com/(?:play/)?[a-zA-Z]+\.php\?.*\bid=.+?)\1'
_TESTS = [{
'url': 'http://player.screenwavemedia.com/play/play.php?playerdiv=videoarea&companiondiv=squareAd&id=Cinemassacre-19911',
'only_matching': True,
@@ -22,59 +23,71 @@ class ScreenwaveMediaIE(InfoExtractor):
video_id = self._match_id(url)
playerdata = self._download_webpage(
- 'http://player.screenwavemedia.com/play/player.php?id=%s' % video_id,
+ 'http://player.screenwavemedia.com/player.php?id=%s' % video_id,
video_id, 'Downloading player webpage')
vidtitle = self._search_regex(
r'\'vidtitle\'\s*:\s*"([^"]+)"', playerdata, 'vidtitle').replace('\\/', '/')
- vidurl = self._search_regex(
- r'\'vidurl\'\s*:\s*"([^"]+)"', playerdata, 'vidurl').replace('\\/', '/')
-
- videolist_url = None
-
- mobj = re.search(r"'videoserver'\s*:\s*'(?P<videoserver>[^']+)'", playerdata)
- if mobj:
- videoserver = mobj.group('videoserver')
- mobj = re.search(r'\'vidid\'\s*:\s*"(?P<vidid>[^\']+)"', playerdata)
- vidid = mobj.group('vidid') if mobj else video_id
- videolist_url = 'http://%s/vod/smil:%s.smil/jwplayer.smil' % (videoserver, vidid)
- else:
- mobj = re.search(r"file\s*:\s*'(?P<smil>http.+?/jwplayer\.smil)'", playerdata)
- if mobj:
- videolist_url = mobj.group('smil')
-
- if videolist_url:
- videolist = self._download_xml(videolist_url, video_id, 'Downloading videolist XML')
- formats = []
- baseurl = vidurl[:vidurl.rfind('/') + 1]
- for video in videolist.findall('.//video'):
- src = video.get('src')
- if not src:
+
+ playerconfig = self._download_webpage(
+ 'http://player.screenwavemedia.com/player.js',
+ video_id, 'Downloading playerconfig webpage')
+
+ videoserver = self._search_regex(r'SWMServer\s*=\s*"([\d\.]+)"', playerdata, 'videoserver')
+
+ sources = self._parse_json(
+ js_to_json(
+ re.sub(
+ r'(?s)/\*.*?\*/', '',
+ self._search_regex(
+ r"sources\s*:\s*(\[[^\]]+?\])", playerconfig,
+ 'sources',
+ ).replace(
+ "' + thisObj.options.videoserver + '",
+ videoserver
+ ).replace(
+ "' + playerVidId + '",
+ video_id
+ )
+ )
+ ),
+ video_id, fatal=False
+ )
+
+ # Fallback to hardcoded sources if JS changes again
+ if not sources:
+ self.report_warning('Falling back to a hardcoded list of streams')
+ sources = [{
+ 'file': 'http://%s/vod/%s_%s.mp4' % (videoserver, video_id, format_id),
+ 'type': 'mp4',
+ 'label': format_label,
+ } for format_id, format_label in (
+ ('low', '144p Low'), ('med', '160p Med'), ('high', '360p High'), ('hd1', '720p HD1'))]
+ sources.append({
+ 'file': 'http://%s/vod/smil:%s.smil/playlist.m3u8' % (videoserver, video_id),
+ 'type': 'hls',
+ })
+
+ formats = []
+ for source in sources:
+ if source['type'] == 'hls':
+ formats.extend(self._extract_m3u8_formats(source['file'], video_id))
+ else:
+ file_ = source.get('file')
+ if not file_:
continue
- file_ = src.partition(':')[-1]
- width = int_or_none(video.get('width'))
- height = int_or_none(video.get('height'))
- bitrate = int_or_none(video.get('system-bitrate'), scale=1000)
- format = {
- 'url': baseurl + file_,
- 'format_id': src.rpartition('.')[0].rpartition('_')[-1],
- }
- if width or height:
- format.update({
- 'tbr': bitrate,
- 'width': width,
- 'height': height,
- })
- else:
- format.update({
- 'abr': bitrate,
- 'vcodec': 'none',
- })
- formats.append(format)
- else:
- formats = [{
- 'url': vidurl,
- }]
+ format_label = source.get('label')
+ format_id = self._search_regex(
+ r'_(.+?)\.[^.]+$', file_, 'format id', default=None)
+ height = int_or_none(self._search_regex(
+ r'^(\d+)[pP]', format_label, 'height', default=None))
+ formats.append({
+ 'url': source['file'],
+ 'format_id': format_id,
+ 'format': format_label,
+ 'ext': source.get('type'),
+ 'height': height,
+ })
self._sort_formats(formats)
return {
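The nested sources extraction above condenses to a small pipeline; a sketch assuming player.js still embeds the array with the same two string-concatenation placeholders:

import json
import re

from youtube_dl.utils import js_to_json

def parse_sources(playerconfig, videoserver, video_id):
    raw = re.search(r'sources\s*:\s*(\[[^\]]+?\])', playerconfig).group(1)
    raw = re.sub(r'(?s)/\*.*?\*/', '', raw)  # drop JS block comments
    # Inline the two JS template placeholders before JSON conversion
    raw = raw.replace("' + thisObj.options.videoserver + '", videoserver)
    raw = raw.replace("' + playerVidId + '", video_id)
    return json.loads(js_to_json(raw))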
diff --git a/youtube_dl/extractor/sexykarma.py b/youtube_dl/extractor/sexykarma.py
index 6446d26dc..e33483674 100644
--- a/youtube_dl/extractor/sexykarma.py
+++ b/youtube_dl/extractor/sexykarma.py
@@ -29,6 +29,7 @@ class SexyKarmaIE(InfoExtractor):
'view_count': int,
'comment_count': int,
'categories': list,
+ 'age_limit': 18,
}
}, {
'url': 'http://www.sexykarma.com/gonewild/video/pot-pixie-tribute-8Id6EZPbuHf.html',
diff --git a/youtube_dl/extractor/shahid.py b/youtube_dl/extractor/shahid.py
new file mode 100644
index 000000000..6e9903d5e
--- /dev/null
+++ b/youtube_dl/extractor/shahid.py
@@ -0,0 +1,107 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..compat import compat_urllib_parse
+from ..utils import (
+ ExtractorError,
+ int_or_none,
+ parse_iso8601,
+)
+
+
+class ShahidIE(InfoExtractor):
+ _VALID_URL = r'https?://shahid\.mbc\.net/ar/episode/(?P<id>\d+)/?'
+ _TESTS = [{
+ 'url': 'https://shahid.mbc.net/ar/episode/90574/%D8%A7%D9%84%D9%85%D9%84%D9%83-%D8%B9%D8%A8%D8%AF%D8%A7%D9%84%D9%84%D9%87-%D8%A7%D9%84%D8%A5%D9%86%D8%B3%D8%A7%D9%86-%D8%A7%D9%84%D9%85%D9%88%D8%B3%D9%85-1-%D9%83%D9%84%D9%8A%D8%A8-3.html',
+ 'info_dict': {
+ 'id': '90574',
+ 'ext': 'm3u8',
+ 'title': 'الملك عبدالله الإنسان الموسم 1 كليب 3',
+ 'description': 'الفيلم الوثائقي - الملك عبد الله الإنسان',
+ 'duration': 2972,
+ 'timestamp': 1422057420,
+ 'upload_date': '20150123',
+ },
+ 'params': {
+ # m3u8 download
+ 'skip_download': True,
+ }
+ }, {
+ # shahid plus subscriber only
+ 'url': 'https://shahid.mbc.net/ar/episode/90511/%D9%85%D8%B1%D8%A7%D9%8A%D8%A7-2011-%D8%A7%D9%84%D9%85%D9%88%D8%B3%D9%85-1-%D8%A7%D9%84%D8%AD%D9%84%D9%82%D8%A9-1.html',
+ 'only_matching': True
+ }]
+
+ def _handle_error(self, response):
+ if not isinstance(response, dict):
+ return
+ error = response.get('error')
+ if error:
+ raise ExtractorError(
+ '%s returned error: %s' % (self.IE_NAME, '\n'.join(error.values())),
+ expected=True)
+
+ def _download_json(self, url, video_id, note='Downloading JSON metadata'):
+ response = super(ShahidIE, self)._download_json(url, video_id, note)['data']
+ self._handle_error(response)
+ return response
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+
+ webpage = self._download_webpage(url, video_id)
+
+ api_vars = {
+ 'id': video_id,
+ 'type': 'player',
+ 'url': 'http://api.shahid.net/api/v1_1',
+ 'playerType': 'episode',
+ }
+
+ flashvars = self._search_regex(
+ r'var\s+flashvars\s*=\s*({[^}]+})', webpage, 'flashvars', default=None)
+ if flashvars:
+ for key in api_vars.keys():
+ value = self._search_regex(
+ r'\b%s\s*:\s*(?P<q>["\'])(?P<value>.+?)(?P=q)' % key,
+ flashvars, key, default=None, group='value')
+ if value:
+ api_vars[key] = value
+
+ player = self._download_json(
+ 'https://shahid.mbc.net/arContent/getPlayerContent-param-.id-%s.type-%s.html'
+ % (video_id, api_vars['type']), video_id, 'Downloading player JSON')
+
+ formats = self._extract_m3u8_formats(player['url'], video_id, 'mp4')
+
+ video = self._download_json(
+ '%s/%s/%s?%s' % (
+ api_vars['url'], api_vars['playerType'], api_vars['id'],
+ compat_urllib_parse.urlencode({
+ 'apiKey': 'sh@hid0nlin3',
+ 'hash': 'b2wMCTHpSmyxGqQjJFOycRmLSex+BpTK/ooxy6vHaqs=',
+ }).encode('utf-8')),
+ video_id, 'Downloading video JSON')
+
+ video = video[api_vars['playerType']]
+
+ title = video['title']
+ description = video.get('description')
+ thumbnail = video.get('thumbnailUrl')
+ duration = int_or_none(video.get('duration'))
+ timestamp = parse_iso8601(video.get('referenceDate'))
+ categories = [
+ category['name']
+ for category in video.get('genres', []) if 'name' in category]
+
+ return {
+ 'id': video_id,
+ 'title': title,
+ 'description': description,
+ 'thumbnail': thumbnail,
+ 'duration': duration,
+ 'timestamp': timestamp,
+ 'categories': categories,
+ 'formats': formats,
+ }
diff --git a/youtube_dl/extractor/shared.py b/youtube_dl/extractor/shared.py
index a07677686..c5636e8e9 100644
--- a/youtube_dl/extractor/shared.py
+++ b/youtube_dl/extractor/shared.py
@@ -14,17 +14,28 @@ from ..utils import (
class SharedIE(InfoExtractor):
- _VALID_URL = r'http://shared\.sx/(?P<id>[\da-z]{10})'
+ IE_DESC = 'shared.sx and vivo.sx'
+ _VALID_URL = r'http://(?:shared|vivo)\.sx/(?P<id>[\da-z]{10})'
- _TEST = {
+ _TESTS = [{
'url': 'http://shared.sx/0060718775',
'md5': '106fefed92a8a2adb8c98e6a0652f49b',
'info_dict': {
'id': '0060718775',
'ext': 'mp4',
'title': 'Bmp4',
+ 'filesize': 1720110,
},
- }
+ }, {
+ 'url': 'http://vivo.sx/d7ddda0e78',
+ 'md5': '15b3af41be0b4fe01f4df075c2678b2c',
+ 'info_dict': {
+ 'id': 'd7ddda0e78',
+ 'ext': 'mp4',
+ 'title': 'Chicken',
+ 'filesize': 528031,
+ },
+ }]
def _real_extract(self, url):
video_id = self._match_id(url)
diff --git a/youtube_dl/extractor/smotri.py b/youtube_dl/extractor/smotri.py
index 93a7cfe15..35a81ee87 100644
--- a/youtube_dl/extractor/smotri.py
+++ b/youtube_dl/extractor/smotri.py
@@ -330,10 +330,7 @@ class SmotriBroadcastIE(InfoExtractor):
(username, password) = self._get_login_info()
if username is None:
- raise ExtractorError(
- 'Erotic broadcasts allowed only for registered users, '
- 'use --username and --password options to provide account credentials.',
- expected=True)
+ self.raise_login_required('Erotic broadcasts allowed only for registered users')
login_form = {
'login-hint53': '1',
diff --git a/youtube_dl/extractor/snagfilms.py b/youtube_dl/extractor/snagfilms.py
index cf495f310..6977afb27 100644
--- a/youtube_dl/extractor/snagfilms.py
+++ b/youtube_dl/extractor/snagfilms.py
@@ -24,6 +24,15 @@ class SnagFilmsEmbedIE(InfoExtractor):
'title': '#whilewewatch',
}
}, {
+ # invalid labels, 360p is better than 480p
+ 'url': 'http://www.snagfilms.com/embed/player?filmId=17ca0950-a74a-11e0-a92a-0026bb61d036',
+ 'md5': '882fca19b9eb27ef865efeeaed376a48',
+ 'info_dict': {
+ 'id': '17ca0950-a74a-11e0-a92a-0026bb61d036',
+ 'ext': 'mp4',
+ 'title': 'Life in Limbo',
+ }
+ }, {
'url': 'http://www.snagfilms.com/embed/player?filmId=0000014c-de2f-d5d6-abcf-ffef58af0017',
'only_matching': True,
}]
@@ -52,14 +61,15 @@ class SnagFilmsEmbedIE(InfoExtractor):
if not file_:
continue
type_ = source.get('type')
- format_id = source.get('label')
ext = determine_ext(file_)
- if any(_ == 'm3u8' for _ in (type_, ext)):
+ format_id = source.get('label') or ext
+ if all(v == 'm3u8' for v in (type_, ext)):
formats.extend(self._extract_m3u8_formats(
file_, video_id, 'mp4', m3u8_id='hls'))
else:
bitrate = int_or_none(self._search_regex(
- r'(\d+)kbps', file_, 'bitrate', default=None))
+ [r'(\d+)kbps', r'_\d{1,2}x\d{1,2}_(\d{3,})\.%s' % ext],
+ file_, 'bitrate', default=None))
height = int_or_none(self._search_regex(
r'^(\d+)[pP]$', format_id, 'height', default=None))
formats.append({
diff --git a/youtube_dl/extractor/soundcloud.py b/youtube_dl/extractor/soundcloud.py
index 118ca4832..ed5dcc0d3 100644
--- a/youtube_dl/extractor/soundcloud.py
+++ b/youtube_dl/extractor/soundcloud.py
@@ -29,7 +29,7 @@ class SoundcloudIE(InfoExtractor):
_VALID_URL = r'''(?x)^(?:https?://)?
(?:(?:(?:www\.|m\.)?soundcloud\.com/
(?P<uploader>[\w\d-]+)/
- (?!sets/|(?:likes|tracks)/?(?:$|[?#]))
+ (?!(?:tracks|sets(?:/[^/?#]+)?|reposts|likes|spotlight)/?(?:$|[?#]))
(?P<title>[\w\d-]+)/?
(?P<token>[^?]+?)?(?:[?].*)?$)
|(?:api\.soundcloud\.com/tracks/(?P<track_id>\d+)
@@ -282,69 +282,150 @@ class SoundcloudSetIE(SoundcloudIE):
msgs = (compat_str(err['error_message']) for err in info['errors'])
raise ExtractorError('unable to download video webpage: %s' % ','.join(msgs))
+ entries = [self.url_result(track['permalink_url'], 'Soundcloud') for track in info['tracks']]
+
return {
'_type': 'playlist',
- 'entries': [self._extract_info_dict(track, secret_token=token) for track in info['tracks']],
+ 'entries': entries,
'id': '%s' % info['id'],
'title': info['title'],
}
class SoundcloudUserIE(SoundcloudIE):
- _VALID_URL = r'https?://(?:(?:www|m)\.)?soundcloud\.com/(?P<user>[^/]+)/?((?P<rsrc>tracks|likes)/?)?(\?.*)?$'
+ _VALID_URL = r'''(?x)
+ https?://
+ (?:(?:www|m)\.)?soundcloud\.com/
+ (?P<user>[^/]+)
+ (?:/
+ (?P<rsrc>tracks|sets|reposts|likes|spotlight)
+ )?
+ /?(?:[?#].*)?$
+ '''
IE_NAME = 'soundcloud:user'
_TESTS = [{
- 'url': 'https://soundcloud.com/the-concept-band',
+ 'url': 'https://soundcloud.com/the-akashic-chronicler',
'info_dict': {
- 'id': '9615865',
- 'title': 'The Royal Concept',
+ 'id': '114582580',
+ 'title': 'The Akashic Chronicler (All)',
},
- 'playlist_mincount': 12
+ 'playlist_mincount': 111,
}, {
- 'url': 'https://soundcloud.com/the-concept-band/likes',
+ 'url': 'https://soundcloud.com/the-akashic-chronicler/tracks',
'info_dict': {
- 'id': '9615865',
- 'title': 'The Royal Concept',
+ 'id': '114582580',
+ 'title': 'The Akashic Chronicler (Tracks)',
},
- 'playlist_mincount': 1,
+ 'playlist_mincount': 50,
}, {
- 'url': 'https://soundcloud.com/the-akashic-chronicler/tracks',
- 'only_matching': True,
+ 'url': 'https://soundcloud.com/the-akashic-chronicler/sets',
+ 'info_dict': {
+ 'id': '114582580',
+ 'title': 'The Akashic Chronicler (Playlists)',
+ },
+ 'playlist_mincount': 3,
+ }, {
+ 'url': 'https://soundcloud.com/the-akashic-chronicler/reposts',
+ 'info_dict': {
+ 'id': '114582580',
+ 'title': 'The Akashic Chronicler (Reposts)',
+ },
+ 'playlist_mincount': 7,
+ }, {
+ 'url': 'https://soundcloud.com/the-akashic-chronicler/likes',
+ 'info_dict': {
+ 'id': '114582580',
+ 'title': 'The Akashic Chronicler (Likes)',
+ },
+ 'playlist_mincount': 321,
+ }, {
+ 'url': 'https://soundcloud.com/grynpyret/spotlight',
+ 'info_dict': {
+ 'id': '7098329',
+ 'title': 'Grynpyret (Spotlight)',
+ },
+ 'playlist_mincount': 1,
}]
+ _API_BASE = 'https://api.soundcloud.com'
+ _API_V2_BASE = 'https://api-v2.soundcloud.com'
+
+ _BASE_URL_MAP = {
+ 'all': '%s/profile/soundcloud:users:%%s' % _API_V2_BASE,
+ 'tracks': '%s/users/%%s/tracks' % _API_BASE,
+ 'sets': '%s/users/%%s/playlists' % _API_V2_BASE,
+ 'reposts': '%s/profile/soundcloud:users:%%s/reposts' % _API_V2_BASE,
+ 'likes': '%s/users/%%s/likes' % _API_V2_BASE,
+ 'spotlight': '%s/users/%%s/spotlight' % _API_V2_BASE,
+ }
+
+ _TITLE_MAP = {
+ 'all': 'All',
+ 'tracks': 'Tracks',
+ 'sets': 'Playlists',
+ 'reposts': 'Reposts',
+ 'likes': 'Likes',
+ 'spotlight': 'Spotlight',
+ }
+
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
uploader = mobj.group('user')
- resource = mobj.group('rsrc')
- if resource is None:
- resource = 'tracks'
- elif resource == 'likes':
- resource = 'favorites'
url = 'http://soundcloud.com/%s/' % uploader
resolv_url = self._resolv_url(url)
user = self._download_json(
resolv_url, uploader, 'Downloading user info')
- base_url = 'http://api.soundcloud.com/users/%s/%s.json?' % (uploader, resource)
+
+ resource = mobj.group('rsrc') or 'all'
+ base_url = self._BASE_URL_MAP[resource] % user['id']
+
+ next_href = None
entries = []
for i in itertools.count():
- data = compat_urllib_parse.urlencode({
- 'offset': i * 50,
- 'limit': 50,
- 'client_id': self._CLIENT_ID,
- })
- new_entries = self._download_json(
- base_url + data, uploader, 'Downloading track page %s' % (i + 1))
- if len(new_entries) == 0:
+ if not next_href:
+ data = compat_urllib_parse.urlencode({
+ 'offset': i * 50,
+ 'limit': 50,
+ 'client_id': self._CLIENT_ID,
+ 'linked_partitioning': '1',
+ 'representation': 'speedy',
+ })
+ next_href = base_url + '?' + data
+
+ response = self._download_json(
+ next_href, uploader, 'Downloading track page %s' % (i + 1))
+
+ collection = response['collection']
+
+ if not collection:
self.to_screen('%s: End page received' % uploader)
break
- entries.extend(self.url_result(e['permalink_url'], 'Soundcloud') for e in new_entries)
+
+ def resolve_permalink_url(candidates):
+ for cand in candidates:
+ if isinstance(cand, dict):
+ permalink_url = cand.get('permalink_url')
+ if permalink_url and permalink_url.startswith('http'):
+ return permalink_url
+
+ for e in collection:
+ permalink_url = resolve_permalink_url((e, e.get('track'), e.get('playlist')))
+ if permalink_url:
+ entries.append(self.url_result(permalink_url))
+
+ if 'next_href' in response:
+ next_href = response['next_href']
+ if not next_href:
+ break
+ else:
+ next_href = None
return {
'_type': 'playlist',
'id': compat_str(user['id']),
- 'title': user['username'],
+ 'title': '%s (%s)' % (user['username'], self._TITLE_MAP[resource]),
'entries': entries,
}
@@ -379,9 +460,7 @@ class SoundcloudPlaylistIE(SoundcloudIE):
data = self._download_json(
base_url + data, playlist_id, 'Downloading playlist')
- entries = [
- self._extract_info_dict(t, quiet=True, secret_token=token)
- for t in data['tracks']]
+ entries = [self.url_result(track['permalink_url'], 'Soundcloud') for track in data['tracks']]
return {
'_type': 'playlist',
diff --git a/youtube_dl/extractor/southpark.py b/youtube_dl/extractor/southpark.py
index 7fb165a87..87b650468 100644
--- a/youtube_dl/extractor/southpark.py
+++ b/youtube_dl/extractor/southpark.py
@@ -45,6 +45,14 @@ class SouthParkDeIE(SouthParkIE):
'title': 'The Government Won\'t Respect My Privacy',
'description': 'Cartman explains the benefits of "Shitter" to Stan, Kyle and Craig.',
},
+ }, {
+ # non-ASCII characters in initial URL
+ 'url': 'http://www.southpark.de/alle-episoden/s18e09-hashtag-aufwärmen',
+ 'playlist_count': 4,
+ }, {
+ # non-ASCII characters in redirect URL
+ 'url': 'http://www.southpark.de/alle-episoden/s18e09',
+ 'playlist_count': 4,
}]
diff --git a/youtube_dl/extractor/spankwire.py b/youtube_dl/extractor/spankwire.py
index 5fa6faf18..9e8fb35b2 100644
--- a/youtube_dl/extractor/spankwire.py
+++ b/youtube_dl/extractor/spankwire.py
@@ -16,8 +16,9 @@ from ..aes import aes_decrypt_text
class SpankwireIE(InfoExtractor):
- _VALID_URL = r'https?://(?:www\.)?(?P<url>spankwire\.com/[^/]*/video(?P<videoid>[0-9]+)/?)'
- _TEST = {
+ _VALID_URL = r'https?://(?:www\.)?(?P<url>spankwire\.com/[^/]*/video(?P<id>[0-9]+)/?)'
+ _TESTS = [{
+ # download URL pattern: */<height>P_<tbr>K_<video_id>.mp4
'url': 'http://www.spankwire.com/Buckcherry-s-X-Rated-Music-Video-Crazy-Bitch/video103545/',
'md5': '8bbfde12b101204b39e4b9fe7eb67095',
'info_dict': {
@@ -30,14 +31,27 @@ class SpankwireIE(InfoExtractor):
'upload_date': '20070507',
'age_limit': 18,
}
- }
+ }, {
+ # download URL pattern: */mp4_<format_id>_<video_id>.mp4
+ 'url': 'http://www.spankwire.com/Titcums-Compiloation-I/video1921551/',
+ 'md5': '09b3c20833308b736ae8902db2f8d7e6',
+ 'info_dict': {
+ 'id': '1921551',
+ 'ext': 'mp4',
+ 'title': 'Titcums Compiloation I',
+ 'description': 'cum on tits',
+ 'uploader': 'dannyh78999',
+ 'uploader_id': '3056053',
+ 'upload_date': '20150822',
+ 'age_limit': 18,
+ },
+ }]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('videoid')
- url = 'http://www.' + mobj.group('url')
+ video_id = mobj.group('id')
- req = compat_urllib_request.Request(url)
+ req = compat_urllib_request.Request('http://www.' + mobj.group('url'))
req.add_header('Cookie', 'age_verified=1')
webpage = self._download_webpage(req, video_id)
@@ -54,7 +68,7 @@ class SpankwireIE(InfoExtractor):
r'by:\s*<a [^>]*>(.+?)</a>',
webpage, 'uploader', fatal=False)
uploader_id = self._html_search_regex(
- r'by:\s*<a href="/Profile\.aspx\?.*?UserId=(\d+).*?"',
+ r'by:\s*<a href="/(?:user/viewProfile|Profile\.aspx)\?.*?UserId=(\d+).*?"',
webpage, 'uploader id', fatal=False)
upload_date = unified_strdate(self._html_search_regex(
r'</a> on (.+?) at \d+:\d+',
@@ -67,9 +81,10 @@ class SpankwireIE(InfoExtractor):
r'<span\s+id="spCommentCount"[^>]*>([\d,\.]+)</span>',
webpage, 'comment count', fatal=False))
- video_urls = list(map(
- compat_urllib_parse_unquote,
- re.findall(r'playerData\.cdnPath[0-9]{3,}\s*=\s*(?:encodeURIComponent\()?["\']([^"\']+)["\']', webpage)))
+ videos = re.findall(
+ r'playerData\.cdnPath([0-9]{3,})\s*=\s*(?:encodeURIComponent\()?["\']([^"\']+)["\']', webpage)
+ heights = [int(video[0]) for video in videos]
+ video_urls = list(map(compat_urllib_parse_unquote, [video[1] for video in videos]))
if webpage.find('flashvars\.encrypted = "true"') != -1:
password = self._search_regex(
r'flashvars\.video_title = "([^"]+)',
@@ -79,21 +94,22 @@ class SpankwireIE(InfoExtractor):
video_urls))
formats = []
- for video_url in video_urls:
+ for height, video_url in zip(heights, video_urls):
path = compat_urllib_parse_urlparse(video_url).path
- format = path.split('/')[4].split('_')[:2]
- resolution, bitrate_str = format
- format = "-".join(format)
- height = int(resolution.rstrip('Pp'))
- tbr = int(bitrate_str.rstrip('Kk'))
- formats.append({
+ _, quality = path.split('/')[4].split('_')[:2]
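+ # quality is e.g. '4500K' for */<height>P_<tbr>K_<id>.mp4 URLs, or a
+ # plain format id for */mp4_<format_id>_<id>.mp4 URLs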
+ f = {
'url': video_url,
- 'resolution': resolution,
- 'format': format,
- 'tbr': tbr,
'height': height,
- 'format_id': format,
- })
+ }
+ tbr = self._search_regex(r'^(\d+)[Kk]$', quality, 'tbr', default=None)
+ if tbr:
+ f.update({
+ 'tbr': int(tbr),
+ 'format_id': '%dp' % height,
+ })
+ else:
+ f['format_id'] = quality
+ formats.append(f)
self._sort_formats(formats)
age_limit = self._rta_search(webpage)
diff --git a/youtube_dl/extractor/spiegel.py b/youtube_dl/extractor/spiegel.py
index b868241d5..5bd3c0087 100644
--- a/youtube_dl/extractor/spiegel.py
+++ b/youtube_dl/extractor/spiegel.py
@@ -9,7 +9,7 @@ from .spiegeltv import SpiegeltvIE
class SpiegelIE(InfoExtractor):
- _VALID_URL = r'https?://(?:www\.)?spiegel\.de/video/[^/]*-(?P<id>[0-9]+)(?:-embed)?(?:\.html)?(?:#.*)?$'
+ _VALID_URL = r'https?://(?:www\.)?spiegel\.de/video/[^/]*-(?P<id>[0-9]+)(?:-embed|-iframe)?(?:\.html)?(?:#.*)?$'
_TESTS = [{
'url': 'http://www.spiegel.de/video/vulkan-tungurahua-in-ecuador-ist-wieder-aktiv-video-1259285.html',
'md5': '2c2754212136f35fb4b19767d242f66e',
@@ -39,6 +39,9 @@ class SpiegelIE(InfoExtractor):
'description': 'SPIEGEL ONLINE-Nutzer durften den deutschen Astronauten Alexander Gerst über sein Leben auf der ISS-Station befragen. Hier kommen seine Antworten auf die besten sechs Fragen.',
'title': 'Fragen an Astronaut Alexander Gerst: "Bekommen Sie die Tageszeiten mit?"',
}
+ }, {
+ 'url': 'http://www.spiegel.de/video/astronaut-alexander-gerst-von-der-iss-station-beantwortet-fragen-video-1519126-iframe.html',
+ 'only_matching': True,
}]
def _real_extract(self, url):
diff --git a/youtube_dl/extractor/sportdeutschland.py b/youtube_dl/extractor/sportdeutschland.py
index 1a57aebf1..7ec6c613f 100644
--- a/youtube_dl/extractor/sportdeutschland.py
+++ b/youtube_dl/extractor/sportdeutschland.py
@@ -38,10 +38,12 @@ class SportDeutschlandIE(InfoExtractor):
'upload_date': '20140825',
'description': 'md5:60a20536b57cee7d9a4ec005e8687504',
'timestamp': 1408976060,
+ 'duration': 2732,
'title': 'Li-Ning Badminton Weltmeisterschaft 2014 Kopenhagen: Herren Einzel, Wei Lee vs. Keun Lee',
'thumbnail': 're:^https?://.*\.jpg$',
'view_count': int,
'categories': ['Li-Ning Badminton WM 2014'],
}
}]
@@ -50,7 +52,7 @@ class SportDeutschlandIE(InfoExtractor):
video_id = mobj.group('id')
sport_id = mobj.group('sport')
- api_url = 'http://splink.tv/api/permalinks/%s/%s' % (
+ api_url = 'http://proxy.vidibusdynamic.net/sportdeutschland.tv/api/permalinks/%s/%s?access_token=true' % (
sport_id, video_id)
req = compat_urllib_request.Request(api_url, headers={
'Accept': 'application/vnd.vidibus.v2.html+json',
@@ -58,12 +60,11 @@ class SportDeutschlandIE(InfoExtractor):
})
data = self._download_json(req, video_id)
- categories = list(data.get('section', {}).get('tags', {}).values())
asset = data['asset']
- assets_info = self._download_json(asset['url'], video_id)
+ categories = [data['section']['title']]
formats = []
- smil_url = assets_info['video']
+ smil_url = asset['video']
if '.smil' in smil_url:
m3u8_url = smil_url.replace('.smil', '.m3u8')
formats.extend(
@@ -91,6 +92,7 @@ class SportDeutschlandIE(InfoExtractor):
'title': asset['title'],
'thumbnail': asset.get('image'),
'description': asset.get('teaser'),
+ 'duration': asset.get('duration'),
'categories': categories,
'view_count': asset.get('views'),
'rtmp_live': asset.get('live'),
diff --git a/youtube_dl/extractor/tagesschau.py b/youtube_dl/extractor/tagesschau.py
index bfe07b024..73e7657d4 100644
--- a/youtube_dl/extractor/tagesschau.py
+++ b/youtube_dl/extractor/tagesschau.py
@@ -8,17 +8,17 @@ from ..utils import parse_filesize
class TagesschauIE(InfoExtractor):
- _VALID_URL = r'https?://(?:www\.)?tagesschau\.de/multimedia/(?:sendung/ts|video/video)(?P<id>-?[0-9]+)\.html'
+ _VALID_URL = r'https?://(?:www\.)?tagesschau\.de/multimedia/(?:[^/]+/)*?[^/#?]+?(?P<id>-?[0-9]+)(?:~_[^/#?]+?)?\.html'
_TESTS = [{
- 'url': 'http://www.tagesschau.de/multimedia/video/video1399128.html',
- 'md5': 'bcdeac2194fb296d599ce7929dfa4009',
+ 'url': 'http://www.tagesschau.de/multimedia/video/video-102143.html',
+ 'md5': '917a228bc7df7850783bc47979673a09',
'info_dict': {
- 'id': '1399128',
+ 'id': '102143',
'ext': 'mp4',
- 'title': 'Harald Range, Generalbundesanwalt, zu den Ermittlungen',
- 'description': 'md5:69da3c61275b426426d711bde96463ab',
- 'thumbnail': 're:^http:.*\.jpg$',
+ 'title': 'Regierungsumbildung in Athen: Neue Minister in Griechenland vereidigt',
+ 'description': 'md5:171feccd9d9b3dd54d05d501568f6359',
+ 'thumbnail': 're:^https?:.*\.jpg$',
},
}, {
'url': 'http://www.tagesschau.de/multimedia/sendung/ts-5727.html',
@@ -28,8 +28,39 @@ class TagesschauIE(InfoExtractor):
'ext': 'mp4',
'description': 'md5:695c01bfd98b7e313c501386327aea59',
'title': 'Sendung: tagesschau \t04.12.2014 20:00 Uhr',
- 'thumbnail': 're:^http:.*\.jpg$',
- }
+ 'thumbnail': 're:^https?:.*\.jpg$',
+ },
+ }, {
+ 'url': 'http://www.tagesschau.de/multimedia/politikimradio/audio-18407.html',
+ 'md5': 'aef45de271c4bf0a5db834aa40bf774c',
+ 'info_dict': {
+ 'id': '18407',
+ 'ext': 'mp3',
+ 'title': 'Flüchtlingsdebatte: Hitzig, aber wenig hilfreich',
+ 'description': 'Flüchtlingsdebatte: Hitzig, aber wenig hilfreich',
+ 'thumbnail': 're:^https?:.*\.jpg$',
+ },
+ }, {
+ 'url': 'http://www.tagesschau.de/multimedia/sendung/tsg-3771.html',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://www.tagesschau.de/multimedia/sendung/tt-3827.html',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://www.tagesschau.de/multimedia/sendung/nm-3475.html',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://www.tagesschau.de/multimedia/sendung/weltspiegel-3167.html',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://www.tagesschau.de/multimedia/tsvorzwanzig-959.html',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://www.tagesschau.de/multimedia/sendung/bab/bab-3299~_bab-sendung-209.html',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://www.tagesschau.de/multimedia/video/video-102303~_bab-sendung-211.html',
+ 'only_matching': True,
}]
_FORMATS = {
@@ -49,19 +80,26 @@ class TagesschauIE(InfoExtractor):
playerpage = self._download_webpage(
player_url, display_id, 'Downloading player page')
- medias = re.findall(
- r'"(http://media.+?)", type:"video/(.+?)", quality:"(.+?)"',
- playerpage)
formats = []
- for url, ext, res in medias:
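+ # matches player entries of the form
+ # "http://media...", type:"video/mp4", quality:"..." where the
+ # quality attribute may be absent (e.g. for audio)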
+ for media in re.finditer(
+ r'''(?x)
+ (?P<q_url>["\'])(?P<url>http://media.+?)(?P=q_url)
+ ,\s*type:(?P<q_type>["\'])(?P<type>video|audio)/(?P<ext>.+?)(?P=q_type)
+ (?:,\s*quality:(?P<q_quality>["\'])(?P<quality>.+?)(?P=q_quality))?
+ ''', playerpage):
+ url = media.group('url')
+ type_ = media.group('type')
+ ext = media.group('ext')
+ res = media.group('quality')
f = {
- 'format_id': res + '_' + ext,
+ 'format_id': '%s_%s' % (res, ext) if res else ext,
'url': url,
'ext': ext,
+ 'vcodec': 'none' if type_ == 'audio' else None,
}
f.update(self._FORMATS.get(res, {}))
formats.append(f)
- thumbnail_fn = re.findall(r'"(/multimedia/.+?\.jpg)"', playerpage)[-1]
+ thumbnail = self._og_search_thumbnail(playerpage)
title = self._og_search_title(webpage).strip()
description = self._og_search_description(webpage).strip()
else:
@@ -99,17 +137,14 @@ class TagesschauIE(InfoExtractor):
'filesize_approx': parse_filesize(m.group('filesize_approx')),
})
formats.append(format)
- thumbnail_fn = self._search_regex(
- r'(?s)<img alt="Sendungsbild".*?src="([^"]+)"',
- webpage, 'thumbnail', fatal=False)
+ thumbnail = self._og_search_thumbnail(webpage)
description = self._html_search_regex(
r'(?s)<p class="teasertext">(.*?)</p>',
- webpage, 'description', fatal=False)
+ webpage, 'description', default=None)
title = self._html_search_regex(
r'<span class="headline".*?>(.*?)</span>', webpage, 'title')
self._sort_formats(formats)
- thumbnail = 'http://www.tagesschau.de' + thumbnail_fn
return {
'id': display_id,
diff --git a/youtube_dl/extractor/telecinco.py b/youtube_dl/extractor/telecinco.py
index a0c744fd1..ae94f055c 100644
--- a/youtube_dl/extractor/telecinco.py
+++ b/youtube_dl/extractor/telecinco.py
@@ -6,7 +6,7 @@ from .mitele import MiTeleIE
class TelecincoIE(MiTeleIE):
IE_NAME = 'telecinco.es'
- _VALID_URL = r'https?://www\.telecinco\.es/[^/]+/[^/]+/(?:[^/]+/)?(?P<id>.*?)\.html'
+ _VALID_URL = r'https?://www\.telecinco\.es/(?:[^/]+/)+(?P<id>.+?)\.html'
_TESTS = [{
'url': 'http://www.telecinco.es/robinfood/temporada-01/t01xp14/Bacalao-cocochas-pil-pil_0_1876350223.html',
@@ -23,4 +23,7 @@ class TelecincoIE(MiTeleIE):
}, {
'url': 'http://www.telecinco.es/informativos/nacional/Pablo_Iglesias-Informativos_Telecinco-entrevista-Pedro_Piqueras_2_1945155182.html',
'only_matching': True,
+ }, {
+ 'url': 'http://www.telecinco.es/espanasinirmaslejos/Espana-gran-destino-turistico_2_1240605043.html',
+ 'only_matching': True,
}]
diff --git a/youtube_dl/extractor/telegraaf.py b/youtube_dl/extractor/telegraaf.py
new file mode 100644
index 000000000..6f8333cfc
--- /dev/null
+++ b/youtube_dl/extractor/telegraaf.py
@@ -0,0 +1,35 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..utils import remove_end
+
+
+class TelegraafIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:www\.)?telegraaf\.nl/tv/(?:[^/]+/)+(?P<id>\d+)/[^/]+\.html'
+ _TEST = {
+ 'url': 'http://www.telegraaf.nl/tv/nieuws/binnenland/24353229/__Tikibad_ontruimd_wegens_brand__.html',
+ 'md5': '83245a9779bcc4a24454bfd53c65b6dc',
+ 'info_dict': {
+ 'id': '24353229',
+ 'ext': 'mp4',
+ 'title': 'Tikibad ontruimd wegens brand',
+ 'description': 'md5:05ca046ff47b931f9b04855015e163a4',
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ 'duration': 33,
+ },
+ }
+
+ def _real_extract(self, url):
+ playlist_id = self._match_id(url)
+
+ webpage = self._download_webpage(url, playlist_id)
+
+ playlist_url = self._search_regex(
+ r"iframe\.loadPlayer\('([^']+)'", webpage, 'player')
+
+ entries = self._extract_xspf_playlist(playlist_url, playlist_id)
+ title = remove_end(self._og_search_title(webpage), ' - VIDEO')
+ description = self._og_search_description(webpage)
+
+ return self.playlist_result(entries, playlist_id, title, description)
diff --git a/youtube_dl/extractor/theplatform.py b/youtube_dl/extractor/theplatform.py
index 83d833e30..25edc3100 100644
--- a/youtube_dl/extractor/theplatform.py
+++ b/youtube_dl/extractor/theplatform.py
@@ -1,7 +1,7 @@
+# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
-import json
import time
import hmac
import binascii
@@ -10,7 +10,8 @@ import hashlib
from .common import InfoExtractor
from ..compat import (
- compat_str,
+ compat_parse_qs,
+ compat_urllib_parse_urlparse,
)
from ..utils import (
determine_ext,
@@ -18,12 +19,69 @@ from ..utils import (
xpath_with_ns,
unsmuggle_url,
int_or_none,
+ url_basename,
+ float_or_none,
)
-_x = lambda p: xpath_with_ns(p, {'smil': 'http://www.w3.org/2005/SMIL21/Language'})
+default_ns = 'http://www.w3.org/2005/SMIL21/Language'
+_x = lambda p: xpath_with_ns(p, {'smil': default_ns})
-class ThePlatformIE(InfoExtractor):
+class ThePlatformBaseIE(InfoExtractor):
+ def _extract_theplatform_smil(self, smil_url, video_id, note='Downloading SMIL data'):
+ meta = self._download_xml(smil_url, video_id, note=note)
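+ # the SMIL returned for geo-blocked or expired videos carries a dummy
+ # <ref> whose title flags the condition and whose abstract holds the
+ # error message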
+ try:
+ error_msg = next(
+ n.attrib['abstract']
+ for n in meta.findall(_x('.//smil:ref'))
+ if n.attrib.get('title') == 'Geographic Restriction' or n.attrib.get('title') == 'Expired')
+ except StopIteration:
+ pass
+ else:
+ raise ExtractorError(error_msg, expected=True)
+
+ formats = self._parse_smil_formats(
+ meta, smil_url, video_id, namespace=default_ns,
+ # the parameters are from syfy.com, other sites may use others,
+ # they also work for nbc.com
+ f4m_params={'g': 'UXWGVKRWHFSP', 'hdcore': '3.0.3'},
+ transform_rtmp_url=lambda streamer, src: (streamer, 'mp4:' + src))
+
+ for _format in formats:
+ ext = determine_ext(_format['url'])
+ if ext == 'once':
+ _format['ext'] = 'mp4'
+
+ self._sort_formats(formats)
+
+ subtitles = self._parse_smil_subtitles(meta, default_ns)
+
+ return formats, subtitles
+
+ def get_metadata(self, path, video_id):
+ info_url = 'http://link.theplatform.com/s/%s?format=preview' % path
+ info = self._download_json(info_url, video_id)
+
+ subtitles = {}
+ captions = info.get('captions')
+ if isinstance(captions, list):
+ for caption in captions:
+ lang, src, mime = caption.get('lang', 'en'), caption.get('src'), caption.get('type')
+ subtitles[lang] = [{
+ 'ext': 'srt' if mime == 'text/srt' else 'ttml',
+ 'url': src,
+ }]
+
+ return {
+ 'title': info['title'],
+ 'subtitles': subtitles,
+ 'description': info['description'],
+ 'thumbnail': info['defaultThumbnailUrl'],
+ 'duration': int_or_none(info.get('duration'), 1000),
+ }
+
+
+class ThePlatformIE(ThePlatformBaseIE):
_VALID_URL = r'''(?x)
(?:https?://(?:link|player)\.theplatform\.com/[sp]/(?P<provider_id>[^/]+)/
(?:(?P<media>(?:[^/]+/)+select/media/)|(?P<config>(?:[^/\?]+/(?:swf|config)|onsite)/select/))?
@@ -67,6 +125,20 @@ class ThePlatformIE(InfoExtractor):
}, {
'url': 'http://player.theplatform.com/p/NnzsPC/widget/select/media/4Y0TlYUr_ZT7',
'only_matching': True,
+ }, {
+ 'url': 'http://player.theplatform.com/p/2E2eJC/nbcNewsOffsite?guid=tdy_or_siri_150701',
+ 'md5': '734f3790fb5fc4903da391beeebc4836',
+ 'info_dict': {
+ 'id': 'tdy_or_siri_150701',
+ 'ext': 'mp4',
+ 'title': 'iPhone Siri’s sassy response to a math question has people talking',
+ 'description': 'md5:a565d1deadd5086f3331d57298ec6333',
+ 'duration': 83.0,
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ 'timestamp': 1435752600,
+ 'upload_date': '20150701',
+ 'categories': ['Today/Shows/Orange Room', 'Today/Sections/Money', 'Today/Topics/Tech', "Today/Topics/Editor's picks"],
+ },
}]
@staticmethod
@@ -101,6 +173,24 @@ class ThePlatformIE(InfoExtractor):
path += '/media'
path += '/' + video_id
+ qs_dict = compat_parse_qs(compat_urllib_parse_urlparse(url).query)
+ if 'guid' in qs_dict:
+ webpage = self._download_webpage(url, video_id)
+ scripts = re.findall(r'<script[^>]+src="([^"]+)"', webpage)
+ feed_id = None
+ # the feed id is usually found in the last script; there seems to be no
+ # reliable pattern in the script filenames, so try them one by one in
+ # reverse order
+ for script in reversed(scripts):
+ feed_script = self._download_webpage(script, video_id, 'Downloading feed script')
+ feed_id = self._search_regex(r'defaultFeedId\s*:\s*"([^"]+)"', feed_script, 'default feed id', default=None)
+ if feed_id is not None:
+ break
+ if feed_id is None:
+ raise ExtractorError('Unable to find feed id')
+ return self.url_result('http://feed.theplatform.com/f/%s/%s?byGuid=%s' % (
+ provider_id, feed_id, qs_dict['guid'][0]))
+
if smuggled_data.get('force_smil_url', False):
smil_url = url
elif mobj.group('config'):
@@ -108,7 +198,11 @@ class ThePlatformIE(InfoExtractor):
config_url = config_url.replace('swf/', 'config/')
config_url = config_url.replace('onsite/', 'onsite/config/')
config = self._download_json(config_url, video_id, 'Downloading config')
- smil_url = config['releaseUrl'] + '&format=SMIL&formats=MPEG4&manifest=f4m'
+ if 'releaseUrl' in config:
+ release_url = config['releaseUrl']
+ else:
+ release_url = 'http://link.theplatform.com/s/%s?mbr=true' % path
+ smil_url = release_url + '&format=SMIL&formats=MPEG4&manifest=f4m'
else:
smil_url = 'http://link.theplatform.com/s/%s/meta.smil?format=smil&mbr=true' % path
@@ -116,95 +210,85 @@ class ThePlatformIE(InfoExtractor):
if sig:
smil_url = self._sign_url(smil_url, sig['key'], sig['secret'])
- meta = self._download_xml(smil_url, video_id)
- try:
- error_msg = next(
- n.attrib['abstract']
- for n in meta.findall(_x('.//smil:ref'))
- if n.attrib.get('title') == 'Geographic Restriction' or n.attrib.get('title') == 'Expired')
- except StopIteration:
- pass
- else:
- raise ExtractorError(error_msg, expected=True)
+ formats, subtitles = self._extract_theplatform_smil(smil_url, video_id)
- info_url = 'http://link.theplatform.com/s/%s?format=preview' % path
- info_json = self._download_webpage(info_url, video_id)
- info = json.loads(info_json)
+ ret = self.get_metadata(path, video_id)
+ combined_subtitles = self._merge_subtitles(ret.get('subtitles', {}), subtitles)
+ ret.update({
+ 'id': video_id,
+ 'formats': formats,
+ 'subtitles': combined_subtitles,
+ })
+
+ return ret
+
+
+class ThePlatformFeedIE(ThePlatformBaseIE):
+ _URL_TEMPLATE = '%s//feed.theplatform.com/f/%s/%s?form=json&byGuid=%s'
+ _VALID_URL = r'https?://feed\.theplatform\.com/f/(?P<provider_id>[^/]+)/(?P<feed_id>[^?/]+)\?(?:[^&]+&)*byGuid=(?P<id>[a-zA-Z0-9_]+)'
+ _TEST = {
+ # From http://player.theplatform.com/p/7wvmTC/MSNBCEmbeddedOffSite?guid=n_hardball_5biden_140207
+ 'url': 'http://feed.theplatform.com/f/7wvmTC/msnbc_video-p-test?form=json&pretty=true&range=-40&byGuid=n_hardball_5biden_140207',
+ 'md5': '22d2b84f058d3586efcd99e57d59d314',
+ 'info_dict': {
+ 'id': 'n_hardball_5biden_140207',
+ 'ext': 'mp4',
+ 'title': 'The Biden factor: will Joe run in 2016?',
+ 'description': 'Could Vice President Joe Biden be preparing a 2016 campaign? Mark Halperin and Sam Stein weigh in.',
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ 'upload_date': '20140208',
+ 'timestamp': 1391824260,
+ 'duration': 467.0,
+ 'categories': ['MSNBC/Issues/Democrats', 'MSNBC/Issues/Elections/Election 2016'],
+ },
+ }
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+
+ video_id = mobj.group('id')
+ provider_id = mobj.group('provider_id')
+ feed_id = mobj.group('feed_id')
+
+ real_url = self._URL_TEMPLATE % (self.http_scheme(), provider_id, feed_id, video_id)
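+ # e.g. http://feed.theplatform.com/f/7wvmTC/msnbc_video-p-test?form=json&byGuid=n_hardball_5biden_140207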
+ feed = self._download_json(real_url, video_id)
+ entry = feed['entries'][0]
+ formats = []
subtitles = {}
- captions = info.get('captions')
- if isinstance(captions, list):
- for caption in captions:
- lang, src, mime = caption.get('lang', 'en'), caption.get('src'), caption.get('type')
- subtitles[lang] = [{
- 'ext': 'srt' if mime == 'text/srt' else 'ttml',
- 'url': src,
- }]
+ first_video_id = None
+ duration = None
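+ # a feed entry may reference several physical files in media$content;
+ # collect formats and subtitles from each, keeping the first file's
+ # duration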
+ for item in entry['media$content']:
+ smil_url = item['plfile$url'] + '&format=SMIL&Tracking=true&Embedded=true&formats=MPEG4,F4M'
+ cur_video_id = url_basename(smil_url)
+ if first_video_id is None:
+ first_video_id = cur_video_id
+ duration = float_or_none(item.get('plfile$duration'))
+ cur_formats, cur_subtitles = self._extract_theplatform_smil(smil_url, video_id, 'Downloading SMIL data for %s' % cur_video_id)
+ formats.extend(cur_formats)
+ subtitles = self._merge_subtitles(subtitles, cur_subtitles)
- head = meta.find(_x('smil:head'))
- body = meta.find(_x('smil:body'))
+ self._sort_formats(formats)
- f4m_node = body.find(_x('smil:seq//smil:video'))
- if f4m_node is None:
- f4m_node = body.find(_x('smil:seq/smil:video'))
- if f4m_node is not None and '.f4m' in f4m_node.attrib['src']:
- f4m_url = f4m_node.attrib['src']
- if 'manifest.f4m?' not in f4m_url:
- f4m_url += '?'
- # the parameters are from syfy.com, other sites may use others,
- # they also work for nbc.com
- f4m_url += '&g=UXWGVKRWHFSP&hdcore=3.0.3'
- formats = self._extract_f4m_formats(f4m_url, video_id)
- else:
- formats = []
- switch = body.find(_x('smil:switch'))
- if switch is None:
- switch = body.find(_x('smil:par//smil:switch'))
- if switch is None:
- switch = body.find(_x('smil:par/smil:switch'))
- if switch is None:
- switch = body.find(_x('smil:par'))
- if switch is not None:
- base_url = head.find(_x('smil:meta')).attrib['base']
- for f in switch.findall(_x('smil:video')):
- attr = f.attrib
- width = int_or_none(attr.get('width'))
- height = int_or_none(attr.get('height'))
- vbr = int_or_none(attr.get('system-bitrate'), 1000)
- format_id = '%dx%d_%dk' % (width, height, vbr)
- formats.append({
- 'format_id': format_id,
- 'url': base_url,
- 'play_path': 'mp4:' + attr['src'],
- 'ext': 'flv',
- 'width': width,
- 'height': height,
- 'vbr': vbr,
- })
- else:
- switch = body.find(_x('smil:seq//smil:switch'))
- if switch is None:
- switch = body.find(_x('smil:seq/smil:switch'))
- for f in switch.findall(_x('smil:video')):
- attr = f.attrib
- vbr = int_or_none(attr.get('system-bitrate'), 1000)
- ext = determine_ext(attr['src'])
- if ext == 'once':
- ext = 'mp4'
- formats.append({
- 'format_id': compat_str(vbr),
- 'url': attr['src'],
- 'vbr': vbr,
- 'ext': ext,
- })
- self._sort_formats(formats)
+ thumbnails = [{
+ 'url': thumbnail['plfile$url'],
+ 'width': int_or_none(thumbnail.get('plfile$width')),
+ 'height': int_or_none(thumbnail.get('plfile$height')),
+ } for thumbnail in entry.get('media$thumbnails', [])]
- return {
+ timestamp = int_or_none(entry.get('media$availableDate'), scale=1000)
+ categories = [item['media$name'] for item in entry.get('media$categories', [])]
+
+ ret = self.get_metadata('%s/%s' % (provider_id, first_video_id), video_id)
+ subtitles = self._merge_subtitles(subtitles, ret['subtitles'])
+ ret.update({
'id': video_id,
- 'title': info['title'],
- 'subtitles': subtitles,
'formats': formats,
- 'description': info['description'],
- 'thumbnail': info['defaultThumbnailUrl'],
- 'duration': int_or_none(info.get('duration'), 1000),
- }
+ 'subtitles': subtitles,
+ 'thumbnails': thumbnails,
+ 'duration': duration,
+ 'timestamp': timestamp,
+ 'categories': categories,
+ })
+
+ return ret
diff --git a/youtube_dl/extractor/tubitv.py b/youtube_dl/extractor/tubitv.py
index 2c4b21807..4f86b3ee9 100644
--- a/youtube_dl/extractor/tubitv.py
+++ b/youtube_dl/extractor/tubitv.py
@@ -60,9 +60,7 @@ class TubiTvIE(InfoExtractor):
webpage = self._download_webpage(url, video_id)
if re.search(r"<(?:DIV|div) class='login-required-screen'>", webpage):
- raise ExtractorError(
- 'This video requires login, use --username and --password '
- 'options to provide account credentials.', expected=True)
+ self.raise_login_required('This video requires login')
title = self._og_search_title(webpage)
description = self._og_search_description(webpage)
diff --git a/youtube_dl/extractor/tudou.py b/youtube_dl/extractor/tudou.py
index 8095e18d2..e800477e2 100644
--- a/youtube_dl/extractor/tudou.py
+++ b/youtube_dl/extractor/tudou.py
@@ -29,6 +29,8 @@ class TudouIE(InfoExtractor):
}
}]
+ _PLAYER_URL = 'http://js.tudouui.com/bin/lingtong/PortalPlayer_177.swf'
+
def _url_for_id(self, id, quality=None):
info_url = "http://v2.tudou.com/f?id=" + str(id)
if quality:
@@ -54,6 +56,10 @@ class TudouIE(InfoExtractor):
thumbnail_url = self._search_regex(
r",pic:\s*[\"'](.+?)[\"']", webpage, 'thumbnail URL', fatal=False)
+ player_url = self._search_regex(
+ r"playerUrl\s*:\s*['\"](.+?\.swf)[\"']",
+ webpage, 'player URL', default=self._PLAYER_URL)
+
segs_json = self._search_regex(r'segs: \'(.*)\'', webpage, 'segments')
segments = json.loads(segs_json)
# It looks like the keys are the arguments that have to be passed as
@@ -76,6 +82,9 @@ class TudouIE(InfoExtractor):
'ext': ext,
'title': title,
'thumbnail': thumbnail_url,
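+ # the CDN appears to check the Referer header; send the page's player
+ # URL (or the default SWF) so downloads are not rejected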
+ 'http_headers': {
+ 'Referer': player_url,
+ },
}
result.append(part_info)
diff --git a/youtube_dl/extractor/tumblr.py b/youtube_dl/extractor/tumblr.py
index 9ead13a91..3d3b635e4 100644
--- a/youtube_dl/extractor/tumblr.py
+++ b/youtube_dl/extractor/tumblr.py
@@ -4,8 +4,6 @@ from __future__ import unicode_literals
import re
from .common import InfoExtractor
-from .pornhub import PornHubIE
-from .vimeo import VimeoIE
class TumblrIE(InfoExtractor):
@@ -60,26 +58,16 @@ class TumblrIE(InfoExtractor):
blog = m_url.group('blog_name')
url = 'http://%s.tumblr.com/post/%s/' % (blog, video_id)
- webpage = self._download_webpage(url, video_id)
-
- vid_me_embed_url = self._search_regex(
- r'src=[\'"](https?://vid\.me/[^\'"]+)[\'"]',
- webpage, 'vid.me embed', default=None)
- if vid_me_embed_url is not None:
- return self.url_result(vid_me_embed_url, 'Vidme')
-
- pornhub_url = PornHubIE._extract_url(webpage)
- if pornhub_url:
- return self.url_result(pornhub_url, 'PornHub')
-
- vimeo_url = VimeoIE._extract_vimeo_url(url, webpage)
- if vimeo_url:
- return self.url_result(vimeo_url, 'Vimeo')
+ webpage, urlh = self._download_webpage_handle(url, video_id)
iframe_url = self._search_regex(
r'src=\'(https?://www\.tumblr\.com/video/[^\']+)\'',
- webpage, 'iframe url')
- iframe = self._download_webpage(iframe_url, video_id)
+ webpage, 'iframe url', default=None)
+ if iframe_url is None:
+ return self.url_result(urlh.geturl(), 'Generic')
+
+ iframe = self._download_webpage(iframe_url, video_id,
+ 'Downloading iframe page')
video_url = self._search_regex(r'<source src="([^"]+)"',
iframe, 'video url')
diff --git a/youtube_dl/extractor/tvplay.py b/youtube_dl/extractor/tvplay.py
index 79863e781..b4683de54 100644
--- a/youtube_dl/extractor/tvplay.py
+++ b/youtube_dl/extractor/tvplay.py
@@ -104,6 +104,7 @@ class TVPlayIE(InfoExtractor):
'duration': 1492,
'timestamp': 1330522854,
'upload_date': '20120229',
+ 'age_limit': 18,
},
'params': {
# rtmp download
diff --git a/youtube_dl/extractor/tweakers.py b/youtube_dl/extractor/tweakers.py
index c80ec15cf..f3198fb85 100644
--- a/youtube_dl/extractor/tweakers.py
+++ b/youtube_dl/extractor/tweakers.py
@@ -1,19 +1,13 @@
from __future__ import unicode_literals
from .common import InfoExtractor
-from ..utils import (
- xpath_text,
- xpath_with_ns,
- int_or_none,
- float_or_none,
-)
class TweakersIE(InfoExtractor):
_VALID_URL = r'https?://tweakers\.net/video/(?P<id>\d+)'
_TEST = {
'url': 'https://tweakers.net/video/9926/new-nintendo-3ds-xl-op-alle-fronten-beter.html',
- 'md5': '1b5afa817403bb5baa08359dca31e6df',
+ 'md5': '3147e4ddad366f97476a93863e4557c8',
'info_dict': {
'id': '9926',
'ext': 'mp4',
@@ -25,41 +19,7 @@ class TweakersIE(InfoExtractor):
}
def _real_extract(self, url):
- video_id = self._match_id(url)
-
- playlist = self._download_xml(
- 'https://tweakers.net/video/s1playlist/%s/playlist.xspf' % video_id,
- video_id)
-
- NS_MAP = {
- 'xspf': 'http://xspf.org/ns/0/',
- 's1': 'http://static.streamone.nl/player/ns/0',
- }
-
- track = playlist.find(xpath_with_ns('./xspf:trackList/xspf:track', NS_MAP))
-
- title = xpath_text(
- track, xpath_with_ns('./xspf:title', NS_MAP), 'title')
- description = xpath_text(
- track, xpath_with_ns('./xspf:annotation', NS_MAP), 'description')
- thumbnail = xpath_text(
- track, xpath_with_ns('./xspf:image', NS_MAP), 'thumbnail')
- duration = float_or_none(
- xpath_text(track, xpath_with_ns('./xspf:duration', NS_MAP), 'duration'),
- 1000)
-
- formats = [{
- 'url': location.text,
- 'format_id': location.get(xpath_with_ns('s1:label', NS_MAP)),
- 'width': int_or_none(location.get(xpath_with_ns('s1:width', NS_MAP))),
- 'height': int_or_none(location.get(xpath_with_ns('s1:height', NS_MAP))),
- } for location in track.findall(xpath_with_ns('./xspf:location', NS_MAP))]
-
- return {
- 'id': video_id,
- 'title': title,
- 'description': description,
- 'thumbnail': thumbnail,
- 'duration': duration,
- 'formats': formats,
- }
+ playlist_id = self._match_id(url)
+ entries = self._extract_xspf_playlist(
+ 'https://tweakers.net/video/s1playlist/%s/playlist.xspf' % playlist_id, playlist_id)
+ return self.playlist_result(entries, playlist_id)
diff --git a/youtube_dl/extractor/twitch.py b/youtube_dl/extractor/twitch.py
index 948c8ce39..023911c41 100644
--- a/youtube_dl/extractor/twitch.py
+++ b/youtube_dl/extractor/twitch.py
@@ -7,12 +7,17 @@ import random
from .common import InfoExtractor
from ..compat import (
+ compat_parse_qs,
compat_str,
compat_urllib_parse,
+ compat_urllib_parse_urlparse,
compat_urllib_request,
+ compat_urlparse,
)
from ..utils import (
ExtractorError,
+ int_or_none,
+ parse_duration,
parse_iso8601,
)
@@ -23,7 +28,7 @@ class TwitchBaseIE(InfoExtractor):
_API_BASE = 'https://api.twitch.tv'
_USHER_BASE = 'http://usher.twitch.tv'
_LOGIN_URL = 'https://secure.twitch.tv/login'
- _LOGIN_POST_URL = 'https://passport.twitch.tv/authorize'
+ _LOGIN_POST_URL = 'https://passport.twitch.tv/authentications/new'
_NETRC_MACHINE = 'twitch'
def _handle_error(self, response):
@@ -66,8 +71,15 @@ class TwitchBaseIE(InfoExtractor):
'password': password.encode('utf-8'),
})
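+
+ # scrape the form's action URL from the login page; it may be relative,
+ # and it has changed before, hence the fallback to _LOGIN_POST_URL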
+ post_url = self._search_regex(
+ r'<form[^>]+action=(["\'])(?P<url>.+?)\1', login_page,
+ 'post url', default=self._LOGIN_POST_URL, group='url')
+
+ if not post_url.startswith('http'):
+ post_url = compat_urlparse.urljoin(self._LOGIN_URL, post_url)
+
request = compat_urllib_request.Request(
- self._LOGIN_POST_URL, compat_urllib_parse.urlencode(login_form).encode('utf-8'))
+ post_url, compat_urllib_parse.urlencode(login_form).encode('utf-8'))
request.add_header('Referer', self._LOGIN_URL)
response = self._download_webpage(
request, None, 'Logging in as %s' % username)
@@ -129,14 +141,14 @@ class TwitchItemBaseIE(TwitchBaseIE):
def _extract_info(self, info):
return {
'id': info['_id'],
- 'title': info['title'],
- 'description': info['description'],
- 'duration': info['length'],
- 'thumbnail': info['preview'],
- 'uploader': info['channel']['display_name'],
- 'uploader_id': info['channel']['name'],
- 'timestamp': parse_iso8601(info['recorded_at']),
- 'view_count': info['views'],
+ 'title': info.get('title') or 'Untitled Broadcast',
+ 'description': info.get('description'),
+ 'duration': int_or_none(info.get('length')),
+ 'thumbnail': info.get('preview'),
+ 'uploader': info.get('channel', {}).get('display_name'),
+ 'uploader_id': info.get('channel', {}).get('name'),
+ 'timestamp': parse_iso8601(info.get('recorded_at')),
+ 'view_count': int_or_none(info.get('views')),
}
def _real_extract(self, url):
@@ -184,8 +196,8 @@ class TwitchVodIE(TwitchItemBaseIE):
_ITEM_TYPE = 'vod'
_ITEM_SHORTCUT = 'v'
- _TEST = {
- 'url': 'http://www.twitch.tv/riotgames/v/6528877',
+ _TESTS = [{
+ 'url': 'http://www.twitch.tv/riotgames/v/6528877?t=5m10s',
'info_dict': {
'id': 'v6528877',
'ext': 'mp4',
@@ -197,12 +209,32 @@ class TwitchVodIE(TwitchItemBaseIE):
'uploader': 'Riot Games',
'uploader_id': 'riotgames',
'view_count': int,
+ 'start_time': 310,
},
'params': {
# m3u8 download
'skip_download': True,
},
- }
+ }, {
+ # Untitled broadcast (title is None)
+ 'url': 'http://www.twitch.tv/belkao_o/v/11230755',
+ 'info_dict': {
+ 'id': 'v11230755',
+ 'ext': 'mp4',
+ 'title': 'Untitled Broadcast',
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ 'duration': 1638,
+ 'timestamp': 1439746708,
+ 'upload_date': '20150816',
+ 'uploader': 'BelkAO_o',
+ 'uploader_id': 'belkao_o',
+ 'view_count': int,
+ },
+ 'params': {
+ # m3u8 download
+ 'skip_download': True,
+ },
+ }]
def _real_extract(self, url):
item_id = self._match_id(url)
@@ -216,6 +248,12 @@ class TwitchVodIE(TwitchItemBaseIE):
item_id, 'mp4')
self._prefer_source(formats)
info['formats'] = formats
+
+ parsed_url = compat_urllib_parse_urlparse(url)
+ query = compat_parse_qs(parsed_url.query)
+ if 't' in query:
+ info['start_time'] = parse_duration(query['t'][0])
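+ # e.g. ?t=5m10s yields start_time = 310 (seconds), as in the test above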
+
return info
@@ -310,9 +348,9 @@ class TwitchBookmarksIE(TwitchPlaylistBaseIE):
class TwitchStreamIE(TwitchBaseIE):
IE_NAME = 'twitch:stream'
- _VALID_URL = r'%s/(?P<id>[^/]+)/?(?:\#.*)?$' % TwitchBaseIE._VALID_URL_BASE
+ _VALID_URL = r'%s/(?P<id>[^/#?]+)/?(?:\#.*)?$' % TwitchBaseIE._VALID_URL_BASE
- _TEST = {
+ _TESTS = [{
'url': 'http://www.twitch.tv/shroomztv',
'info_dict': {
'id': '12772022048',
@@ -331,7 +369,10 @@ class TwitchStreamIE(TwitchBaseIE):
# m3u8 download
'skip_download': True,
},
- }
+ }, {
+ 'url': 'http://www.twitch.tv/miracle_doto#profile-0',
+ 'only_matching': True,
+ }]
def _real_extract(self, url):
channel_id = self._match_id(url)
diff --git a/youtube_dl/extractor/udemy.py b/youtube_dl/extractor/udemy.py
index e2bab52fe..365d8b4bf 100644
--- a/youtube_dl/extractor/udemy.py
+++ b/youtube_dl/extractor/udemy.py
@@ -70,14 +70,16 @@ class UdemyIE(InfoExtractor):
def _login(self):
(username, password) = self._get_login_info()
if username is None:
- raise ExtractorError(
- 'Udemy account is required, use --username and --password options to provide account credentials.',
- expected=True)
+ self.raise_login_required('Udemy account is required')
login_popup = self._download_webpage(
self._LOGIN_URL, None, 'Downloading login popup')
- if login_popup == '<div class="run-command close-popup redirect" data-url="https://www.udemy.com/"></div>':
+ def is_logged(webpage):
+ return any(p in webpage for p in ['href="https://www.udemy.com/user/logout/', '>Logout<'])
+
+ # already logged in
+ if is_logged(login_popup):
return
login_form = self._form_hidden_inputs('login-form', login_popup)
@@ -95,8 +97,7 @@ class UdemyIE(InfoExtractor):
response = self._download_webpage(
request, None, 'Logging in as %s' % username)
- if all(logout_pattern not in response
- for logout_pattern in ['href="https://www.udemy.com/user/logout/', '>Logout<']):
+ if not is_logged(response):
error = self._html_search_regex(
r'(?s)<div[^>]+class="form-errors[^"]*">(.+?)</div>',
response, 'error message', default=None)
diff --git a/youtube_dl/extractor/vice.py b/youtube_dl/extractor/vice.py
index 04e2b0ba7..01af7a995 100644
--- a/youtube_dl/extractor/vice.py
+++ b/youtube_dl/extractor/vice.py
@@ -1,5 +1,4 @@
from __future__ import unicode_literals
-import re
from .common import InfoExtractor
from .ooyala import OoyalaIE
@@ -7,25 +6,29 @@ from ..utils import ExtractorError
class ViceIE(InfoExtractor):
- _VALID_URL = r'http://www\.vice\.com/.*?/(?P<name>.+)'
+ _VALID_URL = r'https?://(?:.+?\.)?vice\.com/(?:[^/]+/)+(?P<id>.+)'
- _TEST = {
- 'url': 'http://www.vice.com/Fringes/cowboy-capitalists-part-1',
- 'info_dict': {
- 'id': '43cW1mYzpia9IlestBjVpd23Yu3afAfp',
- 'ext': 'mp4',
- 'title': 'VICE_COWBOYCAPITALISTS_PART01_v1_VICE_WM_1080p.mov',
- },
- 'params': {
- # Requires ffmpeg (m3u8 manifest)
- 'skip_download': True,
- },
- }
+ _TESTS = [
+ {
+ 'url': 'http://www.vice.com/Fringes/cowboy-capitalists-part-1',
+ 'info_dict': {
+ 'id': '43cW1mYzpia9IlestBjVpd23Yu3afAfp',
+ 'ext': 'mp4',
+ 'title': 'VICE_COWBOYCAPITALISTS_PART01_v1_VICE_WM_1080p.mov',
+ },
+ 'params': {
+ # Requires ffmpeg (m3u8 manifest)
+ 'skip_download': True,
+ },
+ }, {
+ 'url': 'https://news.vice.com/video/experimenting-on-animals-inside-the-monkey-lab',
+ 'only_matching': True,
+ }
+ ]
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- name = mobj.group('name')
- webpage = self._download_webpage(url, name)
+ video_id = self._match_id(url)
+ webpage = self._download_webpage(url, video_id)
try:
embed_code = self._search_regex(
r'embedCode=([^&\'"]+)', webpage,
diff --git a/youtube_dl/extractor/videobam.py b/youtube_dl/extractor/videobam.py
deleted file mode 100644
index 0eb3d9414..000000000
--- a/youtube_dl/extractor/videobam.py
+++ /dev/null
@@ -1,81 +0,0 @@
-from __future__ import unicode_literals
-
-import re
-import json
-
-from .common import InfoExtractor
-from ..utils import int_or_none
-
-
-class VideoBamIE(InfoExtractor):
- _VALID_URL = r'http://(?:www\.)?videobam\.com/(?:videos/download/)?(?P<id>[a-zA-Z]+)'
-
- _TESTS = [
- {
- 'url': 'http://videobam.com/OiJQM',
- 'md5': 'db471f27763a531f10416a0c58b5a1e0',
- 'info_dict': {
- 'id': 'OiJQM',
- 'ext': 'mp4',
- 'title': 'Is Alcohol Worse Than Ecstasy?',
- 'description': 'md5:d25b96151515c91debc42bfbb3eb2683',
- 'uploader': 'frihetsvinge',
- },
- },
- {
- 'url': 'http://videobam.com/pqLvq',
- 'md5': 'd9a565b5379a99126ef94e1d7f9a383e',
- 'note': 'HD video',
- 'info_dict': {
- 'id': 'pqLvq',
- 'ext': 'mp4',
- 'title': '_',
- }
- },
- ]
-
- def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group('id')
-
- page = self._download_webpage('http://videobam.com/%s' % video_id, video_id, 'Downloading page')
-
- formats = []
-
- for preference, format_id in enumerate(['low', 'high']):
- mobj = re.search(r"%s: '(?P<url>[^']+)'" % format_id, page)
- if not mobj:
- continue
- formats.append({
- 'url': mobj.group('url'),
- 'ext': 'mp4',
- 'format_id': format_id,
- 'preference': preference,
- })
-
- if not formats:
- player_config = json.loads(self._html_search_regex(r'var player_config = ({.+?});', page, 'player config'))
- formats = [{
- 'url': item['url'],
- 'ext': 'mp4',
- } for item in player_config['playlist'] if 'autoPlay' in item]
-
- self._sort_formats(formats)
-
- title = self._og_search_title(page, default='_', fatal=False)
- description = self._og_search_description(page, default=None)
- thumbnail = self._og_search_thumbnail(page)
- uploader = self._html_search_regex(r'Upload by ([^<]+)</a>', page, 'uploader', fatal=False, default=None)
- view_count = int_or_none(
- self._html_search_regex(r'<strong>Views:</strong> (\d+) ', page, 'view count', fatal=False))
-
- return {
- 'id': video_id,
- 'title': title,
- 'description': description,
- 'thumbnail': thumbnail,
- 'uploader': uploader,
- 'view_count': view_count,
- 'formats': formats,
- 'age_limit': 18,
- }
diff --git a/youtube_dl/extractor/videolecturesnet.py b/youtube_dl/extractor/videolecturesnet.py
index d6a7eb203..ef2da5632 100644
--- a/youtube_dl/extractor/videolecturesnet.py
+++ b/youtube_dl/extractor/videolecturesnet.py
@@ -12,7 +12,7 @@ from ..utils import (
class VideoLecturesNetIE(InfoExtractor):
- _VALID_URL = r'http://(?:www\.)?videolectures\.net/(?P<id>[^/#?]+)/'
+ _VALID_URL = r'http://(?:www\.)?videolectures\.net/(?P<id>[^/#?]+)/*(?:[#?].*)?$'
IE_NAME = 'videolectures.net'
_TEST = {
diff --git a/youtube_dl/extractor/vidme.py b/youtube_dl/extractor/vidme.py
index e0b55078b..9a794e609 100644
--- a/youtube_dl/extractor/vidme.py
+++ b/youtube_dl/extractor/vidme.py
@@ -1,10 +1,12 @@
from __future__ import unicode_literals
from .common import InfoExtractor
+from ..compat import compat_HTTPError
from ..utils import (
+ ExtractorError,
int_or_none,
float_or_none,
- str_to_int,
+ parse_iso8601,
)
@@ -12,55 +14,138 @@ class VidmeIE(InfoExtractor):
_VALID_URL = r'https?://vid\.me/(?:e/)?(?P<id>[\da-zA-Z]+)'
_TESTS = [{
'url': 'https://vid.me/QNB',
- 'md5': 'f42d05e7149aeaec5c037b17e5d3dc82',
+ 'md5': 'c62f1156138dc3323902188c5b5a8bd6',
'info_dict': {
'id': 'QNB',
'ext': 'mp4',
'title': 'Fishing for piranha - the easy way',
'description': 'source: https://www.facebook.com/photo.php?v=312276045600871',
- 'duration': 119.92,
+ 'thumbnail': 're:^https?://.*\.jpg',
'timestamp': 1406313244,
'upload_date': '20140725',
+ 'age_limit': 0,
+ 'duration': 119.92,
+ 'view_count': int,
+ 'like_count': int,
+ 'comment_count': int,
+ },
+ }, {
+ 'url': 'https://vid.me/Gc6M',
+ 'md5': 'f42d05e7149aeaec5c037b17e5d3dc82',
+ 'info_dict': {
+ 'id': 'Gc6M',
+ 'ext': 'mp4',
+ 'title': 'O Mere Dil ke chain - Arnav and Khushi VM',
+ 'thumbnail': 're:^https?://.*\.jpg',
+ 'timestamp': 1441211642,
+ 'upload_date': '20150902',
+ 'uploader': 'SunshineM',
+ 'uploader_id': '3552827',
+ 'age_limit': 0,
+ 'duration': 223.72,
+ 'view_count': int,
+ 'like_count': int,
+ 'comment_count': int,
+ },
+ 'params': {
+ 'skip_download': True,
+ },
+ }, {
+ # tests the uploader field
+ 'url': 'https://vid.me/4Iib',
+ 'info_dict': {
+ 'id': '4Iib',
+ 'ext': 'mp4',
+ 'title': 'The Carver',
+ 'description': 'md5:e9c24870018ae8113be936645b93ba3c',
'thumbnail': 're:^https?://.*\.jpg',
+ 'timestamp': 1433203629,
+ 'upload_date': '20150602',
+ 'uploader': 'Thomas',
+ 'uploader_id': '109747',
+ 'age_limit': 0,
+ 'duration': 97.86,
+ 'view_count': int,
+ 'like_count': int,
+ 'comment_count': int,
+ },
+ 'params': {
+ 'skip_download': True,
},
}, {
- # From http://naked-yogi.tumblr.com/post/118312946248/naked-smoking-stretching
+ # nsfw test from http://naked-yogi.tumblr.com/post/118312946248/naked-smoking-stretching
'url': 'https://vid.me/e/Wmur',
- 'only_matching': True,
+ 'info_dict': {
+ 'id': 'Wmur',
+ 'ext': 'mp4',
+ 'title': 'naked smoking & stretching',
+ 'thumbnail': 're:^https?://.*\.jpg',
+ 'timestamp': 1430931613,
+ 'upload_date': '20150506',
+ 'uploader': 'naked-yogi',
+ 'uploader_id': '1638622',
+ 'age_limit': 18,
+ 'duration': 653.27,
+ 'view_count': int,
+ 'like_count': int,
+ 'comment_count': int,
+ },
+ 'params': {
+ 'skip_download': True,
+ },
}]
def _real_extract(self, url):
- url = url.replace('vid.me/e/', 'vid.me/')
video_id = self._match_id(url)
- webpage = self._download_webpage(url, video_id)
- video_url = self._html_search_regex(
- r'<source src="([^"]+)"', webpage, 'video URL')
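+ # the API reports errors with HTTP 400 and a JSON body; parse it so the
+ # error message can be surfaced below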
+ try:
+ response = self._download_json(
+ 'https://api.vid.me/videoByUrl/%s' % video_id, video_id)
+ except ExtractorError as e:
+ if isinstance(e.cause, compat_HTTPError) and e.cause.code == 400:
+ response = self._parse_json(e.cause.read(), video_id)
+ else:
+ raise
+
+ error = response.get('error')
+ if error:
+ raise ExtractorError(
+ '%s returned error: %s' % (self.IE_NAME, error), expected=True)
+
+ video = response['video']
+
+ formats = [{
+ 'format_id': f.get('type'),
+ 'url': f['uri'],
+ 'width': int_or_none(f.get('width')),
+ 'height': int_or_none(f.get('height')),
+ } for f in video.get('formats', []) if f.get('uri')]
+ self._sort_formats(formats)
- title = self._og_search_title(webpage)
- description = self._og_search_description(webpage, default='')
- thumbnail = self._og_search_thumbnail(webpage)
- timestamp = int_or_none(self._og_search_property('updated_time', webpage, fatal=False))
- width = int_or_none(self._og_search_property('video:width', webpage, fatal=False))
- height = int_or_none(self._og_search_property('video:height', webpage, fatal=False))
- duration = float_or_none(self._html_search_regex(
- r'data-duration="([^"]+)"', webpage, 'duration', fatal=False))
- view_count = str_to_int(self._html_search_regex(
- r'<(?:li|span) class="video_views">\s*([\d,\.]+)\s*plays?', webpage, 'view count', fatal=False))
- like_count = str_to_int(self._html_search_regex(
- r'class="score js-video-vote-score"[^>]+data-score="([\d,\.\s]+)">',
- webpage, 'like count', fatal=False))
+ title = video['title']
+ description = video.get('description')
+ thumbnail = video.get('thumbnail_url')
+ timestamp = parse_iso8601(video.get('date_created'), ' ')
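+ # date_created separates date and time with a space rather than 'T',
+ # hence the explicit ' ' delimiter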
+ uploader = video.get('user', {}).get('username')
+ uploader_id = video.get('user', {}).get('user_id')
+ age_limit = 18 if video.get('nsfw') is True else 0
+ duration = float_or_none(video.get('duration'))
+ view_count = int_or_none(video.get('view_count'))
+ like_count = int_or_none(video.get('likes_count'))
+ comment_count = int_or_none(video.get('comment_count'))
return {
'id': video_id,
- 'url': video_url,
'title': title,
'description': description,
'thumbnail': thumbnail,
+ 'uploader': uploader,
+ 'uploader_id': uploader_id,
+ 'age_limit': age_limit,
'timestamp': timestamp,
- 'width': width,
- 'height': height,
'duration': duration,
'view_count': view_count,
'like_count': like_count,
+ 'comment_count': comment_count,
+ 'formats': formats,
}
diff --git a/youtube_dl/extractor/vier.py b/youtube_dl/extractor/vier.py
index 15377097e..c76c20614 100644
--- a/youtube_dl/extractor/vier.py
+++ b/youtube_dl/extractor/vier.py
@@ -2,6 +2,7 @@
from __future__ import unicode_literals
import re
+import itertools
from .common import InfoExtractor
@@ -91,31 +92,27 @@ class VierVideosIE(InfoExtractor):
mobj = re.match(self._VALID_URL, url)
program = mobj.group('program')
- webpage = self._download_webpage(url, program)
-
page_id = mobj.group('page')
if page_id:
page_id = int(page_id)
start_page = page_id
- last_page = start_page + 1
playlist_id = '%s-page%d' % (program, page_id)
else:
start_page = 0
- last_page = int(self._search_regex(
- r'videos\?page=(\d+)">laatste</a>',
- webpage, 'last page', default=0)) + 1
playlist_id = program
entries = []
- for current_page_id in range(start_page, last_page):
+ for current_page_id in itertools.count(start_page):
current_page = self._download_webpage(
'http://www.vier.be/%s/videos?page=%d' % (program, current_page_id),
program,
- 'Downloading page %d' % (current_page_id + 1)) if current_page_id != page_id else webpage
+ 'Downloading page %d' % (current_page_id + 1))
page_entries = [
self.url_result('http://www.vier.be' + video_url, 'Vier')
for video_url in re.findall(
r'<h3><a href="(/[^/]+/videos/[^/]+(?:/\d+)?)">', current_page)]
entries.extend(page_entries)
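+ # 'Meer' (Dutch for 'more') links to the next page; stop when it is
+ # absent or when a specific page was requested explicitly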
+ if page_id or '>Meer<' not in current_page:
+ break
return self.playlist_result(entries, playlist_id)
diff --git a/youtube_dl/extractor/viewster.py b/youtube_dl/extractor/viewster.py
index 1742e66f4..cda02ba24 100644
--- a/youtube_dl/extractor/viewster.py
+++ b/youtube_dl/extractor/viewster.py
@@ -1,129 +1,142 @@
+# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
-from ..compat import compat_urllib_request
+from ..compat import (
+ compat_urllib_request,
+ compat_urllib_parse,
+ compat_urllib_parse_unquote,
+)
+from ..utils import (
+ determine_ext,
+ int_or_none,
+ parse_iso8601,
+ HEADRequest,
+)
class ViewsterIE(InfoExtractor):
- _VALID_URL = r'http://(?:www\.)?viewster\.com/movie/(?P<id>\d+-\d+-\d+)'
+ _VALID_URL = r'http://(?:www\.)?viewster\.com/(?:serie|movie)/(?P<id>\d+-\d+-\d+)'
_TESTS = [{
- # movielink, paymethod=fre
- 'url': 'http://www.viewster.com/movie/1293-19341-000/hout-wood/',
- 'playlist': [{
- 'md5': '8f9d94b282d80c42b378dffdbb11caf3',
- 'info_dict': {
- 'id': '1293-19341-000-movie',
- 'ext': 'flv',
- 'title': "'Hout' (Wood) - Movie",
- },
- }],
- 'info_dict': {
- 'id': '1293-19341-000',
- 'title': "'Hout' (Wood)",
- 'description': 'md5:925733185a9242ef96f436937683f33b',
- }
- }, {
- # movielink, paymethod=adv
+ # movie, Type=Movie
'url': 'http://www.viewster.com/movie/1140-11855-000/the-listening-project/',
- 'playlist': [{
- 'md5': '77a005453ca7396cbe3d35c9bea30aef',
- 'info_dict': {
- 'id': '1140-11855-000-movie',
- 'ext': 'flv',
- 'title': "THE LISTENING PROJECT - Movie",
- },
- }],
+ 'md5': '14d3cfffe66d57b41ae2d9c873416f01',
'info_dict': {
'id': '1140-11855-000',
- 'title': "THE LISTENING PROJECT",
- 'description': 'md5:714421ae9957e112e672551094bf3b08',
- }
+ 'ext': 'flv',
+ 'title': 'The listening Project',
+ 'description': 'md5:bac720244afd1a8ea279864e67baa071',
+ 'timestamp': 1214870400,
+ 'upload_date': '20080701',
+ 'duration': 4680,
+ },
}, {
- # direct links, no movielink
- 'url': 'http://www.viewster.com/movie/1198-56411-000/sinister/',
- 'playlist': [{
- 'md5': '0307b7eac6bfb21ab0577a71f6eebd8f',
- 'info_dict': {
- 'id': '1198-56411-000-trailer',
- 'ext': 'mp4',
- 'title': "Sinister - Trailer",
- },
- }, {
- 'md5': '80b9ee3ad69fb368f104cb5d9732ae95',
- 'info_dict': {
- 'id': '1198-56411-000-behind-scenes',
- 'ext': 'mp4',
- 'title': "Sinister - Behind Scenes",
- },
- }, {
- 'md5': '3b3ea897ecaa91fca57a8a94ac1b15c5',
- 'info_dict': {
- 'id': '1198-56411-000-scene-from-movie',
- 'ext': 'mp4',
- 'title': "Sinister - Scene from movie",
- },
- }],
+ # series episode, Type=Episode
+ 'url': 'http://www.viewster.com/serie/1284-19427-001/the-world-and-a-wall/',
+ 'md5': 'd5434c80fcfdb61651cc2199a88d6ba3',
'info_dict': {
- 'id': '1198-56411-000',
- 'title': "Sinister",
- 'description': 'md5:014c40b0488848de9683566a42e33372',
- }
+ 'id': '1284-19427-001',
+ 'ext': 'flv',
+ 'title': 'The World and a Wall',
+ 'description': 'md5:24814cf74d3453fdf5bfef9716d073e3',
+ 'timestamp': 1428192000,
+ 'upload_date': '20150405',
+ 'duration': 1500,
+ },
+ }, {
+ # serie, Type=Serie
+ 'url': 'http://www.viewster.com/serie/1303-19426-000/',
+ 'info_dict': {
+ 'id': '1303-19426-000',
+ 'title': 'Is It Wrong to Try to Pick up Girls in a Dungeon?',
+ 'description': 'md5:eeda9bef25b0d524b3a29a97804c2f11',
+ },
+ 'playlist_count': 13,
+ }, {
+ # unfinished series, no Type field
+ 'url': 'http://www.viewster.com/serie/1284-19427-000/baby-steps-season-2/',
+ 'info_dict': {
+ 'id': '1284-19427-000',
+ 'title': 'Baby Steps—Season 2',
+ 'description': 'md5:e7097a8fc97151e25f085c9eb7a1cdb1',
+ },
+ 'playlist_mincount': 16,
}]
_ACCEPT_HEADER = 'application/json, text/javascript, */*; q=0.01'
- def _real_extract(self, url):
- video_id = self._match_id(url)
-
- request = compat_urllib_request.Request(
- 'http://api.live.viewster.com/api/v1/movie/%s' % video_id)
+ def _download_json(self, url, video_id, note='Downloading JSON metadata', fatal=True):
+ request = compat_urllib_request.Request(url)
request.add_header('Accept', self._ACCEPT_HEADER)
+ request.add_header('Auth-token', self._AUTH_TOKEN)
+ return super(ViewsterIE, self)._download_json(request, video_id, note, fatal=fatal)
- movie = self._download_json(
- request, video_id, 'Downloading movie metadata JSON')
-
- title = movie.get('title') or movie['original_title']
- description = movie.get('synopsis')
- thumbnail = movie.get('large_artwork') or movie.get('artwork')
-
- entries = []
- for clip in movie['play_list']:
- entry = None
-
- # movielink api
- link_request = clip.get('link_request')
- if link_request:
- request = compat_urllib_request.Request(
- 'http://api.live.viewster.com/api/v1/movielink?movieid=%(movieid)s&action=%(action)s&paymethod=%(paymethod)s&price=%(price)s&currency=%(currency)s&language=%(language)s&subtitlelanguage=%(subtitlelanguage)s&ischromecast=%(ischromecast)s'
- % link_request)
- request.add_header('Accept', self._ACCEPT_HEADER)
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+ # Get 'api_token' cookie
+ self._request_webpage(HEADRequest(url), video_id)
+ cookies = self._get_cookies(url)
+ self._AUTH_TOKEN = compat_urllib_parse_unquote(cookies['api_token'].value)
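+ # the unquoted api_token cookie doubles as the Auth-token header on all
+ # subsequent API requests (see _download_json above)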
- movie_link = self._download_json(
- request, video_id, 'Downloading movie link JSON', fatal=False)
+ info = self._download_json(
+ 'https://public-api.viewster.com/search/%s' % video_id,
+ video_id, 'Downloading entry JSON')
- if movie_link:
- formats = self._extract_f4m_formats(
- movie_link['url'] + '&hdcore=3.2.0&plugin=flowplayer-3.2.0.1', video_id)
- self._sort_formats(formats)
- entry = {
- 'formats': formats,
- }
+ entry_id = info.get('Id') or info['id']
- # direct link
- clip_url = clip.get('clip_data', {}).get('url')
- if clip_url:
- entry = {
- 'url': clip_url,
- 'ext': 'mp4',
- }
+ # an unfinished series has no Type
+ if info.get('Type') in ['Serie', None]:
+ episodes = self._download_json(
+ 'https://public-api.viewster.com/series/%s/episodes' % entry_id,
+ video_id, 'Downloading series JSON')
+ entries = [
+ self.url_result(
+ 'http://www.viewster.com/movie/%s' % episode['OriginId'], 'Viewster')
+ for episode in episodes]
+ title = (info.get('Title') or info['Synopsis']['Title']).strip()
+ description = info.get('Synopsis', {}).get('Detailed')
+ return self.playlist_result(entries, video_id, title, description)
- if entry:
- entry.update({
- 'id': '%s-%s' % (video_id, clip['canonical_title']),
- 'title': '%s - %s' % (title, clip['title']),
+ formats = []
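+ # request both an HDS (f4m) and an HLS (m3u8) manifest and collect
+ # whichever formats are actually available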
+ for media_type in ('application/f4m+xml', 'application/x-mpegURL'):
+ media = self._download_json(
+ 'https://public-api.viewster.com/movies/%s/video?mediaType=%s'
+ % (entry_id, compat_urllib_parse.quote(media_type)),
+ video_id, 'Downloading %s JSON' % media_type, fatal=False)
+ if not media:
+ continue
+ video_url = media.get('Uri')
+ if not video_url:
+ continue
+ ext = determine_ext(video_url)
+ if ext == 'f4m':
+ video_url += '&' if '?' in video_url else '?'
+ video_url += 'hdcore=3.2.0&plugin=flowplayer-3.2.0.1'
+ formats.extend(self._extract_f4m_formats(
+ video_url, video_id, f4m_id='hds'))
+ elif ext == 'm3u8':
+ formats.extend(self._extract_m3u8_formats(
+ video_url, video_id, 'mp4', m3u8_id='hls',
+ fatal=False # m3u8 manifests sometimes fail
+ ))
+ else:
+ formats.append({
+ 'url': video_url,
})
- entries.append(entry)
+ self._sort_formats(formats)
- playlist = self.playlist_result(entries, video_id, title, description)
- playlist['thumbnail'] = thumbnail
- return playlist
+ synopsis = info.get('Synopsis', {})
+ # Prefer the top-level Title over Synopsis.Title since it is less messy
+ title = (info.get('Title') or synopsis['Title']).strip()
+ description = synopsis.get('Detailed') or synopsis.get('Short')
+ duration = int_or_none(info.get('Duration'))
+ timestamp = parse_iso8601(info.get('ReleaseDate'))
+
+ return {
+ 'id': video_id,
+ 'title': title,
+ 'description': description,
+ 'timestamp': timestamp,
+ 'duration': duration,
+ 'formats': formats,
+ }
diff --git a/youtube_dl/extractor/viki.py b/youtube_dl/extractor/viki.py
index 51cdc6b65..ddbd395c8 100644
--- a/youtube_dl/extractor/viki.py
+++ b/youtube_dl/extractor/viki.py
@@ -88,6 +88,14 @@ class VikiBaseIE(InfoExtractor):
if not self._token:
self.report_warning('Unable to get session token, login has probably failed')
+ @staticmethod
+ def dict_selection(dict_obj, preferred_key):
+ if preferred_key in dict_obj:
+ return dict_obj.get(preferred_key)
+
+ filtered = [v for v in dict_obj.values() if v]
+ return filtered[0] if filtered else None
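+ # e.g. dict_selection({'fr': 'Titre'}, 'en') -> 'Titre' (falls back to
+ # the first non-empty value when the preferred key is missing)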
+
class VikiIE(VikiBaseIE):
IE_NAME = 'viki'
@@ -173,6 +181,19 @@ class VikiIE(VikiBaseIE):
}, {
'url': 'http://www.viki.com/player/44699v',
'only_matching': True,
+ }, {
+ # non-English description
+ 'url': 'http://www.viki.com/videos/158036v-love-in-magic',
+ 'md5': '1713ae35df5a521b31f6dc40730e7c9c',
+ 'info_dict': {
+ 'id': '158036v',
+ 'ext': 'mp4',
+ 'uploader': 'I Planet Entertainment',
+ 'upload_date': '20111122',
+ 'timestamp': 1321985454,
+ 'description': 'md5:44b1e46619df3a072294645c770cef36',
+ 'title': 'Love In Magic',
+ },
}]
def _real_extract(self, url):
@@ -181,19 +202,14 @@ class VikiIE(VikiBaseIE):
video = self._call_api(
'videos/%s.json' % video_id, video_id, 'Downloading video JSON')
- title = None
- titles = video.get('titles')
- if titles:
- title = titles.get('en') or titles[titles.keys()[0]]
+ title = self.dict_selection(video.get('titles', {}), 'en')
if not title:
title = 'Episode %d' % video.get('number') if video.get('type') == 'episode' else video.get('id') or video_id
- container_titles = video.get('container', {}).get('titles')
- if container_titles:
- container_title = container_titles.get('en') or container_titles[container_titles.keys()[0]]
- title = '%s - %s' % (container_title, title)
+ container_titles = video.get('container', {}).get('titles', {})
+ container_title = self.dict_selection(container_titles, 'en')
+ title = '%s - %s' % (container_title, title)
- descriptions = video.get('descriptions')
- description = descriptions.get('en') or descriptions[titles.keys()[0]] if descriptions else None
+ description = self.dict_selection(video.get('descriptions', {}), 'en')
duration = int_or_none(video.get('duration'))
timestamp = parse_iso8601(video.get('created_at'))
@@ -242,8 +258,8 @@ class VikiIE(VikiBaseIE):
formats = []
for format_id, stream_dict in streams.items():
- height = self._search_regex(
- r'^(\d+)[pP]$', format_id, 'height', default=None)
+ height = int_or_none(self._search_regex(
+ r'^(\d+)[pP]$', format_id, 'height', default=None))
for protocol, format_dict in stream_dict.items():
if format_id == 'm3u8':
formats = self._extract_m3u8_formats(
@@ -299,11 +315,9 @@ class VikiChannelIE(VikiBaseIE):
'containers/%s.json' % channel_id, channel_id,
'Downloading channel JSON')
- titles = channel['titles']
- title = titles.get('en') or titles[titles.keys()[0]]
+ title = self.dict_selection(channel['titles'], 'en')
- descriptions = channel['descriptions']
- description = descriptions.get('en') or descriptions[descriptions.keys()[0]]
+ description = self.dict_selection(channel['descriptions'], 'en')
entries = []
for video_type in ('episodes', 'clips', 'movies'):
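
The dict_selection helper introduced above returns the preferred key when present and otherwise falls back to the first non-empty value. A standalone sketch of the same logic with made-up title data:

def dict_selection(dict_obj, preferred_key):
    if preferred_key in dict_obj:
        return dict_obj.get(preferred_key)
    # fall back to the first non-empty value; ordering is arbitrary for a plain dict
    filtered = [v for v in dict_obj.values() if v]
    return filtered[0] if filtered else None

print(dict_selection({'en': 'Love In Magic', 'ko': 'Korean title'}, 'en'))  # Love In Magic
print(dict_selection({'ko': 'Korean title'}, 'en'))                         # Korean title
print(dict_selection({}, 'en'))                                             # None
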
diff --git a/youtube_dl/extractor/vimeo.py b/youtube_dl/extractor/vimeo.py
index 10d6745af..50df79ca1 100644
--- a/youtube_dl/extractor/vimeo.py
+++ b/youtube_dl/extractor/vimeo.py
@@ -29,6 +29,7 @@ from ..utils import (
class VimeoBaseInfoExtractor(InfoExtractor):
_NETRC_MACHINE = 'vimeo'
_LOGIN_REQUIRED = False
+ _LOGIN_URL = 'https://vimeo.com/log_in'
def _login(self):
(username, password) = self._get_login_info()
@@ -37,21 +38,25 @@ class VimeoBaseInfoExtractor(InfoExtractor):
raise ExtractorError('No login info available, needed for using %s.' % self.IE_NAME, expected=True)
return
self.report_login()
- login_url = 'https://vimeo.com/log_in'
- webpage = self._download_webpage(login_url, None, False)
- token = self._search_regex(r'xsrft":"(.*?)"', webpage, 'login token')
+ webpage = self._download_webpage(self._LOGIN_URL, None, False)
+ token = self._extract_xsrft(webpage)
data = urlencode_postdata({
+ 'action': 'login',
'email': username,
'password': password,
- 'action': 'login',
'service': 'vimeo',
'token': token,
})
- login_request = compat_urllib_request.Request(login_url, data)
+ login_request = compat_urllib_request.Request(self._LOGIN_URL, data)
login_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
- login_request.add_header('Cookie', 'xsrft=%s' % token)
+ login_request.add_header('Referer', self._LOGIN_URL)
self._download_webpage(login_request, None, False, 'Wrong login info')
+ def _extract_xsrft(self, webpage):
+ return self._search_regex(
+ r'xsrft\s*[=:]\s*(?P<q>["\'])(?P<xsrft>.+?)(?P=q)',
+ webpage, 'login token', group='xsrft')
+
class VimeoIE(VimeoBaseInfoExtractor):
"""Information extractor for vimeo.com."""
@@ -193,7 +198,7 @@ class VimeoIE(VimeoBaseInfoExtractor):
password = self._downloader.params.get('videopassword', None)
if password is None:
raise ExtractorError('This video is protected by a password, use the --video-password option', expected=True)
- token = self._search_regex(r'xsrft[\s=:"\']+([^"\']+)', webpage, 'login token')
+ token = self._extract_xsrft(webpage)
data = urlencode_postdata({
'password': password,
'token': token,
@@ -203,7 +208,7 @@ class VimeoIE(VimeoBaseInfoExtractor):
url = url.replace('http://', 'https://')
password_request = compat_urllib_request.Request(url + '/password', data)
password_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
- password_request.add_header('Cookie', 'xsrft=%s' % token)
+ password_request.add_header('Referer', url)
return self._download_webpage(
password_request, video_id,
'Verifying the password', 'Wrong password')
@@ -422,10 +427,11 @@ class VimeoIE(VimeoBaseInfoExtractor):
}
-class VimeoChannelIE(InfoExtractor):
+class VimeoChannelIE(VimeoBaseInfoExtractor):
IE_NAME = 'vimeo:channel'
_VALID_URL = r'https://vimeo\.com/channels/(?P<id>[^/?#]+)/?(?:$|[?#])'
_MORE_PAGES_INDICATOR = r'<a.+?rel="next"'
+ _TITLE = None
_TITLE_RE = r'<link rel="alternate"[^>]+?title="(.*?)"'
_TESTS = [{
'url': 'https://vimeo.com/channels/tributes',
@@ -440,7 +446,7 @@ class VimeoChannelIE(InfoExtractor):
return '%s/videos/page:%d/' % (base_url, pagenum)
def _extract_list_title(self, webpage):
- return self._html_search_regex(self._TITLE_RE, webpage, 'list title')
+ return self._TITLE or self._html_search_regex(self._TITLE_RE, webpage, 'list title')
def _login_list_password(self, page_url, list_id, webpage):
login_form = self._search_regex(
@@ -453,7 +459,7 @@ class VimeoChannelIE(InfoExtractor):
if password is None:
raise ExtractorError('This album is protected by a password, use the --video-password option', expected=True)
fields = self._hidden_inputs(login_form)
- token = self._search_regex(r'xsrft[\s=:"\']+([^"\']+)', webpage, 'login token')
+ token = self._extract_xsrft(webpage)
fields['token'] = token
fields['password'] = password
post = urlencode_postdata(fields)
@@ -499,7 +505,7 @@ class VimeoChannelIE(InfoExtractor):
class VimeoUserIE(VimeoChannelIE):
IE_NAME = 'vimeo:user'
- _VALID_URL = r'https://vimeo\.com/(?![0-9]+(?:$|[?#/]))(?P<name>[^/]+)(?:/videos|[#?]|$)'
+ _VALID_URL = r'https://vimeo\.com/(?!(?:[0-9]+|watchlater)(?:$|[?#/]))(?P<name>[^/]+)(?:/videos|[#?]|$)'
_TITLE_RE = r'<a[^>]+?class="user">([^<>]+?)</a>'
_TESTS = [{
'url': 'https://vimeo.com/nkistudio/videos',
@@ -603,14 +609,14 @@ class VimeoReviewIE(InfoExtractor):
return self.url_result(player_url, 'Vimeo', video_id)
-class VimeoWatchLaterIE(VimeoBaseInfoExtractor, VimeoChannelIE):
+class VimeoWatchLaterIE(VimeoChannelIE):
IE_NAME = 'vimeo:watchlater'
IE_DESC = 'Vimeo watch later list, "vimeowatchlater" keyword (requires authentication)'
- _VALID_URL = r'https://vimeo\.com/home/watchlater|:vimeowatchlater'
+ _VALID_URL = r'https://vimeo\.com/(?:home/)?watchlater|:vimeowatchlater'
+ _TITLE = 'Watch Later'
_LOGIN_REQUIRED = True
- _TITLE_RE = r'href="/home/watchlater".*?>(.*?)<'
_TESTS = [{
- 'url': 'https://vimeo.com/home/watchlater',
+ 'url': 'https://vimeo.com/watchlater',
'only_matching': True,
}]
@@ -626,7 +632,7 @@ class VimeoWatchLaterIE(VimeoBaseInfoExtractor, VimeoChannelIE):
return request
def _real_extract(self, url):
- return self._extract_videos('watchlater', 'https://vimeo.com/home/watchlater')
+ return self._extract_videos('watchlater', 'https://vimeo.com/watchlater')
class VimeoLikesIE(InfoExtractor):
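
The _extract_xsrft helper above centralizes the token regex shared by the login, video-password and album-password flows. A sketch applying the same pattern to made-up page snippets (the snippets are illustrative, not real Vimeo markup):

import re

XSRFT_RE = r'xsrft\s*[=:]\s*(?P<q>["\'])(?P<xsrft>.+?)(?P=q)'

for snippet in ("vimeo.xsrft = 'abc123';", 'config = {xsrft: "tok456"}'):
    m = re.search(XSRFT_RE, snippet)
    print(m.group('xsrft') if m else None)  # abc123, then tok456
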
diff --git a/youtube_dl/extractor/vlive.py b/youtube_dl/extractor/vlive.py
new file mode 100644
index 000000000..86c1cb5ef
--- /dev/null
+++ b/youtube_dl/extractor/vlive.py
@@ -0,0 +1,86 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import hmac
+from hashlib import sha1
+from base64 import b64encode
+from time import time
+
+from .common import InfoExtractor
+from ..utils import (
+ ExtractorError,
+ determine_ext
+)
+from ..compat import compat_urllib_parse
+
+
+class VLiveIE(InfoExtractor):
+ IE_NAME = 'vlive'
+ # www.vlive.tv/video/ links redirect to m.vlive.tv/video/ for mobile devices
+ _VALID_URL = r'https?://(?:(www|m)\.)?vlive\.tv/video/(?P<id>[0-9]+)'
+ _TEST = {
+ 'url': 'http://m.vlive.tv/video/1326',
+ 'md5': 'cc7314812855ce56de70a06a27314983',
+ 'info_dict': {
+ 'id': '1326',
+ 'ext': 'mp4',
+ 'title': '[V] Girl\'s Day\'s Broadcast',
+ 'creator': 'Girl\'s Day',
+ },
+ }
+ _SECRET = 'rFkwZet6pqk1vQt6SxxUkAHX7YL3lmqzUMrU4IDusTo4jEBdtOhNfT4BYYAdArwH'
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+
+ webpage = self._download_webpage(
+ 'http://m.vlive.tv/video/%s' % video_id,
+ video_id, note='Download video page')
+
+ title = self._og_search_title(webpage)
+ thumbnail = self._og_search_thumbnail(webpage)
+ creator = self._html_search_regex(
+ r'<span[^>]+class="name">([^<>]+)</span>', webpage, 'creator')
+
+ url = 'http://global.apis.naver.com/globalV/globalV/vod/%s/playinfo?' % video_id
+ msgpad = '%.0f' % (time() * 1000)
+ md = b64encode(
+ hmac.new(self._SECRET.encode('ascii'),
+ (url[:255] + msgpad).encode('ascii'), sha1).digest()
+ )
+ url += '&' + compat_urllib_parse.urlencode({'msgpad': msgpad, 'md': md})
+ playinfo = self._download_json(url, video_id, 'Downloading video json')
+
+ if playinfo.get('message', '') != 'success':
+ raise ExtractorError(playinfo.get('message', 'JSON request unsuccessful'))
+
+ if not playinfo.get('result'):
+ raise ExtractorError('No videos found.')
+
+ formats = []
+ for vid in playinfo['result'].get('videos', {}).get('list', []):
+ formats.append({
+ 'url': vid['source'],
+ 'ext': 'mp4',
+ 'abr': vid.get('bitrate', {}).get('audio'),
+ 'vbr': vid.get('bitrate', {}).get('video'),
+ 'format_id': vid['encodingOption']['name'],
+ 'height': vid.get('height'),
+ 'width': vid.get('width'),
+ })
+ self._sort_formats(formats)
+
+ subtitles = {}
+ for caption in playinfo['result'].get('captions', {}).get('list', []):
+ subtitles[caption['language']] = [
+ {'ext': determine_ext(caption['source'], default_ext='vtt'),
+ 'url': caption['source']}]
+
+ return {
+ 'id': video_id,
+ 'title': title,
+ 'creator': creator,
+ 'thumbnail': thumbnail,
+ 'formats': formats,
+ 'subtitles': subtitles,
+ }
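
The playinfo request above is signed by computing an HMAC-SHA1 over the URL (truncated to 255 characters) concatenated with a millisecond timestamp, base64-encoding the digest, and sending it as the md parameter. A simplified, Python 3-only sketch of that scheme; 'placeholder-secret' stands in for the extractor's _SECRET value:

import hmac
from base64 import b64encode
from hashlib import sha1
from time import time
from urllib.parse import urlencode

def sign_playinfo_url(url, secret):
    msgpad = '%.0f' % (time() * 1000)
    digest = hmac.new(secret.encode('ascii'),
                      (url[:255] + msgpad).encode('ascii'), sha1).digest()
    md = b64encode(digest).decode('ascii')  # decoded so urlencode emits a plain string
    return url + '&' + urlencode({'msgpad': msgpad, 'md': md})

print(sign_playinfo_url(
    'http://global.apis.naver.com/globalV/globalV/vod/1326/playinfo?', 'placeholder-secret'))
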
diff --git a/youtube_dl/extractor/washingtonpost.py b/youtube_dl/extractor/washingtonpost.py
index 72eb010f8..ec8b99998 100644
--- a/youtube_dl/extractor/washingtonpost.py
+++ b/youtube_dl/extractor/washingtonpost.py
@@ -19,25 +19,25 @@ class WashingtonPostIE(InfoExtractor):
'title': 'Sinkhole of bureaucracy',
},
'playlist': [{
- 'md5': '79132cc09ec5309fa590ae46e4cc31bc',
+ 'md5': 'b9be794ceb56c7267d410a13f99d801a',
'info_dict': {
'id': 'fc433c38-b146-11e3-b8b3-44b1d1cd4c1f',
'ext': 'mp4',
'title': 'Breaking Points: The Paper Mine',
- 'duration': 1287,
+ 'duration': 1290,
'description': 'Overly complicated paper pushing is nothing new to government bureaucracy. But the way federal retirement applications are filed may be the most outdated. David Fahrenthold explains.',
'uploader': 'The Washington Post',
'timestamp': 1395527908,
'upload_date': '20140322',
},
}, {
- 'md5': 'e1d5734c06865cc504ad99dc2de0d443',
+ 'md5': '1fff6a689d8770966df78c8cb6c8c17c',
'info_dict': {
'id': '41255e28-b14a-11e3-b8b3-44b1d1cd4c1f',
'ext': 'mp4',
'title': 'The town bureaucracy sustains',
'description': 'Underneath the friendly town of Boyers is a sea of government paperwork. In a disused limestone mine, hundreds of locals now track, file and process retirement applications for the federal government. We set out to find out what it\'s like to do paperwork 230 feet underground.',
- 'duration': 2217,
+ 'duration': 2220,
'timestamp': 1395528005,
'upload_date': '20140322',
'uploader': 'The Washington Post',
diff --git a/youtube_dl/extractor/wimp.py b/youtube_dl/extractor/wimp.py
index f69d46a28..e4f50e64c 100644
--- a/youtube_dl/extractor/wimp.py
+++ b/youtube_dl/extractor/wimp.py
@@ -1,40 +1,33 @@
from __future__ import unicode_literals
-import re
-
from .common import InfoExtractor
from .youtube import YoutubeIE
class WimpIE(InfoExtractor):
- _VALID_URL = r'http://(?:www\.)?wimp\.com/([^/]+)/'
+ _VALID_URL = r'http://(?:www\.)?wimp\.com/(?P<id>[^/]+)/'
_TESTS = [{
'url': 'http://www.wimp.com/maruexhausted/',
- 'md5': 'f1acced123ecb28d9bb79f2479f2b6a1',
+ 'md5': 'ee21217ffd66d058e8b16be340b74883',
'info_dict': {
'id': 'maruexhausted',
- 'ext': 'flv',
+ 'ext': 'mp4',
'title': 'Maru is exhausted.',
'description': 'md5:57e099e857c0a4ea312542b684a869b8',
}
}, {
- # youtube video
'url': 'http://www.wimp.com/clowncar/',
+ 'md5': '4e2986c793694b55b37cf92521d12bb4',
'info_dict': {
- 'id': 'cG4CEr2aiSg',
+ 'id': 'clowncar',
'ext': 'mp4',
- 'title': 'Basset hound clown car...incredible!',
- 'description': 'md5:8d228485e0719898c017203f900b3a35',
- 'uploader': 'Gretchen Hoey',
- 'uploader_id': 'gretchenandjeff1',
- 'upload_date': '20140303',
+ 'title': 'It\'s like a clown car.',
+ 'description': 'md5:0e56db1370a6e49c5c1d19124c0d2fb2',
},
- 'add_ie': ['Youtube'],
}]
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group(1)
+ video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
video_url = self._search_regex(
[r"[\"']file[\"']\s*[:,]\s*[\"'](.+?)[\"']", r"videoId\s*:\s*[\"']([^\"']+)[\"']"],
diff --git a/youtube_dl/extractor/xhamster.py b/youtube_dl/extractor/xhamster.py
index b4ad513a0..97315750f 100644
--- a/youtube_dl/extractor/xhamster.py
+++ b/youtube_dl/extractor/xhamster.py
@@ -4,7 +4,6 @@ import re
from .common import InfoExtractor
from ..utils import (
- ExtractorError,
unified_strdate,
str_to_int,
int_or_none,
@@ -22,7 +21,7 @@ class XHamsterIE(InfoExtractor):
'ext': 'mp4',
'title': 'FemaleAgent Shy beauty takes the bait',
'upload_date': '20121014',
- 'uploader_id': 'Ruseful2011',
+ 'uploader': 'Ruseful2011',
'duration': 893,
'age_limit': 18,
}
@@ -34,7 +33,7 @@ class XHamsterIE(InfoExtractor):
'ext': 'mp4',
'title': 'Britney Spears Sexy Booty',
'upload_date': '20130914',
- 'uploader_id': 'jojo747400',
+ 'uploader': 'jojo747400',
'duration': 200,
'age_limit': 18,
}
@@ -46,12 +45,12 @@ class XHamsterIE(InfoExtractor):
]
def _real_extract(self, url):
- def extract_video_url(webpage):
- mp4 = re.search(r'<video\s+.*?file="([^"]+)".*?>', webpage)
- if mp4 is None:
- raise ExtractorError('Unable to extract media URL')
- else:
- return mp4.group(1)
+ def extract_video_url(webpage, name):
+ return self._search_regex(
+ [r'''file\s*:\s*(?P<q>["'])(?P<mp4>.+?)(?P=q)''',
+ r'''<a\s+href=(?P<q>["'])(?P<mp4>.+?)(?P=q)\s+class=["']mp4Thumb''',
+ r'''<video[^>]+file=(?P<q>["'])(?P<mp4>.+?)(?P=q)[^>]*>'''],
+ webpage, name, group='mp4')
def is_hd(webpage):
return '<div class=\'icon iconHD\'' in webpage
@@ -75,10 +74,14 @@ class XHamsterIE(InfoExtractor):
if upload_date:
upload_date = unified_strdate(upload_date)
- uploader_id = self._html_search_regex(r'<a href=\'/user/[^>]+>(?P<uploader_id>[^<]+)',
- webpage, 'uploader id', default='anonymous')
+ uploader = self._html_search_regex(
+ r"<a href='[^']+xhamster\.com/user/[^>]+>(?P<uploader>[^<]+)",
+ webpage, 'uploader', default='anonymous')
- thumbnail = self._html_search_regex(r'<video\s+.*?poster="([^"]+)".*?>', webpage, 'thumbnail', fatal=False)
+ thumbnail = self._search_regex(
+ [r'''thumb\s*:\s*(?P<q>["'])(?P<thumbnail>.+?)(?P=q)''',
+ r'''<video[^>]+poster=(?P<q>["'])(?P<thumbnail>.+?)(?P=q)[^>]*>'''],
+ webpage, 'thumbnail', fatal=False, group='thumbnail')
duration = parse_duration(self._html_search_regex(r'<span>Runtime:</span> (\d+:\d+)</div>',
webpage, 'duration', fatal=False))
@@ -97,7 +100,9 @@ class XHamsterIE(InfoExtractor):
hd = is_hd(webpage)
- video_url = extract_video_url(webpage)
+ format_id = 'hd' if hd else 'sd'
+
+ video_url = extract_video_url(webpage, format_id)
formats = [{
'url': video_url,
'format_id': 'hd' if hd else 'sd',
@@ -108,7 +113,7 @@ class XHamsterIE(InfoExtractor):
mrss_url = self._search_regex(r'<link rel="canonical" href="([^"]+)', webpage, 'mrss_url')
webpage = self._download_webpage(mrss_url + '?hd', video_id, note='Downloading HD webpage')
if is_hd(webpage):
- video_url = extract_video_url(webpage)
+ video_url = extract_video_url(webpage, 'hd')
formats.append({
'url': video_url,
'format_id': 'hd',
@@ -122,7 +127,7 @@ class XHamsterIE(InfoExtractor):
'title': title,
'description': description,
'upload_date': upload_date,
- 'uploader_id': uploader_id,
+ 'uploader': uploader,
'thumbnail': thumbnail,
'duration': duration,
'view_count': view_count,
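
The rewritten extract_video_url above tries several patterns in order and returns the named mp4 group of the first one that matches, mirroring _search_regex with a pattern list. A standalone sketch against a made-up page fragment:

import re

MP4_PATTERNS = [
    r'''file\s*:\s*(?P<q>["'])(?P<mp4>.+?)(?P=q)''',
    r'''<a\s+href=(?P<q>["'])(?P<mp4>.+?)(?P=q)\s+class=["']mp4Thumb''',
    r'''<video[^>]+file=(?P<q>["'])(?P<mp4>.+?)(?P=q)[^>]*>''',
]

def extract_mp4(webpage):
    for pattern in MP4_PATTERNS:
        m = re.search(pattern, webpage)
        if m:
            return m.group('mp4')
    return None

print(extract_mp4("player.setup({file: 'http://cdn.example.com/clip.mp4'})"))
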
diff --git a/youtube_dl/extractor/xuite.py b/youtube_dl/extractor/xuite.py
index 5aac8adb3..8bbac54e2 100644
--- a/youtube_dl/extractor/xuite.py
+++ b/youtube_dl/extractor/xuite.py
@@ -19,7 +19,7 @@ class XuiteIE(InfoExtractor):
_TESTS = [{
# Audio
'url': 'http://vlog.xuite.net/play/RGkzc1ZULTM4NjA5MTQuZmx2',
- 'md5': '63a42c705772aa53fd4c1a0027f86adf',
+ 'md5': 'e79284c87b371424885448d11f6398c8',
'info_dict': {
'id': '3860914',
'ext': 'mp3',
diff --git a/youtube_dl/extractor/yahoo.py b/youtube_dl/extractor/yahoo.py
index f9afbdbab..fca5ddc69 100644
--- a/youtube_dl/extractor/yahoo.py
+++ b/youtube_dl/extractor/yahoo.py
@@ -101,7 +101,7 @@ class YahooIE(InfoExtractor):
}
}, {
'url': 'http://news.yahoo.com/video/china-moses-crazy-blues-104538833.html',
- 'md5': '67010fdf3a08d290e060a4dd96baa07b',
+ 'md5': '88e209b417f173d86186bef6e4d1f160',
'info_dict': {
'id': 'f885cf7f-43d4-3450-9fac-46ac30ece521',
'ext': 'mp4',
@@ -144,6 +144,17 @@ class YahooIE(InfoExtractor):
}, {
'url': 'https://tw.news.yahoo.com/-100120367.html',
'only_matching': True,
+ }, {
+ # The query result is embedded in the webpage, but an explicit request to the video API fails with a geo restriction
+ 'url': 'https://screen.yahoo.com/community/communitary-community-episode-1-ladders-154501237.html',
+ 'md5': '4fbafb9c9b6f07aa8f870629f6671b35',
+ 'info_dict': {
+ 'id': '1f32853c-a271-3eef-8cb6-f6d6872cb504',
+ 'ext': 'mp4',
+ 'title': 'Communitary - Community Episode 1: Ladders',
+ 'description': 'md5:8fc39608213295748e1e289807838c97',
+ 'duration': 1646,
+ },
}
]
@@ -171,6 +182,19 @@ class YahooIE(InfoExtractor):
if nbc_sports_url:
return self.url_result(nbc_sports_url, 'NBCSportsVPlayer')
+ # The query result is often embedded in the webpage as JSON. Sometimes an explicit
+ # request to the video API fails with a geo restriction, so prefer the embedded
+ # query result when it is present.
+ config_json = self._search_regex(
+ r'window\.Af\.bootstrap\[[^\]]+\]\s*=\s*({.*?"applet_type"\s*:\s*"td-applet-videoplayer".*?});(?:</script>|$)',
+ webpage, 'videoplayer applet', default=None)
+ if config_json:
+ config = self._parse_json(config_json, display_id, fatal=False)
+ if config:
+ sapi = config.get('models', {}).get('applet_model', {}).get('data', {}).get('sapi')
+ if sapi:
+ return self._extract_info(display_id, sapi, webpage)
+
items_json = self._search_regex(
r'mediaItems: ({.*?})$', webpage, 'items', flags=re.MULTILINE,
default=None)
@@ -190,22 +214,10 @@ class YahooIE(InfoExtractor):
video_id = info['id']
return self._get_info(video_id, display_id, webpage)
- def _get_info(self, video_id, display_id, webpage):
- region = self._search_regex(
- r'\\?"region\\?"\s*:\s*\\?"([^"]+?)\\?"',
- webpage, 'region', fatal=False, default='US')
- data = compat_urllib_parse.urlencode({
- 'protocol': 'http',
- 'region': region,
- })
- query_url = (
- 'https://video.media.yql.yahoo.com/v1/video/sapi/streams/'
- '{id}?{data}'.format(id=video_id, data=data))
- query_result = self._download_json(
- query_url, display_id, 'Downloading video info')
-
- info = query_result['query']['results']['mediaObj'][0]
+ def _extract_info(self, display_id, query, webpage):
+ info = query['query']['results']['mediaObj'][0]
meta = info.get('meta')
+ video_id = info.get('id')
if not meta:
msg = info['status'].get('msg')
@@ -231,6 +243,9 @@ class YahooIE(InfoExtractor):
'ext': 'flv',
})
else:
+ if s.get('format') == 'm3u8_playlist':
+ format_info['protocol'] = 'm3u8_native'
+ format_info['ext'] = 'mp4'
format_url = compat_urlparse.urljoin(host, path)
format_info['url'] = format_url
formats.append(format_info)
@@ -264,6 +279,21 @@ class YahooIE(InfoExtractor):
'subtitles': subtitles,
}
+ def _get_info(self, video_id, display_id, webpage):
+ region = self._search_regex(
+ r'\\?"region\\?"\s*:\s*\\?"([^"]+?)\\?"',
+ webpage, 'region', fatal=False, default='US')
+ data = compat_urllib_parse.urlencode({
+ 'protocol': 'http',
+ 'region': region,
+ })
+ query_url = (
+ 'https://video.media.yql.yahoo.com/v1/video/sapi/streams/'
+ '{id}?{data}'.format(id=video_id, data=data))
+ query_result = self._download_json(
+ query_url, display_id, 'Downloading video info')
+ return self._extract_info(display_id, query_result, webpage)
+
class YahooSearchIE(SearchInfoExtractor):
IE_DESC = 'Yahoo screen search'
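
The change above first looks for the bootstrap config embedded in the page and only falls back to the streams API when it is missing. A sketch of that lookup against a made-up page snippet; the regex is the one added in the diff:

import json
import re

CONFIG_RE = (r'window\.Af\.bootstrap\[[^\]]+\]\s*=\s*'
             r'({.*?"applet_type"\s*:\s*"td-applet-videoplayer".*?});(?:</script>|$)')

page = ('window.Af.bootstrap["player"] = {"applet_type": "td-applet-videoplayer", '
        '"models": {"applet_model": {"data": {"sapi": {"query": {}}}}}};</script>')

m = re.search(CONFIG_RE, page)
if m:
    config = json.loads(m.group(1))
    sapi = config.get('models', {}).get('applet_model', {}).get('data', {}).get('sapi')
    print(bool(sapi))  # True: the embedded query result can be used directly
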
diff --git a/youtube_dl/extractor/yandexmusic.py b/youtube_dl/extractor/yandexmusic.py
index f4c0f5702..4098e4629 100644
--- a/youtube_dl/extractor/yandexmusic.py
+++ b/youtube_dl/extractor/yandexmusic.py
@@ -1,18 +1,38 @@
-# coding=utf-8
+# coding: utf-8
from __future__ import unicode_literals
import re
import hashlib
from .common import InfoExtractor
-from ..compat import compat_str
+from ..compat import (
+ compat_str,
+ compat_urllib_parse,
+ compat_urllib_request,
+)
from ..utils import (
int_or_none,
float_or_none,
)
-class YandexMusicBaseIE(InfoExtractor):
+class YandexMusicTrackIE(InfoExtractor):
+ IE_NAME = 'yandexmusic:track'
+ IE_DESC = 'Яндекс.Музыка - Трек'
+ _VALID_URL = r'https?://music\.yandex\.(?:ru|kz|ua|by)/album/(?P<album_id>\d+)/track/(?P<id>\d+)'
+
+ _TEST = {
+ 'url': 'http://music.yandex.ru/album/540508/track/4878838',
+ 'md5': 'f496818aa2f60b6c0062980d2e00dc20',
+ 'info_dict': {
+ 'id': '4878838',
+ 'ext': 'mp3',
+ 'title': 'Carlo Ambrosio - Gypsy Eyes 1',
+ 'filesize': 4628061,
+ 'duration': 193.04,
+ }
+ }
+
def _get_track_url(self, storage_dir, track_id):
data = self._download_json(
'http://music.yandex.ru/api/v1.5/handlers/api-jsonp.jsx?action=getTrackSrc&p=download-info/%s'
@@ -35,24 +55,6 @@ class YandexMusicBaseIE(InfoExtractor):
'duration': float_or_none(track.get('durationMs'), 1000),
}
-
-class YandexMusicTrackIE(YandexMusicBaseIE):
- IE_NAME = 'yandexmusic:track'
- IE_DESC = 'Яндекс.Музыка - Трек'
- _VALID_URL = r'https?://music\.yandex\.(?:ru|kz|ua|by)/album/(?P<album_id>\d+)/track/(?P<id>\d+)'
-
- _TEST = {
- 'url': 'http://music.yandex.ru/album/540508/track/4878838',
- 'md5': 'f496818aa2f60b6c0062980d2e00dc20',
- 'info_dict': {
- 'id': '4878838',
- 'ext': 'mp3',
- 'title': 'Carlo Ambrosio - Gypsy Eyes 1',
- 'filesize': 4628061,
- 'duration': 193.04,
- }
- }
-
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
album_id, track_id = mobj.group('album_id'), mobj.group('id')
@@ -64,7 +66,15 @@ class YandexMusicTrackIE(YandexMusicBaseIE):
return self._get_track_info(track)
-class YandexMusicAlbumIE(YandexMusicBaseIE):
+class YandexMusicPlaylistBaseIE(InfoExtractor):
+ def _build_playlist(self, tracks):
+ return [
+ self.url_result(
+ 'http://music.yandex.ru/album/%s/track/%s' % (track['albums'][0]['id'], track['id']))
+ for track in tracks if track.get('albums') and isinstance(track.get('albums'), list)]
+
+
+class YandexMusicAlbumIE(YandexMusicPlaylistBaseIE):
IE_NAME = 'yandexmusic:album'
IE_DESC = 'Яндекс.Музыка - Альбом'
_VALID_URL = r'https?://music\.yandex\.(?:ru|kz|ua|by)/album/(?P<id>\d+)/?(\?|$)'
@@ -85,7 +95,7 @@ class YandexMusicAlbumIE(YandexMusicBaseIE):
'http://music.yandex.ru/handlers/album.jsx?album=%s' % album_id,
album_id, 'Downloading album JSON')
- entries = [self._get_track_info(track) for track in album['volumes'][0]]
+ entries = self._build_playlist(album['volumes'][0])
title = '%s - %s' % (album['artists'][0]['name'], album['title'])
year = album.get('year')
@@ -95,12 +105,12 @@ class YandexMusicAlbumIE(YandexMusicBaseIE):
return self.playlist_result(entries, compat_str(album['id']), title)
-class YandexMusicPlaylistIE(YandexMusicBaseIE):
+class YandexMusicPlaylistIE(YandexMusicPlaylistBaseIE):
IE_NAME = 'yandexmusic:playlist'
IE_DESC = 'Яндекс.Музыка - Плейлист'
_VALID_URL = r'https?://music\.yandex\.(?:ru|kz|ua|by)/users/[^/]+/playlists/(?P<id>\d+)'
- _TEST = {
+ _TESTS = [{
'url': 'http://music.yandex.ru/users/music.partners/playlists/1245',
'info_dict': {
'id': '1245',
@@ -108,20 +118,54 @@ class YandexMusicPlaylistIE(YandexMusicBaseIE):
'description': 'md5:3b9f27b0efbe53f2ee1e844d07155cc9',
},
'playlist_count': 6,
- }
+ }, {
+ # a playlist exceeding the limit of 150 tracks shipped with the webpage (see
+ # https://github.com/rg3/youtube-dl/issues/6666)
+ 'url': 'https://music.yandex.ru/users/ya.playlist/playlists/1036',
+ 'info_dict': {
+ 'id': '1036',
+ 'title': 'Музыка 90-х',
+ },
+ 'playlist_count': 310,
+ }]
def _real_extract(self, url):
playlist_id = self._match_id(url)
webpage = self._download_webpage(url, playlist_id)
- playlist = self._parse_json(
+ mu = self._parse_json(
self._search_regex(
r'var\s+Mu\s*=\s*({.+?});\s*</script>', webpage, 'player'),
- playlist_id)['pageData']['playlist']
-
- entries = [self._get_track_info(track) for track in playlist['tracks']]
+ playlist_id)
+
+ playlist = mu['pageData']['playlist']
+ tracks, track_ids = playlist['tracks'], playlist['trackIds']
+
+ # The tracks dictionary shipped with the webpage is limited to 150 tracks;
+ # missing tracks should be retrieved manually.
+ if len(tracks) < len(track_ids):
+ present_track_ids = set([compat_str(track['id']) for track in tracks if track.get('id')])
+ missing_track_ids = set(map(compat_str, track_ids)) - set(present_track_ids)
+ request = compat_urllib_request.Request(
+ 'https://music.yandex.ru/handlers/track-entries.jsx',
+ compat_urllib_parse.urlencode({
+ 'entries': ','.join(missing_track_ids),
+ 'lang': mu.get('settings', {}).get('lang', 'en'),
+ 'external-domain': 'music.yandex.ru',
+ 'overembed': 'false',
+ 'sign': mu.get('authData', {}).get('user', {}).get('sign'),
+ 'strict': 'true',
+ }).encode('utf-8'))
+ request.add_header('Referer', url)
+ request.add_header('X-Requested-With', 'XMLHttpRequest')
+
+ missing_tracks = self._download_json(
+ request, playlist_id, 'Downloading missing tracks JSON', fatal=False)
+ if missing_tracks:
+ tracks.extend(missing_tracks)
return self.playlist_result(
- entries, compat_str(playlist_id),
+ self._build_playlist(tracks),
+ compat_str(playlist_id),
playlist['title'], playlist.get('description'))
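
Only up to 150 track objects ship with the playlist webpage, so the remaining IDs are computed as a set difference and fetched from the track-entries handler in a follow-up request. A sketch of that bookkeeping with made-up IDs:

shipped_tracks = [{'id': 1, 'albums': [{'id': 10}]}, {'id': 2, 'albums': [{'id': 11}]}]
all_track_ids = [1, 2, 3, 4]

present_ids = set(str(t['id']) for t in shipped_tracks if t.get('id'))
missing_ids = set(map(str, all_track_ids)) - present_ids
print(sorted(missing_ids))  # ['3', '4'], joined with ',' for the 'entries' parameter
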
diff --git a/youtube_dl/extractor/youku.py b/youtube_dl/extractor/youku.py
index 78caeb8b3..2e81d9223 100644
--- a/youtube_dl/extractor/youku.py
+++ b/youtube_dl/extractor/youku.py
@@ -49,6 +49,17 @@ class YoukuIE(InfoExtractor):
},
'playlist_count': 13,
'skip': 'Available in China only',
+ }, {
+ 'url': 'http://v.youku.com/v_show/id_XNjA1NzA2Njgw.html',
+ 'note': 'Video protected with password',
+ 'info_dict': {
+ 'id': 'XNjA1NzA2Njgw',
+ 'title': '邢義田复旦讲座之想象中的胡人—从“左衽孔子”说起',
+ },
+ 'playlist_count': 19,
+ 'params': {
+ 'videopassword': '100600',
+ },
}]
def construct_video_urls(self, data1, data2):
@@ -185,9 +196,15 @@ class YoukuIE(InfoExtractor):
raw_data = self._download_json(req, video_id, note=note)
return raw_data['data'][0]
+ video_password = self._downloader.params.get('videopassword', None)
+
# request basic data
+ basic_data_url = 'http://v.youku.com/player/getPlayList/VideoIDS/%s' % video_id
+ if video_password:
+ basic_data_url += '?password=%s' % video_password
+
data1 = retrieve_data(
- 'http://v.youku.com/player/getPlayList/VideoIDS/%s' % video_id,
+ basic_data_url,
'Downloading JSON metadata 1')
data2 = retrieve_data(
'http://v.youku.com/player/getPlayList/VideoIDS/%s/Pf/4/ctype/12/ev/1' % video_id,
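
With the change above, a --video-password value is appended to the first getPlayList request as a password query parameter. A sketch of the URL construction, using the ID and password from the new test case:

def basic_data_url(video_id, video_password=None):
    url = 'http://v.youku.com/player/getPlayList/VideoIDS/%s' % video_id
    if video_password:
        url += '?password=%s' % video_password
    return url

print(basic_data_url('XNjA1NzA2Njgw', '100600'))
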
diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py
index e7f5c7861..97ce36550 100644
--- a/youtube_dl/extractor/youtube.py
+++ b/youtube_dl/extractor/youtube.py
@@ -19,21 +19,27 @@ from ..compat import (
compat_urllib_parse,
compat_urllib_parse_unquote,
compat_urllib_parse_unquote_plus,
+ compat_urllib_parse_urlparse,
compat_urllib_request,
compat_urlparse,
compat_str,
)
from ..utils import (
clean_html,
+ encode_dict,
ExtractorError,
float_or_none,
get_element_by_attribute,
get_element_by_id,
int_or_none,
orderedSet,
+ parse_duration,
+ remove_start,
+ smuggle_url,
str_to_int,
unescapeHTML,
unified_strdate,
+ unsmuggle_url,
uppercase_escape,
ISO3166Utils,
)
@@ -42,7 +48,7 @@ from ..utils import (
class YoutubeBaseInfoExtractor(InfoExtractor):
"""Provide base functions for Youtube extractors"""
_LOGIN_URL = 'https://accounts.google.com/ServiceLogin'
- _TWOFACTOR_URL = 'https://accounts.google.com/SecondFactor'
+ _TWOFACTOR_URL = 'https://accounts.google.com/signin/challenge'
_NETRC_MACHINE = 'youtube'
# If True it will raise an error if no login info is provided
_LOGIN_REQUIRED = False
@@ -106,10 +112,7 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
'hl': 'en_US',
}
- # Convert to UTF-8 *before* urlencode because Python 2.x's urlencode
- # chokes on unicode
- login_form = dict((k.encode('utf-8'), v.encode('utf-8')) for k, v in login_form_strs.items())
- login_data = compat_urllib_parse.urlencode(login_form).encode('ascii')
+ login_data = compat_urllib_parse.urlencode(encode_dict(login_form_strs)).encode('ascii')
req = compat_urllib_request.Request(self._LOGIN_URL, login_data)
login_results = self._download_webpage(
@@ -124,42 +127,25 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
# Two-Factor
# TODO add SMS and phone call support - these require making a request and then prompting the user
- if re.search(r'(?i)<form[^>]* id="gaia_secondfactorform"', login_results) is not None:
- tfa_code = self._get_tfa_info()
+ if re.search(r'(?i)<form[^>]* id="challenge"', login_results) is not None:
+ tfa_code = self._get_tfa_info('2-step verification code')
- if tfa_code is None:
- self._downloader.report_warning('Two-factor authentication required. Provide it with --twofactor <code>')
- self._downloader.report_warning('(Note that only TOTP (Google Authenticator App) codes work at this time.)')
+ if not tfa_code:
+ self._downloader.report_warning(
+ 'Two-factor authentication required. Provide it either interactively or with --twofactor <code> '
+ '(Note that only TOTP (Google Authenticator App) codes work at this time.)')
return False
- # Unlike the first login form, secTok and timeStmp are both required for the TFA form
-
- match = re.search(r'id="secTok"\n\s+value=\'(.+)\'/>', login_results, re.M | re.U)
- if match is None:
- self._downloader.report_warning('Failed to get secTok - did the page structure change?')
- secTok = match.group(1)
- match = re.search(r'id="timeStmp"\n\s+value=\'(.+)\'/>', login_results, re.M | re.U)
- if match is None:
- self._downloader.report_warning('Failed to get timeStmp - did the page structure change?')
- timeStmp = match.group(1)
-
- tfa_form_strs = {
- 'continue': 'https://www.youtube.com/signin?action_handle_signin=true&feature=sign_in_button&hl=en_US&nomobiletemp=1',
- 'smsToken': '',
- 'smsUserPin': tfa_code,
- 'smsVerifyPin': 'Verify',
-
- 'PersistentCookie': 'yes',
- 'checkConnection': '',
- 'checkedDomains': 'youtube',
- 'pstMsg': '1',
- 'secTok': secTok,
- 'timeStmp': timeStmp,
- 'service': 'youtube',
- 'hl': 'en_US',
- }
- tfa_form = dict((k.encode('utf-8'), v.encode('utf-8')) for k, v in tfa_form_strs.items())
- tfa_data = compat_urllib_parse.urlencode(tfa_form).encode('ascii')
+ tfa_code = remove_start(tfa_code, 'G-')
+
+ tfa_form_strs = self._form_hidden_inputs('challenge', login_results)
+
+ tfa_form_strs.update({
+ 'Pin': tfa_code,
+ 'TrustDevice': 'on',
+ })
+
+ tfa_data = compat_urllib_parse.urlencode(encode_dict(tfa_form_strs)).encode('ascii')
tfa_req = compat_urllib_request.Request(self._TWOFACTOR_URL, tfa_data)
tfa_results = self._download_webpage(
@@ -169,8 +155,8 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
if tfa_results is False:
return False
- if re.search(r'(?i)<form[^>]* id="gaia_secondfactorform"', tfa_results) is not None:
- self._downloader.report_warning('Two-factor code expired. Please try again, or use a one-use backup code instead.')
+ if re.search(r'(?i)<form[^>]* id="challenge"', tfa_results) is not None:
+ self._downloader.report_warning('Two-factor code expired or invalid. Please try again, or use a one-use backup code instead.')
return False
if re.search(r'(?i)<form[^>]* id="gaia_loginform"', tfa_results) is not None:
self._downloader.report_warning('unable to log in - did the page structure change?')
@@ -209,11 +195,14 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
|(?: # or the v= param in all its forms
(?:(?:watch|movie)(?:_popup)?(?:\.php)?/?)? # preceding watch(_popup|.php) or nothing (like /?v=xxxx)
(?:\?|\#!?) # the params delimiter ? or # or #!
- (?:.*?&)? # any other preceding param (like /?s=tuff&v=xxxx)
+ (?:.*?&)?? # any other preceding param (like /?s=tuff&v=xxxx)
v=
)
))
- |youtu\.be/ # just youtu.be/xxxx
+ |(?:
+ youtu\.be| # just youtu.be/xxxx
+ vid\.plus # or vid.plus/xxxx
+ )/
|(?:www\.)?cleanvideosearch\.com/media/action/yt/watch\?videoId=
)
)? # all until now is optional -> you can pass the naked ID
@@ -279,13 +268,13 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
'141': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'vcodec': 'none', 'abr': 256, 'preference': -50, 'container': 'm4a_dash'},
# Dash webm
- '167': {'ext': 'webm', 'height': 360, 'width': 640, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40},
- '168': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40},
- '169': {'ext': 'webm', 'height': 720, 'width': 1280, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40},
- '170': {'ext': 'webm', 'height': 1080, 'width': 1920, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40},
- '218': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40},
- '219': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40},
- '278': {'ext': 'webm', 'height': 144, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'container': 'webm', 'vcodec': 'VP9'},
+ '167': {'ext': 'webm', 'height': 360, 'width': 640, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
+ '168': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
+ '169': {'ext': 'webm', 'height': 720, 'width': 1280, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
+ '170': {'ext': 'webm', 'height': 1080, 'width': 1920, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
+ '218': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
+ '219': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
+ '278': {'ext': 'webm', 'height': 144, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'container': 'webm', 'vcodec': 'vp9'},
'242': {'ext': 'webm', 'height': 240, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'243': {'ext': 'webm', 'height': 360, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'244': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
@@ -295,11 +284,11 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
'248': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'271': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'272': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
- '302': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'VP9'},
- '303': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'VP9'},
- '308': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'VP9'},
- '313': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'vcodec': 'VP9'},
- '315': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'VP9'},
+ '302': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'vp9'},
+ '303': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'vp9'},
+ '308': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'vp9'},
+ '313': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'vcodec': 'vp9'},
+ '315': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'vp9'},
# Dash webm audio
'171': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'abr': 128, 'preference': -50},
@@ -317,7 +306,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
IE_NAME = 'youtube'
_TESTS = [
{
- 'url': 'http://www.youtube.com/watch?v=BaW_jenozKc',
+ 'url': 'http://www.youtube.com/watch?v=BaW_jenozKcj&t=1s&end=9',
'info_dict': {
'id': 'BaW_jenozKc',
'ext': 'mp4',
@@ -327,8 +316,11 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
'upload_date': '20121002',
'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de .',
'categories': ['Science & Technology'],
+ 'tags': ['youtube-dl'],
'like_count': int,
'dislike_count': int,
+ 'start_time': 1,
+ 'end_time': 9,
}
},
{
@@ -339,7 +331,10 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
'ext': 'mp4',
'upload_date': '20120506',
'title': 'Icona Pop - I Love It (feat. Charli XCX) [OFFICIAL VIDEO]',
- 'description': 'md5:fea86fda2d5a5784273df5c7cc994d9f',
+ 'description': 'md5:782e8651347686cba06e58f71ab51773',
+ 'tags': ['Icona Pop i love it', 'sweden', 'pop music', 'big beat records', 'big beat', 'charli',
+ 'xcx', 'charli xcx', 'girls', 'hbo', 'i love it', "i don't care", 'icona', 'pop',
+ 'iconic ep', 'iconic', 'love', 'it'],
'uploader': 'Icona Pop',
'uploader_id': 'IconaPop',
}
@@ -355,6 +350,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
'description': 'md5:64249768eec3bc4276236606ea996373',
'uploader': 'justintimberlakeVEVO',
'uploader_id': 'justintimberlakeVEVO',
+ 'age_limit': 18,
}
},
{
@@ -371,6 +367,26 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
}
},
{
+ 'url': 'http://www.youtube.com/watch?v=BaW_jenozKcj&v=UxxajLWwzqY',
+ 'note': 'Use the first video ID in the URL',
+ 'info_dict': {
+ 'id': 'BaW_jenozKc',
+ 'ext': 'mp4',
+ 'title': 'youtube-dl test video "\'/\\ä↭𝕐',
+ 'uploader': 'Philipp Hagemeister',
+ 'uploader_id': 'phihag',
+ 'upload_date': '20121002',
+ 'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de .',
+ 'categories': ['Science & Technology'],
+ 'tags': ['youtube-dl'],
+ 'like_count': int,
+ 'dislike_count': int,
+ },
+ 'params': {
+ 'skip_download': True,
+ },
+ },
+ {
'url': 'http://www.youtube.com/watch?v=a9LDPn-MO4I',
'note': '256k DASH audio (format 141) via DASH manifest',
'info_dict': {
@@ -411,7 +427,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
'id': 'nfWlot6h_JM',
'ext': 'm4a',
'title': 'Taylor Swift - Shake It Off',
- 'description': 'md5:2acfda1b285bdd478ccec22f9918199d',
+ 'description': 'md5:95f66187cd7c8b2c13eb78e1223b63c3',
'uploader': 'TaylorSwiftVEVO',
'uploader_id': 'TaylorSwiftVEVO',
'upload_date': '20140818',
@@ -445,6 +461,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
'uploader': 'The Witcher',
'uploader_id': 'WitcherGame',
'upload_date': '20140605',
+ 'age_limit': 18,
},
},
# Age-gate video with encrypted signature
@@ -458,6 +475,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
'uploader': 'LloydVEVO',
'uploader_id': 'LloydVEVO',
'upload_date': '20110629',
+ 'age_limit': 18,
},
},
# video_info is None (https://github.com/rg3/youtube-dl/issues/4421)
@@ -482,7 +500,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
'info_dict': {
'id': 'lqQg6PlCWgI',
'ext': 'mp4',
- 'upload_date': '20120731',
+ 'upload_date': '20120724',
'uploader_id': 'olympic',
'description': 'HO09 - Women - GER-AUS - Hockey - 31 July 2012 - London 2012 Olympic Games',
'uploader': 'Olympics',
@@ -511,7 +529,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
'url': 'qEJwOuvDf7I',
'info_dict': {
'id': 'qEJwOuvDf7I',
- 'ext': 'mp4',
+ 'ext': 'webm',
'title': 'Обсуждение судебной практики по выборам 14 сентября 2014 года в Санкт-Петербурге',
'description': '',
'upload_date': '20150404',
@@ -535,6 +553,81 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
'uploader': 'dorappi2000',
'formats': 'mincount:33',
},
+ },
+ # DASH manifest with segment_list
+ {
+ 'url': 'https://www.youtube.com/embed/CsmdDsKjzN8',
+ 'md5': '8ce563a1d667b599d21064e982ab9e31',
+ 'info_dict': {
+ 'id': 'CsmdDsKjzN8',
+ 'ext': 'mp4',
+ 'upload_date': '20150501', # According to '<meta itemprop="datePublished"', but in other places it's 20150510
+ 'uploader': 'Airtek',
+ 'description': 'Retransmisión en directo de la XVIII media maratón de Zaragoza.',
+ 'uploader_id': 'UCzTzUmjXxxacNnL8I3m4LnQ',
+ 'title': 'Retransmisión XVIII Media maratón Zaragoza 2015',
+ },
+ 'params': {
+ 'youtube_include_dash_manifest': True,
+ 'format': '135', # bestvideo
+ }
+ },
+ {
+ # Multifeed videos (multiple cameras); the URL is for the Main Camera
+ 'url': 'https://www.youtube.com/watch?v=jqWvoWXjCVs',
+ 'info_dict': {
+ 'id': 'jqWvoWXjCVs',
+ 'title': 'teamPGP: Rocket League Noob Stream',
+ 'description': 'md5:dc7872fb300e143831327f1bae3af010',
+ },
+ 'playlist': [{
+ 'info_dict': {
+ 'id': 'jqWvoWXjCVs',
+ 'ext': 'mp4',
+ 'title': 'teamPGP: Rocket League Noob Stream (Main Camera)',
+ 'description': 'md5:dc7872fb300e143831327f1bae3af010',
+ 'upload_date': '20150721',
+ 'uploader': 'Beer Games Beer',
+ 'uploader_id': 'beergamesbeer',
+ },
+ }, {
+ 'info_dict': {
+ 'id': '6h8e8xoXJzg',
+ 'ext': 'mp4',
+ 'title': 'teamPGP: Rocket League Noob Stream (kreestuh)',
+ 'description': 'md5:dc7872fb300e143831327f1bae3af010',
+ 'upload_date': '20150721',
+ 'uploader': 'Beer Games Beer',
+ 'uploader_id': 'beergamesbeer',
+ },
+ }, {
+ 'info_dict': {
+ 'id': 'PUOgX5z9xZw',
+ 'ext': 'mp4',
+ 'title': 'teamPGP: Rocket League Noob Stream (grizzle)',
+ 'description': 'md5:dc7872fb300e143831327f1bae3af010',
+ 'upload_date': '20150721',
+ 'uploader': 'Beer Games Beer',
+ 'uploader_id': 'beergamesbeer',
+ },
+ }, {
+ 'info_dict': {
+ 'id': 'teuwxikvS5k',
+ 'ext': 'mp4',
+ 'title': 'teamPGP: Rocket League Noob Stream (zim)',
+ 'description': 'md5:dc7872fb300e143831327f1bae3af010',
+ 'upload_date': '20150721',
+ 'uploader': 'Beer Games Beer',
+ 'uploader_id': 'beergamesbeer',
+ },
+ }],
+ 'params': {
+ 'skip_download': True,
+ },
+ },
+ {
+ 'url': 'http://vid.plus/FlRa-iH7PGw',
+ 'only_matching': True,
}
]
@@ -564,7 +657,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
def _extract_signature_function(self, video_id, player_url, example_sig):
id_m = re.match(
- r'.*?-(?P<id>[a-zA-Z0-9_-]+)(?:/watch_as3|/html5player)?\.(?P<ext>[a-z]+)$',
+ r'.*?-(?P<id>[a-zA-Z0-9_-]+)(?:/watch_as3|/html5player(?:-new)?)?\.(?P<ext>[a-z]+)$',
player_url)
if not id_m:
raise ExtractorError('Cannot identify player %r' % player_url)
@@ -826,6 +919,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
# TODO implement WebVTT downloading
pass
elif mime_type.startswith('audio/') or mime_type.startswith('video/'):
+ segment_list = r.find('{urn:mpeg:DASH:schema:MPD:2011}SegmentList')
format_id = r.attrib['id']
video_url = url_el.text
filesize = int_or_none(url_el.attrib.get('{http://youtube.com/yt/2012/10/10}contentLength'))
@@ -839,6 +933,12 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
'filesize': filesize,
'fps': int_or_none(r.attrib.get('frameRate')),
}
+ if segment_list is not None:
+ f.update({
+ 'initialization_url': segment_list.find('{urn:mpeg:DASH:schema:MPD:2011}Initialization').attrib['sourceURL'],
+ 'segment_urls': [segment.attrib.get('media') for segment in segment_list.findall('{urn:mpeg:DASH:schema:MPD:2011}SegmentURL')],
+ 'protocol': 'http_dash_segments',
+ })
try:
existing_format = next(
fo for fo in formats
@@ -860,10 +960,24 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
return formats
def _real_extract(self, url):
+ url, smuggled_data = unsmuggle_url(url, {})
+
proto = (
'http' if self._downloader.params.get('prefer_insecure', False)
else 'https')
+ start_time = None
+ end_time = None
+ parsed_url = compat_urllib_parse_urlparse(url)
+ for component in [parsed_url.fragment, parsed_url.query]:
+ query = compat_parse_qs(component)
+ if start_time is None and 't' in query:
+ start_time = parse_duration(query['t'][0])
+ if start_time is None and 'start' in query:
+ start_time = parse_duration(query['start'][0])
+ if end_time is None and 'end' in query:
+ end_time = parse_duration(query['end'][0])
+
# Extract original video URL from URL with redirection, like age verification, using next_url parameter
mobj = re.search(self._NEXT_URL_RE, url)
if mobj:
@@ -890,6 +1004,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
# Get video info
embed_webpage = None
+ is_live = None
if re.search(r'player-age-gate-content">', video_webpage) is not None:
age_gate = True
# We simulate the access to the video from www.youtube.com/v/{video_id}
@@ -922,6 +1037,8 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
# Convert to the same format returned by compat_parse_qs
video_info = dict((k, [v]) for k, v in args.items())
add_dash_mpd(video_info)
+ if args.get('livestream') == '1' or args.get('live_playback') == 1:
+ is_live = True
if not video_info or self._downloader.params.get('youtube_include_dash_manifest', True):
# We also try looking in get_video_info since it may contain different dashmpd
# URL that points to a DASH manifest with possibly different itag set (some itags
@@ -939,7 +1056,8 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
video_id, note=False,
errnote='unable to download video info webpage')
get_video_info = compat_parse_qs(video_info_webpage)
- add_dash_mpd(get_video_info)
+ if get_video_info.get('use_cipher_signature') != ['True']:
+ add_dash_mpd(get_video_info)
if not video_info:
video_info = get_video_info
if 'token' in get_video_info:
@@ -948,7 +1066,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
if 'reason' in video_info:
if 'The uploader has not made this video available in your country.' in video_info['reason']:
regions_allowed = self._html_search_meta('regionsAllowed', video_webpage, default=None)
- if regions_allowed is not None:
+ if regions_allowed:
raise ExtractorError('YouTube said: This video is available in %s only' % (
', '.join(map(ISO3166Utils.short2full, regions_allowed.split(',')))),
expected=True)
@@ -960,6 +1078,55 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
'"token" parameter not in video info for unknown reason',
video_id=video_id)
+ # title
+ if 'title' in video_info:
+ video_title = video_info['title'][0]
+ else:
+ self._downloader.report_warning('Unable to extract video title')
+ video_title = '_'
+
+ # description
+ video_description = get_element_by_id("eow-description", video_webpage)
+ if video_description:
+ video_description = re.sub(r'''(?x)
+ <a\s+
+ (?:[a-zA-Z-]+="[^"]+"\s+)*?
+ title="([^"]+)"\s+
+ (?:[a-zA-Z-]+="[^"]+"\s+)*?
+ class="yt-uix-redirect-link"\s*>
+ [^<]+
+ </a>
+ ''', r'\1', video_description)
+ video_description = clean_html(video_description)
+ else:
+ fd_mobj = re.search(r'<meta name="description" content="([^"]+)"', video_webpage)
+ if fd_mobj:
+ video_description = unescapeHTML(fd_mobj.group(1))
+ else:
+ video_description = ''
+
+ if 'multifeed_metadata_list' in video_info and not smuggled_data.get('force_singlefeed', False):
+ if not self._downloader.params.get('noplaylist'):
+ entries = []
+ feed_ids = []
+ multifeed_metadata_list = compat_urllib_parse_unquote_plus(video_info['multifeed_metadata_list'][0])
+ for feed in multifeed_metadata_list.split(','):
+ feed_data = compat_parse_qs(feed)
+ entries.append({
+ '_type': 'url_transparent',
+ 'ie_key': 'Youtube',
+ 'url': smuggle_url(
+ '%s://www.youtube.com/watch?v=%s' % (proto, feed_data['id'][0]),
+ {'force_singlefeed': True}),
+ 'title': '%s (%s)' % (video_title, feed_data['title'][0]),
+ })
+ feed_ids.append(feed_data['id'][0])
+ self.to_screen(
+ 'Downloading multifeed video (%s) - add --no-playlist to just download video %s'
+ % (', '.join(feed_ids), video_id))
+ return self.playlist_result(entries, video_id, video_title, video_description)
+ self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
+
if 'view_count' in video_info:
view_count = int(video_info['view_count'][0])
else:
@@ -985,13 +1152,6 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
else:
self._downloader.report_warning('unable to extract uploader nickname')
- # title
- if 'title' in video_info:
- video_title = video_info['title'][0]
- else:
- self._downloader.report_warning('Unable to extract video title')
- video_title = '_'
-
# thumbnail image
# We try first to get a high quality image:
m_thumb = re.search(r'<span itemprop="thumbnail".*?href="(.*?)">',
@@ -1027,25 +1187,9 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
else:
video_categories = None
- # description
- video_description = get_element_by_id("eow-description", video_webpage)
- if video_description:
- video_description = re.sub(r'''(?x)
- <a\s+
- (?:[a-zA-Z-]+="[^"]+"\s+)*?
- title="([^"]+)"\s+
- (?:[a-zA-Z-]+="[^"]+"\s+)*?
- class="yt-uix-redirect-link"\s*>
- [^<]+
- </a>
- ''', r'\1', video_description)
- video_description = clean_html(video_description)
- else:
- fd_mobj = re.search(r'<meta name="description" content="([^"]+)"', video_webpage)
- if fd_mobj:
- video_description = unescapeHTML(fd_mobj.group(1))
- else:
- video_description = ''
+ video_tags = [
+ unescapeHTML(m.group('content'))
+ for m in re.finditer(self._meta_regex('og:video:tag'), video_webpage)]
def _extract_count(count_name):
return str_to_int(self._search_regex(
@@ -1096,7 +1240,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
encoded_url_map = video_info.get('url_encoded_fmt_stream_map', [''])[0] + ',' + video_info.get('adaptive_fmts', [''])[0]
if 'rtmpe%3Dyes' in encoded_url_map:
raise ExtractorError('rtmpe downloads are not supported, see https://github.com/rg3/youtube-dl/issues/343 for more information.', expected=True)
- url_map = {}
+ formats = []
for url_data_str in encoded_url_map.split(','):
url_data = compat_parse_qs(url_data_str)
if 'itag' not in url_data or 'url' not in url_data:
@@ -1142,7 +1286,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
player_desc = 'flash player %s' % player_version
else:
player_version = self._search_regex(
- r'html5player-([^/]+?)(?:/html5player)?\.js',
+ r'html5player-([^/]+?)(?:/html5player(?:-new)?)?\.js',
player_url,
'html5 player', fatal=False)
player_desc = 'html5 player %s' % player_version
@@ -1156,8 +1300,50 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
url += '&signature=' + signature
if 'ratebypass' not in url:
url += '&ratebypass=yes'
- url_map[format_id] = url
- formats = _map_to_format_list(url_map)
+
+ # Some itags are not included in the DASH manifest, so the corresponding formats
+ # lack metadata (see https://github.com/rg3/youtube-dl/pull/5993).
+ # Try to extract the metadata from the url_encoded_fmt_stream_map entry instead.
+ mobj = re.search(r'^(?P<width>\d+)[xX](?P<height>\d+)$', url_data.get('size', [''])[0])
+ width, height = (int(mobj.group('width')), int(mobj.group('height'))) if mobj else (None, None)
+ dct = {
+ 'format_id': format_id,
+ 'url': url,
+ 'player_url': player_url,
+ 'filesize': int_or_none(url_data.get('clen', [None])[0]),
+ 'tbr': float_or_none(url_data.get('bitrate', [None])[0], 1000),
+ 'width': width,
+ 'height': height,
+ 'fps': int_or_none(url_data.get('fps', [None])[0]),
+ 'format_note': url_data.get('quality_label', [None])[0] or url_data.get('quality', [None])[0],
+ }
+ type_ = url_data.get('type', [None])[0]
+ if type_:
+ type_split = type_.split(';')
+ kind_ext = type_split[0].split('/')
+ if len(kind_ext) == 2:
+ kind, ext = kind_ext
+ dct['ext'] = ext
+ if kind in ('audio', 'video'):
+ codecs = None
+ for mobj in re.finditer(
+ r'(?P<key>[a-zA-Z_-]+)=(?P<quote>["\']?)(?P<val>.+?)(?P=quote)(?:;|$)', type_):
+ if mobj.group('key') == 'codecs':
+ codecs = mobj.group('val')
+ break
+ if codecs:
+ codecs = codecs.split(',')
+ if len(codecs) == 2:
+ acodec, vcodec = codecs[0], codecs[1]
+ else:
+ acodec, vcodec = (codecs[0], 'none') if kind == 'audio' else ('none', codecs[0])
+ dct.update({
+ 'acodec': acodec,
+ 'vcodec': vcodec,
+ })
+ if format_id in self._formats:
+ dct.update(self._formats[format_id])
+ formats.append(dct)
elif video_info.get('hlsvp'):
manifest_url = video_info['hlsvp'][0]
url_map = self._extract_from_m3u8(manifest_url, video_id)
@@ -1215,6 +1401,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
'thumbnail': video_thumbnail,
'description': video_description,
'categories': video_categories,
+ 'tags': video_tags,
'subtitles': video_subtitles,
'automatic_captions': automatic_captions,
'duration': video_duration,
@@ -1226,6 +1413,9 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
'dislike_count': dislike_count,
'average_rating': float_or_none(video_info.get('avg_rating', [None])[0]),
'formats': formats,
+ 'is_live': is_live,
+ 'start_time': start_time,
+ 'end_time': end_time,
}
@@ -1618,7 +1808,7 @@ class YoutubeSearchURLIE(InfoExtractor):
r'(?s)<ol[^>]+class="item-section"(.*?)</ol>', webpage, 'result HTML')
part_codes = re.findall(
- r'(?s)<h3 class="yt-lockup-title">(.*?)</h3>', result_code)
+ r'(?s)<h3[^>]+class="[^"]*yt-lockup-title[^"]*"[^>]*>(.*?)</h3>', result_code)
entries = []
for part_code in part_codes:
part_title = self._html_search_regex(
@@ -1645,8 +1835,8 @@ class YoutubeShowIE(InfoExtractor):
_VALID_URL = r'https?://www\.youtube\.com/show/(?P<id>[^?#]*)'
IE_NAME = 'youtube:show'
_TESTS = [{
- 'url': 'http://www.youtube.com/show/airdisasters',
- 'playlist_mincount': 3,
+ 'url': 'https://www.youtube.com/show/airdisasters',
+ 'playlist_mincount': 5,
'info_dict': {
'id': 'airdisasters',
'title': 'Air Disasters',
@@ -1657,7 +1847,7 @@ class YoutubeShowIE(InfoExtractor):
mobj = re.match(self._VALID_URL, url)
playlist_id = mobj.group('id')
webpage = self._download_webpage(
- url, playlist_id, 'Downloading show webpage')
+ 'https://www.youtube.com/show/%s/playlists' % playlist_id, playlist_id, 'Downloading show webpage')
# There's one playlist for each season of the show
m_seasons = list(re.finditer(r'href="(/playlist\?list=.*?)"', webpage))
self.to_screen('%s: Found %s seasons' % (playlist_id, len(m_seasons)))
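
Among the YouTube changes above, _real_extract now reads start and end times from both the URL fragment and the query string (t, start, end parameters). A simplified, Python 3-only sketch of that parsing; parse_duration below is a minimal stand-in for youtube_dl.utils.parse_duration and only handles plain seconds and the NNmNNs form:

import re
from urllib.parse import parse_qs, urlparse

def parse_duration(s):
    # minimal stand-in: accepts '90', '90s' or '1m30s'
    m = re.match(r'(?:(\d+)m)?(\d+)s?$', s)
    return int(m.group(1) or 0) * 60 + int(m.group(2)) if m else None

def extract_times(url):
    start_time = end_time = None
    parsed = urlparse(url)
    for component in (parsed.fragment, parsed.query):
        query = parse_qs(component)
        if start_time is None and 't' in query:
            start_time = parse_duration(query['t'][0])
        if start_time is None and 'start' in query:
            start_time = parse_duration(query['start'][0])
        if end_time is None and 'end' in query:
            end_time = parse_duration(query['end'][0])
    return start_time, end_time

print(extract_times('https://www.youtube.com/watch?v=BaW_jenozKc&t=1m30s&end=120'))  # (90, 120)
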
diff --git a/youtube_dl/options.py b/youtube_dl/options.py
index 85365d769..5eccc0a70 100644
--- a/youtube_dl/options.py
+++ b/youtube_dl/options.py
@@ -2,7 +2,6 @@ from __future__ import unicode_literals
import os.path
import optparse
-import shlex
import sys
from .downloader.external import list_external_downloaders
@@ -11,6 +10,7 @@ from .compat import (
compat_get_terminal_size,
compat_getenv,
compat_kwargs,
+ compat_shlex_split,
)
from .utils import (
preferredencoding,
@@ -28,7 +28,7 @@ def parseOpts(overrideArguments=None):
try:
res = []
for l in optionf:
- res += shlex.split(l, comments=True)
+ res += compat_shlex_split(l, comments=True)
finally:
optionf.close()
return res
@@ -219,7 +219,7 @@ def parseOpts(overrideArguments=None):
     selection.add_option(
         '--playlist-items',
         dest='playlist_items', metavar='ITEM_SPEC', default=None,
-        help='Playlist video items to download. Specify indices of the videos in the playlist seperated by commas like: "--playlist-items 1,2,5,8" if you want to download videos indexed 1, 2, 5, 8 in the playlist. You can specify range: "--playlist-items 1-3,7,10-13", it will download the videos at index 1, 2, 3, 7, 10, 11, 12 and 13.')
+        help='Playlist video items to download. Specify indices of the videos in the playlist separated by commas like: "--playlist-items 1,2,5,8" if you want to download videos indexed 1, 2, 5, 8 in the playlist. You can specify range: "--playlist-items 1-3,7,10-13", it will download the videos at index 1, 2, 3, 7, 10, 11, 12 and 13.')
     selection.add_option(
         '--match-title',
         dest='matchtitle', metavar='REGEX',
@@ -320,7 +320,7 @@ def parseOpts(overrideArguments=None):
     authentication.add_option(
         '--video-password',
         dest='videopassword', metavar='PASSWORD',
-        help='Video password (vimeo, smotri)')
+        help='Video password (vimeo, smotri, youku)')

     video_format = optparse.OptionGroup(parser, 'Video Format Options')
     video_format.add_option(
diff --git a/youtube_dl/postprocessor/common.py b/youtube_dl/postprocessor/common.py
index 4191d040b..599dd1df2 100644
--- a/youtube_dl/postprocessor/common.py
+++ b/youtube_dl/postprocessor/common.py
@@ -4,6 +4,7 @@ import os

 from ..utils import (
     PostProcessingError,
+    cli_configuration_args,
     encodeFilename,
 )

@@ -61,11 +62,7 @@ class PostProcessor(object):
             self._downloader.report_warning(errnote)

     def _configuration_args(self, default=[]):
-        pp_args = self._downloader.params.get('postprocessor_args')
-        if pp_args is None:
-            return default
-        assert isinstance(pp_args, list)
-        return pp_args
+        return cli_configuration_args(self._downloader.params, 'postprocessor_args', default)


 class AudioConversionError(PostProcessingError):
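The rewritten _configuration_args now defers to cli_configuration_args, added to utils.py further below. A sketch of the data flow, assuming the CLI layer has already split --postprocessor-args into a list:

    from youtube_dl.utils import cli_configuration_args

    params = {'postprocessor_args': ['-threads', '2']}  # hypothetical params dict
    cli_configuration_args(params, 'postprocessor_args')              # ['-threads', '2']
    cli_configuration_args({}, 'postprocessor_args', default=['-v'])  # ['-v']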
diff --git a/youtube_dl/utils.py b/youtube_dl/utils.py
index 942f76d24..206dd56bc 100644
--- a/youtube_dl/utils.py
+++ b/youtube_dl/utils.py
@@ -139,21 +139,24 @@ def write_json_file(obj, fn):

 if sys.version_info >= (2, 7):
-    def find_xpath_attr(node, xpath, key, val):
+    def find_xpath_attr(node, xpath, key, val=None):
         """ Find the xpath xpath[@key=val] """
-        assert re.match(r'^[a-zA-Z-]+$', key)
-        assert re.match(r'^[a-zA-Z0-9@\s:._-]*$', val)
-        expr = xpath + "[@%s='%s']" % (key, val)
+        assert re.match(r'^[a-zA-Z_-]+$', key)
+        if val:
+            assert re.match(r'^[a-zA-Z0-9@\s:._-]*$', val)
+        expr = xpath + ('[@%s]' % key if val is None else "[@%s='%s']" % (key, val))
         return node.find(expr)
 else:
-    def find_xpath_attr(node, xpath, key, val):
+    def find_xpath_attr(node, xpath, key, val=None):
         # Here comes the crazy part: In 2.6, if the xpath is a unicode,
         # .//node does not match if a node is a direct child of . !
         if isinstance(xpath, compat_str):
             xpath = xpath.encode('ascii')

         for f in node.findall(xpath):
-            if f.attrib.get(key) == val:
+            if key not in f.attrib:
+                continue
+            if val is None or f.attrib.get(key) == val:
                 return f
         return None
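With val now defaulting to None, the helper can select on attribute presence alone (note the assertion only admits values made of letters, digits and a few punctuation characters). A small usage sketch:

    import xml.etree.ElementTree as ET
    from youtube_dl.utils import find_xpath_attr

    doc = ET.fromstring('<root><media lang="en"/><media/></root>')
    find_xpath_attr(doc, './/media', 'lang')        # first <media>: has the attribute
    find_xpath_attr(doc, './/media', 'lang', 'en')  # same element, matched by value
    find_xpath_attr(doc, './/media', 'href')        # None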
@@ -173,12 +176,12 @@ def xpath_with_ns(path, ns_map):
     return '/'.join(replaced)


-def xpath_text(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
+def xpath_element(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
     if sys.version_info < (2, 7):  # Crazy 2.6
         xpath = xpath.encode('ascii')

     n = node.find(xpath)
-    if n is None or n.text is None:
+    if n is None:
         if default is not NO_DEFAULT:
             return default
         elif fatal:
@@ -186,9 +189,37 @@ def xpath_text(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
             raise ExtractorError('Could not find XML element %s' % name)
         else:
             return None
+    return n
+
+
+def xpath_text(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
+    n = xpath_element(node, xpath, name, fatal=fatal, default=default)
+    if n is None or n == default:
+        return n
+    if n.text is None:
+        if default is not NO_DEFAULT:
+            return default
+        elif fatal:
+            name = xpath if name is None else name
+            raise ExtractorError('Could not find XML element\'s text %s' % name)
+        else:
+            return None
     return n.text


+def xpath_attr(node, xpath, key, name=None, fatal=False, default=NO_DEFAULT):
+    n = find_xpath_attr(node, xpath, key)
+    if n is None:
+        if default is not NO_DEFAULT:
+            return default
+        elif fatal:
+            name = '%s[@%s]' % (xpath, key) if name is None else name
+            raise ExtractorError('Could not find XML attribute %s' % name)
+        else:
+            return None
+    return n.attrib[key]
+
+
 def get_element_by_id(id, html):
     """Return the content of the tag with the specified ID in the passed HTML document"""
     return get_element_by_attribute("id", id, html)
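Taken together, the split yields three lookups sharing the same fatal/default semantics: an element, its text, or one of its attributes. A usage sketch, assuming the helpers are imported from youtube_dl.utils:

    import xml.etree.ElementTree as ET
    from youtube_dl.utils import xpath_attr, xpath_element, xpath_text

    doc = ET.fromstring('<video><title>Demo</title><source format="mp4"/></video>')
    xpath_text(doc, './title')                     # 'Demo'
    xpath_attr(doc, './source', 'format')          # 'mp4'
    xpath_element(doc, './missing', default=None)  # None instead of raising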
@@ -576,16 +607,19 @@ class ContentTooShortError(Exception):
     download is too small for what the server announced first, indicating
     the connection was probably interrupted.
     """
-    # Both in bytes
-    downloaded = None
-    expected = None

     def __init__(self, downloaded, expected):
+        # Both in bytes
         self.downloaded = downloaded
         self.expected = expected


 def _create_http_connection(ydl_handler, http_class, is_https, *args, **kwargs):
+    # Work around a Python 2 bug (see http://bugs.python.org/issue17849) by
+    # requiring expected HTTP responses to be HTTP/1.0 or later (see also
+    # https://github.com/rg3/youtube-dl/issues/6727)
+    if sys.version_info < (3, 0):
+        kwargs['strict'] = True
     hc = http_class(*args, **kwargs)
     source_address = ydl_handler._params.get('source_address')
     if source_address is not None:
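For context, a Python-2-only sketch of what the flag changes (example.com is just a placeholder host): with strict=True, httplib raises BadStatusLine on a malformed status line instead of silently treating the reply as an HTTP/0.9 body.

    import httplib  # Python 2 module name; the strict flag was removed from http.client in 3.4

    conn = httplib.HTTPConnection('example.com', strict=True)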
@@ -650,6 +684,26 @@ class YoutubeDLHandler(compat_urllib_request.HTTPHandler):
         return ret

     def http_request(self, req):
+        # According to RFC 3986, URLs cannot contain non-ASCII characters; however,
+        # this is not always respected by websites: some tend to give out URLs with
+        # non-percent-encoded non-ASCII characters (see telemb.py, ard.py [#3412]).
+        # urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991),
+        # so to work around the issue we replace the request's original URL with a
+        # percent-encoded one.
+        # Since redirects are also affected (e.g. http://www.southpark.de/alle-episoden/s18e09),
+        # the code of this workaround has been moved here from YoutubeDL.urlopen().
+        url = req.get_full_url()
+        url_escaped = escape_url(url)
+
+        # Substitute the URL if escaping changed it
+        if url != url_escaped:
+            req_type = HEADRequest if req.get_method() == 'HEAD' else compat_urllib_request.Request
+            new_req = req_type(
+                url_escaped, data=req.data, headers=req.headers,
+                origin_req_host=req.origin_req_host, unverifiable=req.unverifiable)
+            new_req.timeout = req.timeout
+            req = new_req
+
         for h, v in std_headers.items():
             # Capitalize is needed because of Python bug 2275: http://bugs.python.org/issue2275
             # The dict keys are capitalized because of this bug by urllib
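escape_url percent-encodes only the offending parts of the URL, so already-valid URLs pass through unchanged; a sketch with a made-up URL:

    from youtube_dl.utils import escape_url

    escape_url('http://example.com/straße?q=münchen')
    # -> 'http://example.com/stra%C3%9Fe?q=m%C3%BCnchen'
    escape_url('http://example.com/plain')  # unchanged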
@@ -694,6 +748,18 @@ class YoutubeDLHandler(compat_urllib_request.HTTPHandler):
                 gz = io.BytesIO(self.deflate(resp.read()))
                 resp = self.addinfourl_wrapper(gz, old_resp.headers, old_resp.url, old_resp.code)
                 resp.msg = old_resp.msg
+        # Percent-encode the redirect URL of the Location HTTP header to satisfy
+        # RFC 3986 (see https://github.com/rg3/youtube-dl/issues/6457).
+        if 300 <= resp.code < 400:
+            location = resp.headers.get('Location')
+            if location:
+                # Per RFC 2616 the default charset is iso-8859-1, which Python 3 respects
+                if sys.version_info >= (3, 0):
+                    location = location.encode('iso-8859-1').decode('utf-8')
+                location_escaped = escape_url(location)
+                if location != location_escaped:
+                    del resp.headers['Location']
+                    resp.headers['Location'] = location_escaped
         return resp

     https_request = http_request
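The encode/decode dance deserves a note: Python 3's http.client decodes header bytes as iso-8859-1 (the RFC 2616 default), so a UTF-8 Location arrives as mojibake, and round-tripping through iso-8859-1 recovers the intended text.

    location = 'MÃ¼nchen'                          # UTF-8 bytes mis-decoded as latin-1
    location.encode('iso-8859-1').decode('utf-8')  # 'München'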
@@ -717,6 +783,30 @@ class YoutubeDLHTTPSHandler(compat_urllib_request.HTTPSHandler):
             req, **kwargs)


+class YoutubeDLCookieProcessor(compat_urllib_request.HTTPCookieProcessor):
+    def __init__(self, cookiejar=None):
+        compat_urllib_request.HTTPCookieProcessor.__init__(self, cookiejar)
+
+    def http_response(self, request, response):
+        # Python 2 will choke on the next HTTP request in a row if there are
+        # non-ASCII characters in the Set-Cookie HTTP header of the last response
+        # (see https://github.com/rg3/youtube-dl/issues/6769).
+        # In order to at least prevent crashing we percent-encode the Set-Cookie
+        # header before HTTPCookieProcessor starts processing it.
+        # if sys.version_info < (3, 0) and response.headers:
+        #     for set_cookie_header in ('Set-Cookie', 'Set-Cookie2'):
+        #         set_cookie = response.headers.get(set_cookie_header)
+        #         if set_cookie:
+        #             set_cookie_escaped = compat_urllib_parse.quote(set_cookie, b"%/;:@&=+$,!~*'()?#[] ")
+        #             if set_cookie != set_cookie_escaped:
+        #                 del response.headers[set_cookie_header]
+        #                 response.headers[set_cookie_header] = set_cookie_escaped
+        return compat_urllib_request.HTTPCookieProcessor.http_response(self, request, response)
+
+    https_request = compat_urllib_request.HTTPCookieProcessor.http_request
+    https_response = http_response
+
+
 def parse_iso8601(date_str, delimiter='T', timezone=None):
     """ Return a UNIX timestamp from the given date """
@@ -1309,10 +1399,10 @@ def parse_duration(s):
     m = re.match(
         r'''(?ix)(?:P?T)?
        (?:
-            (?P<only_mins>[0-9.]+)\s*(?:mins?|minutes?)\s*|
+            (?P<only_mins>[0-9.]+)\s*(?:mins?\.?|minutes?)\s*|
             (?P<only_hours>[0-9.]+)\s*(?:hours?)|

-            \s*(?P<hours_reversed>[0-9]+)\s*(?:[:h]|hours?)\s*(?P<mins_reversed>[0-9]+)\s*(?:[:m]|mins?|minutes?)\s*|
+            \s*(?P<hours_reversed>[0-9]+)\s*(?:[:h]|hours?)\s*(?P<mins_reversed>[0-9]+)\s*(?:[:m]|mins?\.?|minutes?)\s*|
             (?:
                 (?:
                     (?:(?P<days>[0-9]+)\s*(?:[:d]|days?)\s*)?
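The only change is the optional trailing dot after min/mins:

    from youtube_dl.utils import parse_duration

    parse_duration('5 min.')  # 300.0 (previously None: the dot broke the match)
    parse_duration('3 mins')  # 180.0, as before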
@@ -1546,6 +1636,10 @@ def urlencode_postdata(*args, **kargs):
     return compat_urllib_parse.urlencode(*args, **kargs).encode('ascii')


+def encode_dict(d, encoding='utf-8'):
+    return dict((k.encode(encoding), v.encode(encoding)) for k, v in d.items())
+
+
 try:
     etree_iter = xml.etree.ElementTree.Element.iter
 except AttributeError:  # Python <=2.6
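encode_dict prepares form data for urlencode, which mangles unencoded unicode values on Python 2. Typical extractor usage (the form fields are made up):

    from youtube_dl.utils import encode_dict, urlencode_postdata

    form = {'username': 'dödel', 'password': 'hunter2'}
    urlencode_postdata(encode_dict(form))
    # -> b'username=d%C3%B6del&password=hunter2' (field order may vary)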
@@ -1886,6 +1980,32 @@ def dfxp2srt(dfxp_data):
     return ''.join(out)


+def cli_option(params, command_option, param):
+    param = params.get(param)
+    return [command_option, param] if param is not None else []
+
+
+def cli_bool_option(params, command_option, param, true_value='true', false_value='false', separator=None):
+    param = params.get(param)
+    assert isinstance(param, bool)
+    if separator:
+        return [command_option + separator + (true_value if param else false_value)]
+    return [command_option, true_value if param else false_value]
+
+
+def cli_valueless_option(params, command_option, param, expected_value=True):
+    param = params.get(param)
+    return [command_option] if param == expected_value else []
+
+
+def cli_configuration_args(params, param, default=[]):
+    ex_args = params.get(param)
+    if ex_args is None:
+        return default
+    assert isinstance(ex_args, list)
+    return ex_args
+
+
 class ISO639Utils(object):
     # See http://www.loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt
     _lang_map = {
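These four helpers translate YoutubeDL params into argv fragments for external programs; the external downloader wrappers are their main consumers. A sketch with a made-up params dict:

    from youtube_dl.utils import (
        cli_bool_option, cli_configuration_args, cli_option, cli_valueless_option)

    params = {'username': 'u', 'nocheckcertificate': True,
              'external_downloader_args': ['--max-tries', '5']}
    cli_option(params, '--user', 'username')                    # ['--user', 'u']
    cli_bool_option(params, '--check-certificate',
                    'nocheckcertificate', 'false', 'true', '=')  # ['--check-certificate=false']
    cli_valueless_option(params, '--no-check-certificate',
                         'nocheckcertificate')                   # ['--no-check-certificate']
    cli_configuration_args(params, 'external_downloader_args')   # ['--max-tries', '5']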
diff --git a/youtube_dl/version.py b/youtube_dl/version.py
index 3ad7a2bc0..0cc7411f2 100644
--- a/youtube_dl/version.py
+++ b/youtube_dl/version.py
@@ -1,3 +1,3 @@
 from __future__ import unicode_literals

-__version__ = '2015.07.18'
+__version__ = '2015.09.09'