Diffstat (limited to 'youtube_dl')
267 files changed, 9541 insertions, 4501 deletions
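
A few of the changes below are easier to follow with a short sketch. First, YoutubeDL.urlopen now accepts a bare URL string and wraps it in the new sanitized_Request helper (see the YoutubeDL.py hunks below), so callers no longer have to build a Request object themselves. A minimal usage sketch, assuming this patch is applied:

    from youtube_dl import YoutubeDL

    ydl = YoutubeDL({})
    # A plain string is accepted and wrapped in sanitized_Request,
    # which cleans the URL up before handing it to urllib.
    page = ydl.urlopen('http://example.com/').read()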
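Second, compat.py gains a DataHandler (backported from CPython for Python < 3.4) and YoutubeDL.py installs it in the opener, so RFC 2397 data: URLs resolve through the normal urlopen path. A sketch of the behaviour, mirroring the opener wiring in the diff:

    from youtube_dl.compat import (
        compat_urllib_request,
        compat_urllib_request_DataHandler,
    )

    opener = compat_urllib_request.build_opener(compat_urllib_request_DataHandler())
    # 'SGVsbG8=' is base64 for 'Hello'; with no mediatype given,
    # RFC 2397 defaults to text/plain;charset=US-ASCII.
    print(opener.open('data:;base64,SGVsbG8=').read())  # b'Hello'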
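Third, the new compat_etree_fromstring works around Python 2's XMLParser._fixtext, which re-encodes element text back to ASCII bytes; the shim decodes text and attributes so both interpreters return unicode (the ARD extractor below uses it on webpage.encode('utf-8')). Roughly:

    from youtube_dl.compat import compat_etree_fromstring

    doc = compat_etree_fromstring(u'<p a="foo">bär</p>'.encode('utf-8'))
    text, attr = doc.text, doc.get('a')  # u'bär' and u'foo' on both Pythons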
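Fourth, the one-character fix in downloader/common.py is an operator-precedence bug: 'and' binds tighter than 'or', so the old test 'filename != '-' and nooverwrites_and_exists or continuedl_and_exists' grouped as '(A and B) or C' and could take the already-downloaded path even when writing to stdout. Illustration:

    A, B, C = False, False, True  # filename == '-', resumable file exists
    assert (A and B or C) is True      # old grouping: (A and B) or C
    assert (A and (B or C)) is False   # intended grouping after the fix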
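Finally, the HlsFD hunk forwards HTTP headers to ffmpeg only for http(s) inputs, filters them through the new handle_youtubedl_headers, and joins them with trailing CRLFs, the framing ffmpeg's -headers option expects (otherwise it warns about a missing trailing CRLF). Roughly:

    headers = {'User-Agent': 'youtube-dl', 'Cookie': 'k=v'}
    args = ['-headers', ''.join('%s: %s\r\n' % (k, v) for k, v in headers.items())]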
diff --git a/youtube_dl/YoutubeDL.py b/youtube_dl/YoutubeDL.py
index d65253882..50425b8d7 100755
--- a/youtube_dl/YoutubeDL.py
+++ b/youtube_dl/YoutubeDL.py
@@ -28,6 +28,7 @@ if os.name == 'nt':
     import ctypes
 
 from .compat import (
+    compat_basestring,
     compat_cookiejar,
     compat_expanduser,
     compat_get_terminal_size,
@@ -37,6 +38,7 @@ from .compat import (
     compat_tokenize_tokenize,
     compat_urllib_error,
     compat_urllib_request,
+    compat_urllib_request_DataHandler,
 )
 from .utils import (
     ContentTooShortError,
@@ -45,7 +47,9 @@ from .utils import (
     DEFAULT_OUTTMPL,
     determine_ext,
     DownloadError,
+    encode_compat_str,
     encodeFilename,
+    error_to_compat_str,
     ExtractorError,
     format_bytes,
     formatSeconds,
@@ -62,6 +66,7 @@ from .utils import (
     SameFileError,
     sanitize_filename,
     sanitize_path,
+    sanitized_Request,
     std_headers,
     subtitles_filename,
     UnavailableVideoError,
@@ -155,7 +160,7 @@ class YoutubeDL(object):
     writethumbnail:    Write the thumbnail image to a file
     write_all_thumbnails:  Write all thumbnail formats to files
     writesubtitles:    Write the video subtitles to a file
-    writeautomaticsub: Write the automatic subtitles to a file
+    writeautomaticsub: Write the automatically generated subtitles to a file
     allsubtitles:      Downloads all the subtitles of the video
                        (requires writesubtitles or writeautomaticsub)
     listsubtitles:     Lists all available subtitles for the video
@@ -492,7 +497,7 @@ class YoutubeDL(object):
                 tb = ''
                 if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
                     tb += ''.join(traceback.format_exception(*sys.exc_info()[1].exc_info))
-                tb += compat_str(traceback.format_exc())
+                tb += encode_compat_str(traceback.format_exc())
             else:
                 tb_data = traceback.format_list(traceback.extract_stack())
                 tb = ''.join(tb_data)
@@ -571,7 +576,7 @@ class YoutubeDL(object):
                                  if v is not None)
             template_dict = collections.defaultdict(lambda: 'NA', template_dict)
 
-            outtmpl = sanitize_path(self.params.get('outtmpl', DEFAULT_OUTTMPL))
+            outtmpl = self.params.get('outtmpl', DEFAULT_OUTTMPL)
             tmpl = compat_expanduser(outtmpl)
             filename = tmpl % template_dict
             # Temporary fix for #4787
@@ -579,7 +584,7 @@
             # to workaround encoding issues with subprocess on python2 @ Windows
             if sys.version_info < (3, 0) and sys.platform == 'win32':
                 filename = encodeFilename(filename, True).decode(preferredencoding())
-            return filename
+            return sanitize_path(filename)
         except ValueError as err:
             self.report_error('Error in output template: ' + str(err) + ' (encoding: ' + repr(preferredencoding()) + ')')
             return None
@@ -671,14 +676,14 @@ class YoutubeDL(object):
                     return self.process_ie_result(ie_result, download, extra_info)
                 else:
                     return ie_result
-            except ExtractorError as de:  # An error we somewhat expected
-                self.report_error(compat_str(de), de.format_traceback())
+            except ExtractorError as e:  # An error we somewhat expected
+                self.report_error(compat_str(e), e.format_traceback())
                 break
             except MaxDownloadsReached:
                 raise
             except Exception as e:
                 if self.params.get('ignoreerrors', False):
-                    self.report_error(compat_str(e), tb=compat_str(traceback.format_exc()))
+                    self.report_error(error_to_compat_str(e), tb=encode_compat_str(traceback.format_exc()))
                     break
                 else:
                     raise
@@ -832,6 +837,7 @@ class YoutubeDL(object):
                     extra_info=extra)
                 playlist_results.append(entry_result)
             ie_result['entries'] = playlist_results
+            self.to_screen('[download] Finished downloading playlist: %s' % playlist)
             return ie_result
         elif result_type == 'compat_list':
             self.report_warning(
@@ -936,7 +942,7 @@ class YoutubeDL(object):
                         filter_parts.append(string)
 
         def _remove_unused_ops(tokens):
-            # Remove operators that we don't use and join them with the sourrounding strings
+            # Remove operators that we don't use and join them with the surrounding strings
            # for example: 'mp4' '-' 'baseline' '-' '16x9' is converted to 'mp4-baseline-16x9'
             ALLOWED_OPS = ('/', '+', ',', '(', ')')
             last_string, last_start, last_end, last_line = None, None, None, None
@@ -1106,6 +1112,12 @@ class YoutubeDL(object):
                             'contain the video, try using '
                             '"-f %s+%s"' % (format_2, format_1))
                         return
+                    # Formats must be opposite (video+audio)
+                    if formats_info[0].get('acodec') == 'none' and formats_info[1].get('acodec') == 'none':
+                        self.report_error(
+                            'Both formats %s and %s are video-only, you must specify "-f video+audio"'
+                            % (format_1, format_2))
+                        return
                     output_ext = (
                         formats_info[0]['ext']
                         if self.params.get('merge_output_format') is None
@@ -1185,7 +1197,7 @@ class YoutubeDL(object):
         return res
 
     def _calc_cookies(self, info_dict):
-        pr = compat_urllib_request.Request(info_dict['url'])
+        pr = sanitized_Request(info_dict['url'])
         self.cookiejar.add_cookie_header(pr)
         return pr.get_header('Cookie')
 
@@ -1232,13 +1244,20 @@ class YoutubeDL(object):
             except (ValueError, OverflowError, OSError):
                 pass
 
+        subtitles = info_dict.get('subtitles')
+        if subtitles:
+            for _, subtitle in subtitles.items():
+                for subtitle_format in subtitle:
+                    if 'ext' not in subtitle_format:
+                        subtitle_format['ext'] = determine_ext(subtitle_format['url']).lower()
+
         if self.params.get('listsubtitles', False):
             if 'automatic_captions' in info_dict:
                 self.list_subtitles(info_dict['id'], info_dict.get('automatic_captions'), 'automatic captions')
-            self.list_subtitles(info_dict['id'], info_dict.get('subtitles'), 'subtitles')
+            self.list_subtitles(info_dict['id'], subtitles, 'subtitles')
             return
         info_dict['requested_subtitles'] = self.process_subtitles(
-            info_dict['id'], info_dict.get('subtitles'),
+            info_dict['id'], subtitles,
             info_dict.get('automatic_captions'))
 
         # We now pick which formats have to be downloaded
@@ -1442,7 +1461,7 @@ class YoutubeDL(object):
             if dn and not os.path.exists(dn):
                 os.makedirs(dn)
         except (OSError, IOError) as err:
-            self.report_error('unable to create directory ' + compat_str(err))
+            self.report_error('unable to create directory ' + error_to_compat_str(err))
             return
 
         if self.params.get('writedescription', False):
@@ -1493,7 +1512,7 @@ class YoutubeDL(object):
                             sub_info['url'], info_dict['id'], note=False)
                     except ExtractorError as err:
                         self.report_warning('Unable to download subtitle for "%s": %s' %
-                                            (sub_lang, compat_str(err.cause)))
+                                            (sub_lang, error_to_compat_str(err.cause)))
                         continue
                 try:
                     sub_filename = subtitles_filename(filename, sub_lang, sub_format)
@@ -1862,6 +1881,8 @@ class YoutubeDL(object):
     def urlopen(self, req):
         """ Start an HTTP download """
+        if isinstance(req, compat_basestring):
+            req = sanitized_Request(req)
         return self._opener.open(req, timeout=self._socket_timeout)
 
     def print_debug_header(self):
@@ -1960,8 +1981,9 @@ class YoutubeDL(object):
         debuglevel = 1 if self.params.get('debug_printtraffic') else 0
         https_handler = make_HTTPS_handler(self.params, debuglevel=debuglevel)
         ydlh = YoutubeDLHandler(self.params, debuglevel=debuglevel)
+        data_handler = compat_urllib_request_DataHandler()
         opener = compat_urllib_request.build_opener(
-            proxy_handler, https_handler, cookie_processor, ydlh)
+            proxy_handler, https_handler, cookie_processor, ydlh, data_handler)
 
         # Delete the default user-agent header, which would otherwise apply in
         # cases where our custom HTTP handler doesn't come into play
@@ -2019,4 +2041,4 @@ class YoutubeDL(object):
                         (info_dict['extractor'], info_dict['id'], thumb_display_id, thumb_filename))
             except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                 self.report_warning('Unable to download thumbnail "%s": %s' %
-                                    (t['url'], compat_str(err)))
+                                    (t['url'], error_to_compat_str(err)))
diff --git a/youtube_dl/__init__.py b/youtube_dl/__init__.py
index 5e2ed4d4b..9f131f5db 100644
--- a/youtube_dl/__init__.py
+++ b/youtube_dl/__init__.py
@@ -377,7 +377,7 @@ def _real_main(argv=None):
     with YoutubeDL(ydl_opts) as ydl:
         # Update version
         if opts.update_self:
-            update_self(ydl.to_screen, opts.verbose)
+            update_self(ydl.to_screen, opts.verbose, ydl._opener)
 
         # Remove cache dir
         if opts.rm_cachedir:
diff --git a/youtube_dl/__main__.py b/youtube_dl/__main__.py
index 65a0f891c..42a0f8c6f 100755
--- a/youtube_dl/__main__.py
+++ b/youtube_dl/__main__.py
@@ -11,7 +11,7 @@ if __package__ is None and not hasattr(sys, "frozen"):
     # direct call of __main__.py
     import os.path
     path = os.path.realpath(os.path.abspath(__file__))
-    sys.path.append(os.path.dirname(os.path.dirname(path)))
+    sys.path.insert(0, os.path.dirname(os.path.dirname(path)))
 
 import youtube_dl
 
diff --git a/youtube_dl/compat.py b/youtube_dl/compat.py
index 1ff42d94b..a3e85264a 100644
--- a/youtube_dl/compat.py
+++ b/youtube_dl/compat.py
@@ -1,7 +1,10 @@
 from __future__ import unicode_literals
 
+import binascii
 import collections
+import email
 import getpass
+import io
 import optparse
 import os
 import re
@@ -11,6 +14,7 @@ import socket
 import subprocess
 import sys
 import itertools
+import xml.etree.ElementTree
 
 
 try:
@@ -39,6 +43,11 @@ except ImportError:  # Python 2
     import urlparse as compat_urlparse
 
 try:
+    import urllib.response as compat_urllib_response
+except ImportError:  # Python 2
+    import urllib as compat_urllib_response
+
+try:
     import http.cookiejar as compat_cookiejar
 except ImportError:  # Python 2
     import cookielib as compat_cookiejar
@@ -156,6 +165,40 @@ except ImportError:  # Python 2
         return compat_urllib_parse_unquote(string, encoding, errors)
 
 try:
+    from urllib.request import DataHandler as compat_urllib_request_DataHandler
+except ImportError:  # Python < 3.4
+    # Ported from CPython 98774:1733b3bd46db, Lib/urllib/request.py
+    class compat_urllib_request_DataHandler(compat_urllib_request.BaseHandler):
+        def data_open(self, req):
+            # data URLs as specified in RFC 2397.
+            #
+            # ignores POSTed data
+            #
+            # syntax:
+            # dataurl   := "data:" [ mediatype ] [ ";base64" ] "," data
+            # mediatype := [ type "/" subtype ] *( ";" parameter )
+            # data      := *urlchar
+            # parameter := attribute "=" value
+            url = req.get_full_url()
+
+            scheme, data = url.split(":", 1)
+            mediatype, data = data.split(",", 1)
+
+            # even base64 encoded data URLs might be quoted so unquote in any case:
+            data = compat_urllib_parse_unquote_to_bytes(data)
+            if mediatype.endswith(";base64"):
+                data = binascii.a2b_base64(data)
+                mediatype = mediatype[:-7]
+
+            if not mediatype:
+                mediatype = "text/plain;charset=US-ASCII"
+
+            headers = email.message_from_string(
+                "Content-type: %s\nContent-length: %d\n" % (mediatype, len(data)))
+
+            return compat_urllib_response.addinfourl(io.BytesIO(data), headers, url)
+
+try:
     compat_basestring = basestring  # Python 2
 except NameError:
     compat_basestring = str
@@ -170,6 +213,43 @@ try:
 except ImportError:  # Python 2.6
     from xml.parsers.expat import ExpatError as compat_xml_parse_error
 
+if sys.version_info[0] >= 3:
+    compat_etree_fromstring = xml.etree.ElementTree.fromstring
+else:
+    # python 2.x tries to encode unicode strings with ascii (see the
+    # XMLParser._fixtext method)
+    etree = xml.etree.ElementTree
+
+    try:
+        _etree_iter = etree.Element.iter
+    except AttributeError:  # Python <=2.6
+        def _etree_iter(root):
+            for el in root.findall('*'):
+                yield el
+                for sub in _etree_iter(el):
+                    yield sub
+
+    # on 2.6 XML doesn't have a parser argument, function copied from CPython
+    # 2.7 source
+    def _XML(text, parser=None):
+        if not parser:
+            parser = etree.XMLParser(target=etree.TreeBuilder())
+        parser.feed(text)
+        return parser.close()
+
+    def _element_factory(*args, **kwargs):
+        el = etree.Element(*args, **kwargs)
+        for k, v in el.items():
+            if isinstance(v, bytes):
+                el.set(k, v.decode('utf-8'))
+        return el
+
+    def compat_etree_fromstring(text):
+        doc = _XML(text, parser=etree.XMLParser(target=etree.TreeBuilder(element_factory=_element_factory)))
+        for el in _etree_iter(doc):
+            if el.text is not None and isinstance(el.text, bytes):
+                el.text = el.text.decode('utf-8')
+        return doc
 
 try:
     from urllib.parse import parse_qs as compat_parse_qs
@@ -416,26 +496,32 @@ if hasattr(shutil, 'get_terminal_size'):  # Python >= 3.3
 else:
     _terminal_size = collections.namedtuple('terminal_size', ['columns', 'lines'])
 
-    def compat_get_terminal_size():
-        columns = compat_getenv('COLUMNS', None)
+    def compat_get_terminal_size(fallback=(80, 24)):
+        columns = compat_getenv('COLUMNS')
         if columns:
             columns = int(columns)
         else:
             columns = None
-        lines = compat_getenv('LINES', None)
+        lines = compat_getenv('LINES')
         if lines:
             lines = int(lines)
         else:
             lines = None
 
-        try:
-            sp = subprocess.Popen(
-                ['stty', 'size'],
-                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-            out, err = sp.communicate()
-            lines, columns = map(int, out.split())
-        except Exception:
-            pass
+        if columns is None or lines is None or columns <= 0 or lines <= 0:
+            try:
+                sp = subprocess.Popen(
+                    ['stty', 'size'],
+                    stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+                out, err = sp.communicate()
+                _lines, _columns = map(int, out.split())
+            except Exception:
+                _columns, _lines = _terminal_size(*fallback)
+
+            if columns is None or columns <= 0:
+                columns = _columns
+            if lines is None or lines <= 0:
+                lines = _lines
 
         return _terminal_size(columns, lines)
 
 try:
@@ -459,6 +545,7 @@ __all__ = [
     'compat_chr',
     'compat_cookiejar',
     'compat_cookies',
+    'compat_etree_fromstring',
     'compat_expanduser',
     'compat_get_terminal_size',
     'compat_getenv',
@@ -483,6
+570,8 @@ __all__ = [ 'compat_urllib_parse_unquote_to_bytes', 'compat_urllib_parse_urlparse', 'compat_urllib_request', + 'compat_urllib_request_DataHandler', + 'compat_urllib_response', 'compat_urlparse', 'compat_urlretrieve', 'compat_xml_parse_error', diff --git a/youtube_dl/downloader/common.py b/youtube_dl/downloader/common.py index 97e755d4b..beae8c4d0 100644 --- a/youtube_dl/downloader/common.py +++ b/youtube_dl/downloader/common.py @@ -5,9 +5,9 @@ import re import sys import time -from ..compat import compat_str from ..utils import ( encodeFilename, + error_to_compat_str, decodeArgument, format_bytes, timeconvert, @@ -42,7 +42,7 @@ class FileDownloader(object): min_filesize: Skip files smaller than this size max_filesize: Skip files larger than this size xattr_set_filesize: Set ytdl.filesize user xattribute with expected size. - (experimenatal) + (experimental) external_downloader_args: A list of additional command-line arguments for the external downloader. @@ -186,7 +186,7 @@ class FileDownloader(object): return os.rename(encodeFilename(old_filename), encodeFilename(new_filename)) except (IOError, OSError) as err: - self.report_error('unable to rename file: %s' % compat_str(err)) + self.report_error('unable to rename file: %s' % error_to_compat_str(err)) def try_utime(self, filename, last_modified_hdr): """Try to set the last-modified time of the given file.""" @@ -325,7 +325,7 @@ class FileDownloader(object): ) # Check file already present - if filename != '-' and nooverwrites_and_exists or continuedl_and_exists: + if filename != '-' and (nooverwrites_and_exists or continuedl_and_exists): self.report_file_already_downloaded(filename) self._hook_progress({ 'filename': filename, diff --git a/youtube_dl/downloader/dash.py b/youtube_dl/downloader/dash.py index 8b6fa2753..535f2a7fc 100644 --- a/youtube_dl/downloader/dash.py +++ b/youtube_dl/downloader/dash.py @@ -3,7 +3,7 @@ from __future__ import unicode_literals import re from .common import FileDownloader -from ..compat import compat_urllib_request +from ..utils import sanitized_Request class DashSegmentsFD(FileDownloader): @@ -22,7 +22,7 @@ class DashSegmentsFD(FileDownloader): def append_url_to_file(outf, target_url, target_name, remaining_bytes=None): self.to_screen('[DashSegments] %s: Downloading %s' % (info_dict['id'], target_name)) - req = compat_urllib_request.Request(target_url) + req = sanitized_Request(target_url) if remaining_bytes is not None: req.add_header('Range', 'bytes=0-%d' % (remaining_bytes - 1)) diff --git a/youtube_dl/downloader/f4m.py b/youtube_dl/downloader/f4m.py index 174180db5..aaf0c49c8 100644 --- a/youtube_dl/downloader/f4m.py +++ b/youtube_dl/downloader/f4m.py @@ -5,15 +5,17 @@ import io import itertools import os import time -import xml.etree.ElementTree as etree from .fragment import FragmentFD from ..compat import ( + compat_etree_fromstring, compat_urlparse, compat_urllib_error, + compat_urllib_parse_urlparse, ) from ..utils import ( encodeFilename, + fix_xml_ampersands, sanitize_open, struct_pack, struct_unpack, @@ -285,9 +287,14 @@ class F4mFD(FragmentFD): man_url = info_dict['url'] requested_bitrate = info_dict.get('tbr') self.to_screen('[%s] Downloading f4m manifest' % self.FD_NAME) - manifest = self.ydl.urlopen(man_url).read() - - doc = etree.fromstring(manifest) + urlh = self.ydl.urlopen(man_url) + man_url = urlh.geturl() + # Some manifests may be malformed, e.g. 
prosiebensat1 generated manifests + # (see https://github.com/rg3/youtube-dl/issues/6215#issuecomment-121704244 + # and https://github.com/rg3/youtube-dl/issues/7823) + manifest = fix_xml_ampersands(urlh.read().decode('utf-8', 'ignore')).strip() + + doc = compat_etree_fromstring(manifest) formats = [(int(f.attrib.get('bitrate', -1)), f) for f in self._get_unencrypted_media(doc)] if requested_bitrate is None: @@ -329,20 +336,25 @@ class F4mFD(FragmentFD): if not live: write_metadata_tag(dest_stream, metadata) + base_url_parsed = compat_urllib_parse_urlparse(base_url) + self._start_frag_download(ctx) frags_filenames = [] while fragments_list: seg_i, frag_i = fragments_list.pop(0) name = 'Seg%d-Frag%d' % (seg_i, frag_i) - url = base_url + name + query = [] + if base_url_parsed.query: + query.append(base_url_parsed.query) if akamai_pv: - url += '?' + akamai_pv.strip(';') + query.append(akamai_pv.strip(';')) if info_dict.get('extra_param_to_segment_url'): - url += info_dict.get('extra_param_to_segment_url') + query.append(info_dict['extra_param_to_segment_url']) + url_parsed = base_url_parsed._replace(path=base_url_parsed.path + name, query='&'.join(query)) frag_filename = '%s-%s' % (ctx['tmpfilename'], name) try: - success = ctx['dl'].download(frag_filename, {'url': url}) + success = ctx['dl'].download(frag_filename, {'url': url_parsed.geturl()}) if not success: return False (down, frag_sanitized) = sanitize_open(frag_filename, 'rb') diff --git a/youtube_dl/downloader/hls.py b/youtube_dl/downloader/hls.py index a62d2047b..b5a3e1167 100644 --- a/youtube_dl/downloader/hls.py +++ b/youtube_dl/downloader/hls.py @@ -13,6 +13,7 @@ from ..utils import ( encodeArgument, encodeFilename, sanitize_open, + handle_youtubedl_headers, ) @@ -30,12 +31,13 @@ class HlsFD(FileDownloader): args = [ffpp.executable, '-y'] - if info_dict['http_headers']: + if info_dict['http_headers'] and re.match(r'^https?://', url): # Trailing \r\n after each HTTP header is important to prevent warning from ffmpeg/avconv: # [http @ 00000000003d2fa0] No trailing CRLF found in HTTP header. 
+ headers = handle_youtubedl_headers(info_dict['http_headers']) args += [ '-headers', - ''.join('%s: %s\r\n' % (key, val) for key, val in info_dict['http_headers'].items())] + ''.join('%s: %s\r\n' % (key, val) for key, val in headers.items())] args += ['-i', url, '-f', 'mp4', '-c', 'copy', '-bsf:a', 'aac_adtstoasc'] diff --git a/youtube_dl/downloader/http.py b/youtube_dl/downloader/http.py index a29f5cf31..56840e026 100644 --- a/youtube_dl/downloader/http.py +++ b/youtube_dl/downloader/http.py @@ -7,14 +7,12 @@ import time import re from .common import FileDownloader -from ..compat import ( - compat_urllib_request, - compat_urllib_error, -) +from ..compat import compat_urllib_error from ..utils import ( ContentTooShortError, encodeFilename, sanitize_open, + sanitized_Request, ) @@ -29,8 +27,8 @@ class HttpFD(FileDownloader): add_headers = info_dict.get('http_headers') if add_headers: headers.update(add_headers) - basic_request = compat_urllib_request.Request(url, None, headers) - request = compat_urllib_request.Request(url, None, headers) + basic_request = sanitized_Request(url, None, headers) + request = sanitized_Request(url, None, headers) is_test = self.params.get('test', False) diff --git a/youtube_dl/downloader/rtmp.py b/youtube_dl/downloader/rtmp.py index 7d19bb808..14d56db47 100644 --- a/youtube_dl/downloader/rtmp.py +++ b/youtube_dl/downloader/rtmp.py @@ -105,7 +105,7 @@ class RtmpFD(FileDownloader): protocol = info_dict.get('rtmp_protocol', None) real_time = info_dict.get('rtmp_real_time', False) no_resume = info_dict.get('no_resume', False) - continue_dl = info_dict.get('continuedl', True) + continue_dl = self.params.get('continuedl', True) self.report_destination(filename) tmpfilename = self.temp_name(filename) @@ -117,7 +117,7 @@ class RtmpFD(FileDownloader): return False # Download using rtmpdump. rtmpdump returns exit code 2 when - # the connection was interrumpted and resuming appears to be + # the connection was interrupted and resuming appears to be # possible. This is part of rtmpdump's normal usage, AFAIK. 
basic_args = [ 'rtmpdump', '--verbose', '-r', url, diff --git a/youtube_dl/extractor/__init__.py b/youtube_dl/extractor/__init__.py index 1813c7e1b..75d191d5d 100644 --- a/youtube_dl/extractor/__init__.py +++ b/youtube_dl/extractor/__init__.py @@ -3,13 +3,18 @@ from __future__ import unicode_literals from .abc import ABCIE from .abc7news import Abc7NewsIE from .academicearth import AcademicEarthCourseIE +from .acast import ( + ACastIE, + ACastChannelIE, +) from .addanime import AddAnimeIE from .adobetv import ( AdobeTVIE, + AdobeTVShowIE, + AdobeTVChannelIE, AdobeTVVideoIE, ) from .adultswim import AdultSwimIE -from .aftenposten import AftenpostenIE from .aftonbladet import AftonbladetIE from .airmozilla import AirMozillaIE from .aljazeera import AlJazeeraIE @@ -20,7 +25,10 @@ from .aol import AolIE from .allocine import AllocineIE from .aparat import AparatIE from .appleconnect import AppleConnectIE -from .appletrailers import AppleTrailersIE +from .appletrailers import ( + AppleTrailersIE, + AppleTrailersSectionIE, +) from .archiveorg import ArchiveOrgIE from .ard import ( ARDIE, @@ -38,6 +46,7 @@ from .arte import ( ) from .atresplayer import AtresPlayerIE from .atttechchannel import ATTTechChannelIE +from .audimedia import AudiMediaIE from .audiomack import AudiomackIE, AudiomackAlbumIE from .azubu import AzubuIE from .baidu import BaiduVideoIE @@ -45,6 +54,7 @@ from .bambuser import BambuserIE, BambuserChannelIE from .bandcamp import BandcampIE, BandcampAlbumIE from .bbc import ( BBCCoUkIE, + BBCCoUkArticleIE, BBCIE, ) from .beeg import BeegIE @@ -53,13 +63,19 @@ from .beatportpro import BeatportProIE from .bet import BetIE from .bild import BildIE from .bilibili import BiliBiliIE +from .bleacherreport import ( + BleacherReportIE, + BleacherReportCMSIE, +) from .blinkx import BlinkxIE -from .bliptv import BlipTVIE, BlipTVUserIE from .bloomberg import BloombergIE from .bpb import BpbIE from .br import BRIE from .breakcom import BreakIE -from .brightcove import BrightcoveIE +from .brightcove import ( + BrightcoveLegacyIE, + BrightcoveNewIE, +) from .buzzfeed import BuzzFeedIE from .byutv import BYUtvIE from .c56 import C56IE @@ -67,7 +83,6 @@ from .camdemy import ( CamdemyIE, CamdemyFolderIE ) -from .canal13cl import Canal13clIE from .canalplus import CanalplusIE from .canalc2 import Canalc2IE from .cbs import CBSIE @@ -76,6 +91,7 @@ from .cbssports import CBSSportsIE from .ccc import CCCIE from .ceskatelevize import CeskaTelevizeIE from .channel9 import Channel9IE +from .chaturbate import ChaturbateIE from .chilloutzone import ChilloutzoneIE from .chirbit import ( ChirbitIE, @@ -88,6 +104,7 @@ from .cliphunter import CliphunterIE from .clipsyndicate import ClipsyndicateIE from .cloudy import CloudyIE from .clubic import ClubicIE +from .clyp import ClypIE from .cmt import CMTIE from .cnet import CNETIE from .cnn import ( @@ -116,15 +133,25 @@ from .dailymotion import ( DailymotionUserIE, DailymotionCloudIE, ) -from .daum import DaumIE +from .daum import ( + DaumIE, + DaumClipIE, +) from .dbtv import DBTVIE -from .dcn import DCNIE +from .dcn import ( + DCNIE, + DCNVideoIE, + DCNLiveIE, + DCNSeasonIE, +) from .dctp import DctpTvIE from .deezer import DeezerPlaylistIE +from .democracynow import DemocracynowIE from .dfb import DFBIE from .dhm import DHMIE from .dotsub import DotsubIE from .douyutv import DouyuTVIE +from .dplay import DPlayIE from .dramafever import ( DramaFeverIE, DramaFeverSeriesIE, @@ -158,6 +185,7 @@ from .eroprofile import EroProfileIE from .escapist import EscapistIE 
from .espn import ESPNIE from .esri import EsriVideoIE +from .europa import EuropaIE from .everyonesmixtape import EveryonesMixtapeIE from .exfm import ExfmIE from .expotv import ExpoTVIE @@ -165,14 +193,12 @@ from .extremetube import ExtremeTubeIE from .facebook import FacebookIE from .faz import FazIE from .fc2 import FC2IE +from .fczenit import FczenitIE from .firstpost import FirstpostIE from .firsttv import FirstTVIE from .fivemin import FiveMinIE from .fivetv import FiveTVIE -from .fktv import ( - FKTVIE, - FKTVPosteckeIE, -) +from .fktv import FKTVIE from .flickr import FlickrIE from .folketinget import FolketingetIE from .footyroom import FootyRoomIE @@ -180,7 +206,10 @@ from .fourtube import FourTubeIE from .foxgay import FoxgayIE from .foxnews import FoxNewsIE from .foxsports import FoxSportsIE -from .franceculture import FranceCultureIE +from .franceculture import ( + FranceCultureIE, + FranceCultureEmissionIE, +) from .franceinter import FranceInterIE from .francetv import ( PluzzIE, @@ -192,7 +221,9 @@ from .francetv import ( from .freesound import FreesoundIE from .freespeech import FreespeechIE from .freevideo import FreeVideoIE +from .funimation import FunimationIE from .funnyordie import FunnyOrDieIE +from .gameinformer import GameInformerIE from .gamekings import GamekingsIE from .gameone import ( GameOneIE, @@ -209,14 +240,18 @@ from .gfycat import GfycatIE from .giantbomb import GiantBombIE from .giga import GigaIE from .glide import GlideIE -from .globo import GloboIE +from .globo import ( + GloboIE, + GloboArticleIE, +) from .godtube import GodTubeIE from .goldenmoustache import GoldenMoustacheIE from .golem import GolemIE +from .googledrive import GoogleDriveIE from .googleplus import GooglePlusIE from .googlesearch import GoogleSearchIE -from .gorillavid import GorillaVidIE from .goshgay import GoshgayIE +from .gputechconf import GPUTechConfIE from .groupon import GrouponIE from .hark import HarkIE from .hearthisat import HearThisAtIE @@ -229,12 +264,17 @@ from .history import HistoryIE from .hitbox import HitboxIE, HitboxLiveIE from .hornbunny import HornBunnyIE from .hotnewhiphop import HotNewHipHopIE +from .hotstar import HotStarIE from .howcast import HowcastIE from .howstuffworks import HowStuffWorksIE from .huffpost import HuffPostIE from .hypem import HypemIE from .iconosquare import IconosquareIE -from .ign import IGNIE, OneUPIE +from .ign import ( + IGNIE, + OneUPIE, + PCMagIE, +) from .imdb import ( ImdbIE, ImdbListIE @@ -262,6 +302,7 @@ from .izlesene import IzleseneIE from .jadorecettepub import JadoreCettePubIE from .jeuxvideo import JeuxVideoIE from .jove import JoveIE +from .jwplatform import JWPlatformIE from .jpopsukitv import JpopsukiIE from .kaltura import KalturaIE from .kanalplay import KanalPlayIE @@ -296,6 +337,11 @@ from .lifenews import ( LifeNewsIE, LifeEmbedIE, ) +from .limelight import ( + LimelightMediaIE, + LimelightChannelIE, + LimelightChannelListIE, +) from .liveleak import LiveLeakIE from .livestream import ( LivestreamIE, @@ -311,9 +357,9 @@ from .lynda import ( from .m6 import M6IE from .macgamestore import MacGameStoreIE from .mailru import MailRuIE +from .makertv import MakerTVIE from .malemotion import MalemotionIE from .mdr import MDRIE -from .megavideoz import MegaVideozIE from .metacafe import MetacafeIE from .metacritic import MetacriticIE from .mgoon import MgoonIE @@ -335,7 +381,6 @@ from .motherless import MotherlessIE from .motorsport import MotorsportIE from .movieclips import MovieClipsIE from .moviezine import 
MoviezineIE -from .movshare import MovShareIE from .mtv import ( MTVIE, MTVServicesEmbeddedIE, @@ -401,14 +446,22 @@ from .noco import NocoIE from .normalboots import NormalbootsIE from .nosvideo import NosVideoIE from .nova import NovaIE -from .novamov import NovaMovIE +from .novamov import ( + NovaMovIE, + WholeCloudIE, + NowVideoIE, + VideoWeedIE, + CloudTimeIE, +) from .nowness import ( NownessIE, NownessPlaylistIE, NownessSeriesIE, ) -from .nowtv import NowTVIE -from .nowvideo import NowVideoIE +from .nowtv import ( + NowTVIE, + NowTVListIE, +) from .npo import ( NPOIE, NPOLiveIE, @@ -445,10 +498,7 @@ from .orf import ( from .parliamentliveuk import ParliamentLiveUKIE from .patreon import PatreonIE from .pbs import PBSIE -from .periscope import ( - PeriscopeIE, - QuickscopeIE, -) +from .periscope import PeriscopeIE from .philharmoniedeparis import PhilharmonieDeParisIE from .phoenix import PhoenixIE from .photobucket import PhotobucketIE @@ -492,7 +542,10 @@ from .radiode import RadioDeIE from .radiojavan import RadioJavanIE from .radiobremen import RadioBremenIE from .radiofrance import RadioFranceIE -from .rai import RaiIE +from .rai import ( + RaiTVIE, + RaiIE, +) from .rbmaradio import RBMARadioIE from .rds import RDSIE from .redtube import RedTubeIE @@ -540,6 +593,10 @@ from .shahid import ShahidIE from .shared import SharedIE from .sharesix import ShareSixIE from .sina import SinaIE +from .skynewsarabia import ( + SkyNewsArabiaIE, + SkyNewsArabiaArticleIE, +) from .slideshare import SlideshareIE from .slutload import SlutloadIE from .smotri import ( @@ -554,15 +611,12 @@ from .snagfilms import ( ) from .snotr import SnotrIE from .sohu import SohuIE -from .soompi import ( - SoompiIE, - SoompiShowIE, -) from .soundcloud import ( SoundcloudIE, SoundcloudSetIE, SoundcloudUserIE, - SoundcloudPlaylistIE + SoundcloudPlaylistIE, + SoundcloudSearchIE ) from .soundgasm import ( SoundgasmIE, @@ -581,13 +635,17 @@ from .spankwire import SpankwireIE from .spiegel import SpiegelIE, SpiegelArticleIE from .spiegeltv import SpiegeltvIE from .spike import SpikeIE +from .stitcher import StitcherIE from .sport5 import Sport5IE from .sportbox import ( SportBoxIE, SportBoxEmbedIE, ) from .sportdeutschland import SportDeutschlandIE -from .srf import SrfIE +from .srgssr import ( + SRGSSRIE, + SRGSSRPlayIE, +) from .srmediathek import SRMediathekIE from .ssa import SSAIE from .stanfordoc import StanfordOpenClassroomIE @@ -614,6 +672,7 @@ from .teachingchannel import TeachingChannelIE from .teamcoco import TeamcocoIE from .techtalks import TechTalksIE from .ted import TEDIE +from .tele13 import Tele13IE from .telebruxelles import TeleBruxellesIE from .telecinco import TelecincoIE from .telegraaf import TelegraafIE @@ -623,6 +682,7 @@ from .tenplay import TenPlayIE from .testurl import TestURLIE from .testtube import TestTubeIE from .tf1 import TF1IE +from .theintercept import TheInterceptIE from .theonion import TheOnionIE from .theplatform import ( ThePlatformIE, @@ -642,6 +702,7 @@ from .tnaflix import ( EMPFlixIE, MovieFapIE, ) +from .toggle import ToggleIE from .thvideo import ( THVideoIE, THVideoPlaylistIE @@ -655,7 +716,13 @@ from .tube8 import Tube8IE from .tubitv import TubiTvIE from .tudou import TudouIE from .tumblr import TumblrIE -from .tunein import TuneInIE +from .tunein import ( + TuneInClipIE, + TuneInStationIE, + TuneInProgramIE, + TuneInTopicIE, + TuneInShortenerIE, +) from .turbo import TurboIE from .tutv import TutvIE from .tv2 import ( @@ -685,7 +752,7 @@ from .twitch import ( 
TwitchBookmarksIE, TwitchStreamIE, ) -from .twitter import TwitterCardIE +from .twitter import TwitterCardIE, TwitterIE from .ubu import UbuIE from .udemy import ( UdemyIE, @@ -712,16 +779,15 @@ from .vh1 import VH1IE from .vice import ViceIE from .viddler import ViddlerIE from .videodetective import VideoDetectiveIE -from .videolecturesnet import VideoLecturesNetIE from .videofyme import VideofyMeIE from .videomega import VideoMegaIE from .videopremium import VideoPremiumIE from .videott import VideoTtIE -from .videoweed import VideoWeedIE from .vidme import VidmeIE from .vidzi import VidziIE from .vier import VierIE, VierVideosIE from .viewster import ViewsterIE +from .viidea import ViideaIE from .vimeo import ( VimeoIE, VimeoAlbumIE, @@ -774,6 +840,7 @@ from .wrzuta import WrzutaIE from .wsj import WSJIE from .xbef import XBefIE from .xboxclips import XboxClipsIE +from .xfileshare import XFileShareIE from .xhamster import ( XHamsterIE, XHamsterEmbedIE, @@ -817,6 +884,7 @@ from .youtube import ( YoutubeTruncatedIDIE, YoutubeTruncatedURLIE, YoutubeUserIE, + YoutubePlaylistsIE, YoutubeWatchLaterIE, ) from .zapiks import ZapiksIE diff --git a/youtube_dl/extractor/abc.py b/youtube_dl/extractor/abc.py index f9a389f67..6a29e587f 100644 --- a/youtube_dl/extractor/abc.py +++ b/youtube_dl/extractor/abc.py @@ -12,7 +12,7 @@ from ..utils import ( class ABCIE(InfoExtractor): IE_NAME = 'abc.net.au' - _VALID_URL = r'http://www\.abc\.net\.au/news/[^/]+/[^/]+/(?P<id>\d+)' + _VALID_URL = r'http://www\.abc\.net\.au/news/(?:[^/]+/){1,2}(?P<id>\d+)' _TESTS = [{ 'url': 'http://www.abc.net.au/news/2014-11-05/australia-to-staff-ebola-treatment-centre-in-sierra-leone/5868334', @@ -23,6 +23,7 @@ class ABCIE(InfoExtractor): 'title': 'Australia to help staff Ebola treatment centre in Sierra Leone', 'description': 'md5:809ad29c67a05f54eb41f2a105693a67', }, + 'skip': 'this video has expired', }, { 'url': 'http://www.abc.net.au/news/2015-08-17/warren-entsch-introduces-same-sex-marriage-bill/6702326', 'md5': 'db2a5369238b51f9811ad815b69dc086', @@ -36,6 +37,19 @@ class ABCIE(InfoExtractor): 'title': 'Marriage Equality: Warren Entsch introduces same sex marriage bill', }, 'add_ie': ['Youtube'], + 'skip': 'Not accessible from Travis CI server', + }, { + 'url': 'http://www.abc.net.au/news/2015-10-23/nab-lifts-interest-rates-following-westpac-and-cba/6880080', + 'md5': 'b96eee7c9edf4fc5a358a0252881cc1f', + 'info_dict': { + 'id': '6880080', + 'ext': 'mp3', + 'title': 'NAB lifts interest rates, following Westpac and CBA', + 'description': 'md5:f13d8edc81e462fce4a0437c7dc04728', + }, + }, { + 'url': 'http://www.abc.net.au/news/2015-10-19/6866214', + 'only_matching': True, }] def _real_extract(self, url): @@ -43,9 +57,12 @@ class ABCIE(InfoExtractor): webpage = self._download_webpage(url, video_id) mobj = re.search( - r'inline(?P<type>Video|YouTube)Data\.push\((?P<json_data>[^)]+)\);', + r'inline(?P<type>Video|Audio|YouTube)Data\.push\((?P<json_data>[^)]+)\);', webpage) if mobj is None: + expired = self._html_search_regex(r'(?s)class="expired-(?:video|audio)".+?<span>(.+?)</span>', webpage, 'expired', None) + if expired: + raise ExtractorError('%s said: %s' % (self.IE_NAME, expired), expected=True) raise ExtractorError('Unable to extract video urls') urls_info = self._parse_json( @@ -60,11 +77,13 @@ class ABCIE(InfoExtractor): formats = [{ 'url': url_info['url'], + 'vcodec': url_info.get('codec') if mobj.group('type') == 'Video' else 'none', 'width': int_or_none(url_info.get('width')), 'height': 
int_or_none(url_info.get('height')), 'tbr': int_or_none(url_info.get('bitrate')), 'filesize': int_or_none(url_info.get('filesize')), } for url_info in urls_info] + self._sort_formats(formats) return { diff --git a/youtube_dl/extractor/abc7news.py b/youtube_dl/extractor/abc7news.py index c04949c21..122dc9099 100644 --- a/youtube_dl/extractor/abc7news.py +++ b/youtube_dl/extractor/abc7news.py @@ -44,7 +44,6 @@ class Abc7NewsIE(InfoExtractor): 'contentURL', webpage, 'm3u8 url', fatal=True) formats = self._extract_m3u8_formats(m3u8, display_id, 'mp4') - self._sort_formats(formats) title = self._og_search_title(webpage).strip() description = self._og_search_description(webpage).strip() diff --git a/youtube_dl/extractor/acast.py b/youtube_dl/extractor/acast.py new file mode 100644 index 000000000..be7913bc7 --- /dev/null +++ b/youtube_dl/extractor/acast.py @@ -0,0 +1,70 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..compat import compat_str +from ..utils import int_or_none + + +class ACastBaseIE(InfoExtractor): + _API_BASE_URL = 'https://www.acast.com/api/' + + +class ACastIE(ACastBaseIE): + IE_NAME = 'acast' + _VALID_URL = r'https?://(?:www\.)?acast\.com/(?P<channel>[^/]+)/(?P<id>[^/#?]+)' + _TEST = { + 'url': 'https://www.acast.com/condenasttraveler/-where-are-you-taipei-101-taiwan', + 'md5': 'ada3de5a1e3a2a381327d749854788bb', + 'info_dict': { + 'id': '57de3baa-4bb0-487e-9418-2692c1277a34', + 'ext': 'mp3', + 'title': '"Where Are You?": Taipei 101, Taiwan', + 'timestamp': 1196172000000, + 'description': 'md5:0c5d8201dfea2b93218ea986c91eee6e', + 'duration': 211, + } + } + + def _real_extract(self, url): + channel, display_id = re.match(self._VALID_URL, url).groups() + cast_data = self._download_json(self._API_BASE_URL + 'channels/%s/acasts/%s/playback' % (channel, display_id), display_id) + + return { + 'id': compat_str(cast_data['id']), + 'display_id': display_id, + 'url': cast_data['blings'][0]['audio'], + 'title': cast_data['name'], + 'description': cast_data.get('description'), + 'thumbnail': cast_data.get('image'), + 'timestamp': int_or_none(cast_data.get('publishingDate')), + 'duration': int_or_none(cast_data.get('duration')), + } + + +class ACastChannelIE(ACastBaseIE): + IE_NAME = 'acast:channel' + _VALID_URL = r'https?://(?:www\.)?acast\.com/(?P<id>[^/#?]+)' + _TEST = { + 'url': 'https://www.acast.com/condenasttraveler', + 'info_dict': { + 'id': '50544219-29bb-499e-a083-6087f4cb7797', + 'title': 'Condé Nast Traveler Podcast', + 'description': 'md5:98646dee22a5b386626ae31866638fbd', + }, + 'playlist_mincount': 20, + } + + @classmethod + def suitable(cls, url): + return False if ACastIE.suitable(url) else super(ACastChannelIE, cls).suitable(url) + + def _real_extract(self, url): + display_id = self._match_id(url) + channel_data = self._download_json(self._API_BASE_URL + 'channels/%s' % display_id, display_id) + casts = self._download_json(self._API_BASE_URL + 'channels/%s/acasts' % display_id, display_id) + entries = [self.url_result('https://www.acast.com/%s/%s' % (display_id, cast['url']), 'ACast') for cast in casts] + + return self.playlist_result(entries, compat_str(channel_data['id']), channel_data['name'], channel_data.get('description')) diff --git a/youtube_dl/extractor/adobetv.py b/youtube_dl/extractor/adobetv.py index 5e43adc51..8753ee2cf 100644 --- a/youtube_dl/extractor/adobetv.py +++ b/youtube_dl/extractor/adobetv.py @@ -1,23 +1,32 @@ from __future__ import unicode_literals +import re + from .common 
import InfoExtractor +from ..compat import compat_str from ..utils import ( parse_duration, unified_strdate, str_to_int, + int_or_none, float_or_none, ISO639Utils, + determine_ext, ) -class AdobeTVIE(InfoExtractor): - _VALID_URL = r'https?://tv\.adobe\.com/watch/[^/]+/(?P<id>[^/]+)' +class AdobeTVBaseIE(InfoExtractor): + _API_BASE_URL = 'http://tv.adobe.com/api/v4/' + + +class AdobeTVIE(AdobeTVBaseIE): + _VALID_URL = r'https?://tv\.adobe\.com/(?:(?P<language>fr|de|es|jp)/)?watch/(?P<show_urlname>[^/]+)/(?P<id>[^/]+)' _TEST = { 'url': 'http://tv.adobe.com/watch/the-complete-picture-with-julieanne-kost/quick-tip-how-to-draw-a-circle-around-an-object-in-photoshop/', 'md5': '9bc5727bcdd55251f35ad311ca74fa1e', 'info_dict': { - 'id': 'quick-tip-how-to-draw-a-circle-around-an-object-in-photoshop', + 'id': '10981', 'ext': 'mp4', 'title': 'Quick Tip - How to Draw a Circle Around an Object in Photoshop', 'description': 'md5:99ec318dc909d7ba2a1f2b038f7d2311', @@ -29,50 +38,106 @@ class AdobeTVIE(InfoExtractor): } def _real_extract(self, url): - video_id = self._match_id(url) - webpage = self._download_webpage(url, video_id) - - player = self._parse_json( - self._search_regex(r'html5player:\s*({.+?})\s*\n', webpage, 'player'), - video_id) + language, show_urlname, urlname = re.match(self._VALID_URL, url).groups() + if not language: + language = 'en' - title = player.get('title') or self._search_regex( - r'data-title="([^"]+)"', webpage, 'title') - description = self._og_search_description(webpage) - thumbnail = self._og_search_thumbnail(webpage) - - upload_date = unified_strdate( - self._html_search_meta('datepublished', webpage, 'upload date')) - - duration = parse_duration( - self._html_search_meta('duration', webpage, 'duration') or - self._search_regex( - r'Runtime:\s*(\d{2}:\d{2}:\d{2})', - webpage, 'duration', fatal=False)) - - view_count = str_to_int(self._search_regex( - r'<div class="views">\s*Views?:\s*([\d,.]+)\s*</div>', - webpage, 'view count')) + video_data = self._download_json( + self._API_BASE_URL + 'episode/get/?language=%s&show_urlname=%s&urlname=%s&disclosure=standard' % (language, show_urlname, urlname), + urlname)['data'][0] formats = [{ - 'url': source['src'], - 'format_id': source.get('quality') or source['src'].split('-')[-1].split('.')[0] or None, - 'tbr': source.get('bitrate'), - } for source in player['sources']] + 'url': source['url'], + 'format_id': source.get('quality_level') or source['url'].split('-')[-1].split('.')[0] or None, + 'width': int_or_none(source.get('width')), + 'height': int_or_none(source.get('height')), + 'tbr': int_or_none(source.get('video_data_rate')), + } for source in video_data['videos']] self._sort_formats(formats) return { - 'id': video_id, - 'title': title, - 'description': description, - 'thumbnail': thumbnail, - 'upload_date': upload_date, - 'duration': duration, - 'view_count': view_count, + 'id': compat_str(video_data['id']), + 'title': video_data['title'], + 'description': video_data.get('description'), + 'thumbnail': video_data.get('thumbnail'), + 'upload_date': unified_strdate(video_data.get('start_date')), + 'duration': parse_duration(video_data.get('duration')), + 'view_count': str_to_int(video_data.get('playcount')), 'formats': formats, } +class AdobeTVPlaylistBaseIE(AdobeTVBaseIE): + def _parse_page_data(self, page_data): + return [self.url_result(self._get_element_url(element_data)) for element_data in page_data] + + def _extract_playlist_entries(self, url, display_id): + page = self._download_json(url, display_id) + entries = 
self._parse_page_data(page['data']) + for page_num in range(2, page['paging']['pages'] + 1): + entries.extend(self._parse_page_data( + self._download_json(url + '&page=%d' % page_num, display_id)['data'])) + return entries + + +class AdobeTVShowIE(AdobeTVPlaylistBaseIE): + _VALID_URL = r'https?://tv\.adobe\.com/(?:(?P<language>fr|de|es|jp)/)?show/(?P<id>[^/]+)' + + _TEST = { + 'url': 'http://tv.adobe.com/show/the-complete-picture-with-julieanne-kost', + 'info_dict': { + 'id': '36', + 'title': 'The Complete Picture with Julieanne Kost', + 'description': 'md5:fa50867102dcd1aa0ddf2ab039311b27', + }, + 'playlist_mincount': 136, + } + + def _get_element_url(self, element_data): + return element_data['urls'][0] + + def _real_extract(self, url): + language, show_urlname = re.match(self._VALID_URL, url).groups() + if not language: + language = 'en' + query = 'language=%s&show_urlname=%s' % (language, show_urlname) + + show_data = self._download_json(self._API_BASE_URL + 'show/get/?%s' % query, show_urlname)['data'][0] + + return self.playlist_result( + self._extract_playlist_entries(self._API_BASE_URL + 'episode/?%s' % query, show_urlname), + compat_str(show_data['id']), + show_data['show_name'], + show_data['show_description']) + + +class AdobeTVChannelIE(AdobeTVPlaylistBaseIE): + _VALID_URL = r'https?://tv\.adobe\.com/(?:(?P<language>fr|de|es|jp)/)?channel/(?P<id>[^/]+)(?:/(?P<category_urlname>[^/]+))?' + + _TEST = { + 'url': 'http://tv.adobe.com/channel/development', + 'info_dict': { + 'id': 'development', + }, + 'playlist_mincount': 96, + } + + def _get_element_url(self, element_data): + return element_data['url'] + + def _real_extract(self, url): + language, channel_urlname, category_urlname = re.match(self._VALID_URL, url).groups() + if not language: + language = 'en' + query = 'language=%s&channel_urlname=%s' % (language, channel_urlname) + if category_urlname: + query += '&category_urlname=%s' % category_urlname + + return self.playlist_result( + self._extract_playlist_entries(self._API_BASE_URL + 'show/?%s' % query, channel_urlname), + channel_urlname) + + class AdobeTVVideoIE(InfoExtractor): _VALID_URL = r'https?://video\.tv\.adobe\.com/v/(?P<id>\d+)' @@ -91,28 +156,25 @@ class AdobeTVVideoIE(InfoExtractor): def _real_extract(self, url): video_id = self._match_id(url) - - webpage = self._download_webpage(url, video_id) - - player_params = self._parse_json(self._search_regex( - r'var\s+bridge\s*=\s*([^;]+);', webpage, 'player parameters'), - video_id) + video_data = self._download_json(url + '?format=json', video_id) formats = [{ + 'format_id': '%s-%s' % (determine_ext(source['src']), source.get('height')), 'url': source['src'], - 'width': source.get('width'), - 'height': source.get('height'), - 'tbr': source.get('bitrate'), - } for source in player_params['sources']] + 'width': int_or_none(source.get('width')), + 'height': int_or_none(source.get('height')), + 'tbr': int_or_none(source.get('bitrate')), + } for source in video_data['sources']] + self._sort_formats(formats) # For both metadata and downloaded files the duration varies among # formats. 
I just pick the max one duration = max(filter(None, [ float_or_none(source.get('duration'), scale=1000) - for source in player_params['sources']])) + for source in video_data['sources']])) subtitles = {} - for translation in player_params.get('translations', []): + for translation in video_data.get('translations', []): lang_id = translation.get('language_w3c') or ISO639Utils.long2short(translation['language_medium']) if lang_id not in subtitles: subtitles[lang_id] = [] @@ -124,8 +186,9 @@ class AdobeTVVideoIE(InfoExtractor): return { 'id': video_id, 'formats': formats, - 'title': player_params['title'], - 'description': self._og_search_description(webpage), + 'title': video_data['title'], + 'description': video_data.get('description'), + 'thumbnail': video_data['video'].get('poster'), 'duration': duration, 'subtitles': subtitles, } diff --git a/youtube_dl/extractor/adultswim.py b/youtube_dl/extractor/adultswim.py index 4327c2f61..bf21a6887 100644 --- a/youtube_dl/extractor/adultswim.py +++ b/youtube_dl/extractor/adultswim.py @@ -5,6 +5,7 @@ import re from .common import InfoExtractor from ..utils import ( + determine_ext, ExtractorError, float_or_none, xpath_text, @@ -40,7 +41,8 @@ class AdultSwimIE(InfoExtractor): 'id': 'rQxZvXQ4ROaSOqq-or2Mow', 'title': 'Rick and Morty - Pilot', 'description': "Rick moves in with his daughter's family and establishes himself as a bad influence on his grandson, Morty. " - } + }, + 'skip': 'This video is only available for registered users', }, { 'url': 'http://www.adultswim.com/videos/playlists/american-parenting/putting-francine-out-of-business/', 'playlist': [ @@ -66,7 +68,7 @@ class AdultSwimIE(InfoExtractor): 'md5': '3e346a2ab0087d687a05e1e7f3b3e529', 'info_dict': { 'id': 'sY3cMUR_TbuE4YmdjzbIcQ-0', - 'ext': 'flv', + 'ext': 'mp4', 'title': 'Tim and Eric Awesome Show Great Job! - Dr. Steve Brule, For Your Wine', 'description': 'Dr. Brule reports live from Wine Country with a special report on wines. \r\nWatch Tim and Eric Awesome Show Great Job! episode #20, "Embarrassed" on Adult Swim.\r\n\r\n', }, @@ -77,6 +79,10 @@ class AdultSwimIE(InfoExtractor): 'title': 'Tim and Eric Awesome Show Great Job! - Dr. Steve Brule, For Your Wine', 'description': 'Dr. Brule reports live from Wine Country with a special report on wines. \r\nWatch Tim and Eric Awesome Show Great Job! episode #20, "Embarrassed" on Adult Swim.\r\n\r\n', }, + 'params': { + # m3u8 download + 'skip_download': True, + } }] @staticmethod @@ -123,7 +129,6 @@ class AdultSwimIE(InfoExtractor): else: collections = bootstrapped_data['show']['collections'] collection, video_info = self.find_collection_containing_video(collections, episode_path) - # Video wasn't found in the collections, let's try `slugged_video`. if video_info is None: if bootstrapped_data.get('slugged_video', {}).get('slug') == episode_path: @@ -133,7 +138,15 @@ class AdultSwimIE(InfoExtractor): show = bootstrapped_data['show'] show_title = show['title'] - segment_ids = [clip['videoPlaybackID'] for clip in video_info['clips']] + stream = video_info.get('stream') + clips = [stream] if stream else video_info.get('clips') + if not clips: + raise ExtractorError( + 'This video is only available via cable service provider subscription that' + ' is not currently supported. You may want to use --cookies.' 
+ if video_info.get('auth') is True else 'Unable to find stream or clips', + expected=True) + segment_ids = [clip['videoPlaybackID'] for clip in clips] episode_id = video_info['id'] episode_title = video_info['title'] @@ -142,7 +155,7 @@ class AdultSwimIE(InfoExtractor): entries = [] for part_num, segment_id in enumerate(segment_ids): - segment_url = 'http://www.adultswim.com/videos/api/v0/assets?id=%s&platform=mobile' % segment_id + segment_url = 'http://www.adultswim.com/videos/api/v0/assets?id=%s&platform=desktop' % segment_id segment_title = '%s - %s' % (show_title, episode_title) if len(segment_ids) > 1: @@ -158,17 +171,30 @@ class AdultSwimIE(InfoExtractor): formats = [] file_els = idoc.findall('.//files/file') or idoc.findall('./files/file') + unique_urls = [] + unique_file_els = [] for file_el in file_els: + media_url = file_el.text + if not media_url or determine_ext(media_url) == 'f4m': + continue + if file_el.text not in unique_urls: + unique_urls.append(file_el.text) + unique_file_els.append(file_el) + + for file_el in unique_file_els: bitrate = file_el.attrib.get('bitrate') ftype = file_el.attrib.get('type') - - formats.append({ - 'format_id': '%s_%s' % (bitrate, ftype), - 'url': file_el.text.strip(), - # The bitrate may not be a number (for example: 'iphone') - 'tbr': int(bitrate) if bitrate.isdigit() else None, - 'quality': 1 if ftype == 'hd' else -1 - }) + media_url = file_el.text + if determine_ext(media_url) == 'm3u8': + formats.extend(self._extract_m3u8_formats( + media_url, segment_title, 'mp4', preference=0, m3u8_id='hls')) + else: + formats.append({ + 'format_id': '%s_%s' % (bitrate, ftype), + 'url': file_el.text.strip(), + # The bitrate may not be a number (for example: 'iphone') + 'tbr': int(bitrate) if bitrate.isdigit() else None, + }) self._sort_formats(formats) diff --git a/youtube_dl/extractor/aftenposten.py b/youtube_dl/extractor/aftenposten.py deleted file mode 100644 index 0c00acfb5..000000000 --- a/youtube_dl/extractor/aftenposten.py +++ /dev/null @@ -1,23 +0,0 @@ -# coding: utf-8 -from __future__ import unicode_literals - -from .common import InfoExtractor - - -class AftenpostenIE(InfoExtractor): - _VALID_URL = r'https?://(?:www\.)?aftenposten\.no/webtv/(?:#!/)?video/(?P<id>\d+)' - _TEST = { - 'url': 'http://www.aftenposten.no/webtv/#!/video/21039/trailer-sweatshop-i-can-t-take-any-more', - 'md5': 'fd828cd29774a729bf4d4425fe192972', - 'info_dict': { - 'id': '21039', - 'ext': 'mov', - 'title': 'TRAILER: "Sweatshop" - I can´t take any more', - 'description': 'md5:21891f2b0dd7ec2f78d84a50e54f8238', - 'timestamp': 1416927969, - 'upload_date': '20141125', - } - } - - def _real_extract(self, url): - return self.url_result('xstream:ap:%s' % self._match_id(url), 'Xstream') diff --git a/youtube_dl/extractor/aljazeera.py b/youtube_dl/extractor/aljazeera.py index 184a14a4f..5b2c0dc9a 100644 --- a/youtube_dl/extractor/aljazeera.py +++ b/youtube_dl/extractor/aljazeera.py @@ -15,7 +15,7 @@ class AlJazeeraIE(InfoExtractor): 'description': 'As a birth attendant advocating for family planning, Remy is on the frontline of Tondo\'s battle with overcrowding.', 'uploader': 'Al Jazeera English', }, - 'add_ie': ['Brightcove'], + 'add_ie': ['BrightcoveLegacy'], 'skip': 'Not accessible from Travis CI server', } @@ -32,5 +32,5 @@ class AlJazeeraIE(InfoExtractor): 'playerKey=AQ~~%2CAAAAmtVJIFk~%2CTVGOQ5ZTwJbeMWnq5d_H4MOM57xfzApc' '&%40videoPlayer={0}'.format(brightcove_id) ), - 'ie_key': 'Brightcove', + 'ie_key': 'BrightcoveLegacy', } diff --git a/youtube_dl/extractor/amp.py 
b/youtube_dl/extractor/amp.py new file mode 100644 index 000000000..1035d1c48 --- /dev/null +++ b/youtube_dl/extractor/amp.py @@ -0,0 +1,80 @@ +# coding: utf-8 +from __future__ import unicode_literals + +from .common import InfoExtractor +from ..utils import ( + int_or_none, + parse_iso8601, +) + + +class AMPIE(InfoExtractor): + # parse Akamai Adaptive Media Player feed + def _extract_feed_info(self, url): + item = self._download_json( + url, None, 'Downloading Akamai AMP feed', + 'Unable to download Akamai AMP feed')['channel']['item'] + + video_id = item['guid'] + + def get_media_node(name, default=None): + media_name = 'media-%s' % name + media_group = item.get('media-group') or item + return media_group.get(media_name) or item.get(media_name) or item.get(name, default) + + thumbnails = [] + media_thumbnail = get_media_node('thumbnail') + if media_thumbnail: + if isinstance(media_thumbnail, dict): + media_thumbnail = [media_thumbnail] + for thumbnail_data in media_thumbnail: + thumbnail = thumbnail_data['@attributes'] + thumbnails.append({ + 'url': self._proto_relative_url(thumbnail['url'], 'http:'), + 'width': int_or_none(thumbnail.get('width')), + 'height': int_or_none(thumbnail.get('height')), + }) + + subtitles = {} + media_subtitle = get_media_node('subTitle') + if media_subtitle: + if isinstance(media_subtitle, dict): + media_subtitle = [media_subtitle] + for subtitle_data in media_subtitle: + subtitle = subtitle_data['@attributes'] + lang = subtitle.get('lang') or 'en' + subtitles[lang] = [{'url': subtitle['href']}] + + formats = [] + media_content = get_media_node('content') + if isinstance(media_content, dict): + media_content = [media_content] + for media_data in media_content: + media = media_data['@attributes'] + media_type = media['type'] + if media_type == 'video/f4m': + formats.extend(self._extract_f4m_formats( + media['url'] + '?hdcore=3.4.0&plugin=aasp-3.4.0.132.124', + video_id, f4m_id='hds', fatal=False)) + elif media_type == 'application/x-mpegURL': + formats.extend(self._extract_m3u8_formats( + media['url'], video_id, 'mp4', m3u8_id='hls', fatal=False)) + else: + formats.append({ + 'format_id': media_data['media-category']['@attributes']['label'], + 'url': media['url'], + 'tbr': int_or_none(media.get('bitrate')), + 'filesize': int_or_none(media.get('fileSize')), + }) + + self._sort_formats(formats) + + return { + 'id': video_id, + 'title': get_media_node('title'), + 'description': get_media_node('description'), + 'thumbnails': thumbnails, + 'timestamp': parse_iso8601(item.get('pubDate'), ' '), + 'duration': int_or_none(media_content[0].get('@attributes', {}).get('duration')), + 'formats': formats, + } diff --git a/youtube_dl/extractor/anitube.py b/youtube_dl/extractor/anitube.py index 31f0d417c..23f942ae2 100644 --- a/youtube_dl/extractor/anitube.py +++ b/youtube_dl/extractor/anitube.py @@ -26,8 +26,8 @@ class AnitubeIE(InfoExtractor): video_id = mobj.group('id') webpage = self._download_webpage(url, video_id) - key = self._html_search_regex( - r'http://www\.anitube\.se/embed/([A-Za-z0-9_-]*)', webpage, 'key') + key = self._search_regex( + r'src=["\']https?://[^/]+/embed/([A-Za-z0-9_-]+)', webpage, 'key') config_xml = self._download_xml( 'http://www.anitube.se/nuevo/econfig.php?key=%s' % key, key) diff --git a/youtube_dl/extractor/appletrailers.py b/youtube_dl/extractor/appletrailers.py index 576f03b5b..62ed0c918 100644 --- a/youtube_dl/extractor/appletrailers.py +++ b/youtube_dl/extractor/appletrailers.py @@ -11,59 +11,66 @@ from ..utils import ( class 
AppleTrailersIE(InfoExtractor): + IE_NAME = 'appletrailers' _VALID_URL = r'https?://(?:www\.)?trailers\.apple\.com/(?:trailers|ca)/(?P<company>[^/]+)/(?P<movie>[^/]+)' _TESTS = [{ - "url": "http://trailers.apple.com/trailers/wb/manofsteel/", + 'url': 'http://trailers.apple.com/trailers/wb/manofsteel/', 'info_dict': { 'id': 'manofsteel', }, - "playlist": [ + 'playlist': [ { - "md5": "d97a8e575432dbcb81b7c3acb741f8a8", - "info_dict": { - "id": "manofsteel-trailer4", - "ext": "mov", - "duration": 111, - "title": "Trailer 4", - "upload_date": "20130523", - "uploader_id": "wb", + 'md5': 'd97a8e575432dbcb81b7c3acb741f8a8', + 'info_dict': { + 'id': 'manofsteel-trailer4', + 'ext': 'mov', + 'duration': 111, + 'title': 'Trailer 4', + 'upload_date': '20130523', + 'uploader_id': 'wb', }, }, { - "md5": "b8017b7131b721fb4e8d6f49e1df908c", - "info_dict": { - "id": "manofsteel-trailer3", - "ext": "mov", - "duration": 182, - "title": "Trailer 3", - "upload_date": "20130417", - "uploader_id": "wb", + 'md5': 'b8017b7131b721fb4e8d6f49e1df908c', + 'info_dict': { + 'id': 'manofsteel-trailer3', + 'ext': 'mov', + 'duration': 182, + 'title': 'Trailer 3', + 'upload_date': '20130417', + 'uploader_id': 'wb', }, }, { - "md5": "d0f1e1150989b9924679b441f3404d48", - "info_dict": { - "id": "manofsteel-trailer", - "ext": "mov", - "duration": 148, - "title": "Trailer", - "upload_date": "20121212", - "uploader_id": "wb", + 'md5': 'd0f1e1150989b9924679b441f3404d48', + 'info_dict': { + 'id': 'manofsteel-trailer', + 'ext': 'mov', + 'duration': 148, + 'title': 'Trailer', + 'upload_date': '20121212', + 'uploader_id': 'wb', }, }, { - "md5": "5fe08795b943eb2e757fa95cb6def1cb", - "info_dict": { - "id": "manofsteel-teaser", - "ext": "mov", - "duration": 93, - "title": "Teaser", - "upload_date": "20120721", - "uploader_id": "wb", + 'md5': '5fe08795b943eb2e757fa95cb6def1cb', + 'info_dict': { + 'id': 'manofsteel-teaser', + 'ext': 'mov', + 'duration': 93, + 'title': 'Teaser', + 'upload_date': '20120721', + 'uploader_id': 'wb', }, }, ] }, { + 'url': 'http://trailers.apple.com/trailers/magnolia/blackthorn/', + 'info_dict': { + 'id': 'blackthorn', + }, + 'playlist_mincount': 2, + }, { 'url': 'http://trailers.apple.com/ca/metropole/autrui/', 'only_matching': True, }] @@ -79,7 +86,7 @@ class AppleTrailersIE(InfoExtractor): def fix_html(s): s = re.sub(r'(?s)<script[^<]*?>.*?</script>', '', s) - s = re.sub(r'<img ([^<]*?)>', r'<img \1/>', s) + s = re.sub(r'<img ([^<]*?)/?>', r'<img \1/>', s) # The ' in the onClick attributes are not escaped, it couldn't be parsed # like: http://trailers.apple.com/trailers/wb/gravity/ @@ -96,6 +103,9 @@ class AppleTrailersIE(InfoExtractor): trailer_info_json = self._search_regex(self._JSON_RE, on_click, 'trailer info') trailer_info = json.loads(trailer_info_json) + first_url = trailer_info.get('url') + if not first_url: + continue title = trailer_info['title'] video_id = movie + '-' + re.sub(r'[^a-zA-Z0-9]', '', title).lower() thumbnail = li.find('.//img').attrib['src'] @@ -107,7 +117,6 @@ class AppleTrailersIE(InfoExtractor): if m: duration = 60 * int(m.group('minutes')) + int(m.group('seconds')) - first_url = trailer_info['url'] trailer_id = first_url.split('/')[-1].rpartition('_')[0].lower() settings_json_url = compat_urlparse.urljoin(url, 'includes/settings/%s.json' % trailer_id) settings = self._download_json(settings_json_url, trailer_id, 'Downloading settings json') @@ -144,3 +153,76 @@ class AppleTrailersIE(InfoExtractor): 'id': movie, 'entries': playlist, } + + +class 
AppleTrailersSectionIE(InfoExtractor): + IE_NAME = 'appletrailers:section' + _SECTIONS = { + 'justadded': { + 'feed_path': 'just_added', + 'title': 'Just Added', + }, + 'exclusive': { + 'feed_path': 'exclusive', + 'title': 'Exclusive', + }, + 'justhd': { + 'feed_path': 'just_hd', + 'title': 'Just HD', + }, + 'mostpopular': { + 'feed_path': 'most_pop', + 'title': 'Most Popular', + }, + 'moviestudios': { + 'feed_path': 'studios', + 'title': 'Movie Studios', + }, + } + _VALID_URL = r'https?://(?:www\.)?trailers\.apple\.com/#section=(?P<id>%s)' % '|'.join(_SECTIONS) + _TESTS = [{ + 'url': 'http://trailers.apple.com/#section=justadded', + 'info_dict': { + 'title': 'Just Added', + 'id': 'justadded', + }, + 'playlist_mincount': 80, + }, { + 'url': 'http://trailers.apple.com/#section=exclusive', + 'info_dict': { + 'title': 'Exclusive', + 'id': 'exclusive', + }, + 'playlist_mincount': 80, + }, { + 'url': 'http://trailers.apple.com/#section=justhd', + 'info_dict': { + 'title': 'Just HD', + 'id': 'justhd', + }, + 'playlist_mincount': 80, + }, { + 'url': 'http://trailers.apple.com/#section=mostpopular', + 'info_dict': { + 'title': 'Most Popular', + 'id': 'mostpopular', + }, + 'playlist_mincount': 80, + }, { + 'url': 'http://trailers.apple.com/#section=moviestudios', + 'info_dict': { + 'title': 'Movie Studios', + 'id': 'moviestudios', + }, + 'playlist_mincount': 80, + }] + + def _real_extract(self, url): + section = self._match_id(url) + section_data = self._download_json( + 'http://trailers.apple.com/trailers/home/feeds/%s.json' % self._SECTIONS[section]['feed_path'], + section) + entries = [ + self.url_result('http://trailers.apple.com' + e['location']) + for e in section_data] + return self.playlist_result(entries, section, self._SECTIONS[section]['title']) diff --git a/youtube_dl/extractor/ard.py b/youtube_dl/extractor/ard.py index 6f465789b..9fb84911a 100644 --- a/youtube_dl/extractor/ard.py +++ b/youtube_dl/extractor/ard.py @@ -14,8 +14,8 @@ from ..utils import ( parse_duration, unified_strdate, xpath_text, - parse_xml, ) +from ..compat import compat_etree_fromstring class ARDMediathekIE(InfoExtractor): @@ -110,13 +110,15 @@ class ARDMediathekIE(InfoExtractor): server = stream.get('_server') for stream_url in stream_urls: ext = determine_ext(stream_url) + if quality != 'auto' and ext in ('f4m', 'm3u8'): + continue if ext == 'f4m': formats.extend(self._extract_f4m_formats( stream_url + '?hdcore=3.1.1&plugin=aasp-3.1.1.69.124', - video_id, preference=-1, f4m_id='hds')) + video_id, preference=-1, f4m_id='hds', fatal=False)) elif ext == 'm3u8': formats.extend(self._extract_m3u8_formats( - stream_url, video_id, 'mp4', preference=1, m3u8_id='hls')) + stream_url, video_id, 'mp4', preference=1, m3u8_id='hls', fatal=False)) else: if server and server.startswith('rtmp'): f = { @@ -161,7 +163,7 @@ class ARDMediathekIE(InfoExtractor): raise ExtractorError('This program is only suitable for those aged 12 and older. Video %s is therefore only available between 20 pm and 6 am.' 
% video_id, expected=True) if re.search(r'[\?&]rss($|[=&])', url): - doc = parse_xml(webpage) + doc = compat_etree_fromstring(webpage.encode('utf-8')) if doc.tag == 'rss': return GenericIE()._extract_rss(url, video_id, doc) diff --git a/youtube_dl/extractor/arte.py b/youtube_dl/extractor/arte.py index 2a00da3ee..10301a8ea 100644 --- a/youtube_dl/extractor/arte.py +++ b/youtube_dl/extractor/arte.py @@ -68,9 +68,13 @@ class ArteTVPlus7IE(InfoExtractor): def _extract_url_info(cls, url): mobj = re.match(cls._VALID_URL, url) lang = mobj.group('lang') - # This is not a real id, it can be for example AJT for the news - # http://www.arte.tv/guide/fr/emissions/AJT/arte-journal - video_id = mobj.group('id') + query = compat_parse_qs(compat_urllib_parse_urlparse(url).query) + if 'vid' in query: + video_id = query['vid'][0] + else: + # This is not a real id, it can be for example AJT for the news + # http://www.arte.tv/guide/fr/emissions/AJT/arte-journal + video_id = mobj.group('id') return video_id, lang def _real_extract(self, url): @@ -79,9 +83,15 @@ class ArteTVPlus7IE(InfoExtractor): return self._extract_from_webpage(webpage, video_id, lang) def _extract_from_webpage(self, webpage, video_id, lang): + patterns_templates = (r'arte_vp_url=["\'](.*?%s.*?)["\']', r'data-url=["\']([^"]+%s[^"]+)["\']') + ids = (video_id, '') + # some pages contain multiple videos (like + # http://www.arte.tv/guide/de/sendungen/XEN/xenius/?vid=055918-015_PLUS7-D), + # so we first try to look for json URLs that contain the video id from + # the 'vid' parameter. + patterns = [t % re.escape(_id) for _id in ids for t in patterns_templates] json_url = self._html_search_regex( - [r'arte_vp_url=["\'](.*?)["\']', r'data-url=["\']([^"]+)["\']'], - webpage, 'json vp url', default=None) + patterns, webpage, 'json vp url', default=None) if not json_url: iframe_url = self._html_search_regex( r'<iframe[^>]+src=(["\'])(?P<url>.+\bjson_url=.+?)\1', diff --git a/youtube_dl/extractor/atresplayer.py b/youtube_dl/extractor/atresplayer.py index 29f8795d3..3fb042cea 100644 --- a/youtube_dl/extractor/atresplayer.py +++ b/youtube_dl/extractor/atresplayer.py @@ -2,16 +2,18 @@ from __future__ import unicode_literals import time import hmac +import hashlib +import re from .common import InfoExtractor from ..compat import ( compat_str, compat_urllib_parse, - compat_urllib_request, ) from ..utils import ( int_or_none, float_or_none, + sanitized_Request, xpath_text, ExtractorError, ) @@ -32,6 +34,19 @@ class AtresPlayerIE(InfoExtractor): 'duration': 5527.6, 'thumbnail': 're:^https?://.*\.jpg$', }, + 'skip': 'This video is only available for registered users' + }, + { + 'url': 'http://www.atresplayer.com/television/especial/videoencuentros/temporada-1/capitulo-112-david-bustamante_2014121600375.html', + 'md5': '0d0e918533bbd4b263f2de4d197d4aac', + 'info_dict': { + 'id': 'capitulo-112-david-bustamante', + 'ext': 'flv', + 'title': 'David Bustamante', + 'description': 'md5:f33f1c0a05be57f6708d4dd83a3b81c6', + 'duration': 1439.0, + 'thumbnail': 're:^https?://.*\.jpg$', + }, }, { 'url': 'http://www.atresplayer.com/television/series/el-secreto-de-puente-viejo/el-chico-de-los-tres-lunares/capitulo-977-29-12-14_2014122400174.html', @@ -50,6 +65,13 @@ class AtresPlayerIE(InfoExtractor): _LOGIN_URL = 'https://servicios.atresplayer.com/j_spring_security_check' + _ERRORS = { + 'UNPUBLISHED': 'We\'re sorry, but this video is not yet available.', + 'DELETED': 'This video has expired and is no longer available for online streaming.', + 'GEOUNPUBLISHED': 'We\'re 
sorry, but this video is not available in your region due to rights restrictions.', + # 'PREMIUM': 'PREMIUM', + } + def _real_initialize(self): self._login() @@ -63,7 +85,7 @@ class AtresPlayerIE(InfoExtractor): 'j_password': password, } - request = compat_urllib_request.Request( + request = sanitized_Request( self._LOGIN_URL, compat_urllib_parse.urlencode(login_form).encode('utf-8')) request.add_header('Content-Type', 'application/x-www-form-urlencoded') response = self._download_webpage( @@ -83,58 +105,77 @@ class AtresPlayerIE(InfoExtractor): episode_id = self._search_regex( r'episode="([^"]+)"', webpage, 'episode id') + request = sanitized_Request( + self._PLAYER_URL_TEMPLATE % episode_id, + headers={'User-Agent': self._USER_AGENT}) + player = self._download_json(request, episode_id, 'Downloading player JSON') + + episode_type = player.get('typeOfEpisode') + error_message = self._ERRORS.get(episode_type) + if error_message: + raise ExtractorError( + '%s returned error: %s' % (self.IE_NAME, error_message), expected=True) + + formats = [] + video_url = player.get('urlVideo') + if video_url: + format_info = { + 'url': video_url, + 'format_id': 'http', + } + mobj = re.search(r'(?P<bitrate>\d+)K_(?P<width>\d+)x(?P<height>\d+)', video_url) + if mobj: + format_info.update({ + 'width': int_or_none(mobj.group('width')), + 'height': int_or_none(mobj.group('height')), + 'tbr': int_or_none(mobj.group('bitrate')), + }) + formats.append(format_info) + + m3u8_url = player.get('urlVideoHls') + if m3u8_url: + formats.extend(self._extract_m3u8_formats( + m3u8_url, episode_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)) + timestamp = int_or_none(self._download_webpage( self._TIME_API_URL, video_id, 'Downloading timestamp', fatal=False), 1000, time.time()) timestamp_shifted = compat_str(timestamp + self._TIMESTAMP_SHIFT) token = hmac.new( self._MAGIC.encode('ascii'), - (episode_id + timestamp_shifted).encode('utf-8') + (episode_id + timestamp_shifted).encode('utf-8'), hashlib.md5 ).hexdigest() - formats = [] - for fmt in ['windows', 'android_tablet']: - request = compat_urllib_request.Request( - self._URL_VIDEO_TEMPLATE.format(fmt, episode_id, timestamp_shifted, token)) - request.add_header('User-Agent', self._USER_AGENT) - - fmt_json = self._download_json( - request, video_id, 'Downloading %s video JSON' % fmt) - - result = fmt_json.get('resultDes') - if result.lower() != 'ok': - raise ExtractorError( - '%s returned error: %s' % (self.IE_NAME, result), expected=True) - - for format_id, video_url in fmt_json['resultObject'].items(): - if format_id == 'token' or not video_url.startswith('http'): - continue - if video_url.endswith('/Manifest'): - if 'geodeswowsmpra3player' in video_url: - f4m_path = video_url.split('smil:', 1)[-1].split('free_', 1)[0] - f4m_url = 'http://drg.antena3.com/{0}hds/es/sd.f4m'.format(f4m_path) - # this videos are protected by DRM, the f4m downloader doesn't support them - continue - else: - f4m_url = video_url[:-9] + '/manifest.f4m' - formats.extend(self._extract_f4m_formats(f4m_url, video_id)) - else: - formats.append({ - 'url': video_url, - 'format_id': 'android-%s' % format_id, - 'preference': 1, - }) - self._sort_formats(formats) + request = sanitized_Request( + self._URL_VIDEO_TEMPLATE.format('windows', episode_id, timestamp_shifted, token), + headers={'User-Agent': self._USER_AGENT}) - player = self._download_json( - self._PLAYER_URL_TEMPLATE % episode_id, - episode_id) + fmt_json = self._download_json( + request, video_id, 'Downloading windows video JSON') + + result 
= fmt_json.get('resultDes') + if result.lower() != 'ok': + raise ExtractorError( + '%s returned error: %s' % (self.IE_NAME, result), expected=True) + + for format_id, video_url in fmt_json['resultObject'].items(): + if format_id == 'token' or not video_url.startswith('http'): + continue + if 'geodeswowsmpra3player' in video_url: + f4m_path = video_url.split('smil:', 1)[-1].split('free_', 1)[0] + f4m_url = 'http://drg.antena3.com/{0}hds/es/sd.f4m'.format(f4m_path) + # these videos are protected by DRM, the f4m downloader doesn't support them + continue + else: + f4m_url = video_url[:-9] + '/manifest.f4m' + formats.extend(self._extract_f4m_formats(f4m_url, video_id, f4m_id='hds', fatal=False)) + self._sort_formats(formats) path_data = player.get('pathData') episode = self._download_xml( - self._EPISODE_URL_TEMPLATE % path_data, - video_id, 'Downloading episode XML') + self._EPISODE_URL_TEMPLATE % path_data, video_id, + 'Downloading episode XML') duration = float_or_none(xpath_text( episode, './media/asset/info/technical/contentDuration', 'duration')) diff --git a/youtube_dl/extractor/audimedia.py b/youtube_dl/extractor/audimedia.py new file mode 100644 index 000000000..3b2effa15 --- /dev/null +++ b/youtube_dl/extractor/audimedia.py @@ -0,0 +1,80 @@ +# coding: utf-8 +from __future__ import unicode_literals + +from .common import InfoExtractor +from ..utils import ( + int_or_none, + parse_iso8601, + sanitized_Request, +) + + +class AudiMediaIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?audimedia\.tv/(?:en|de)/vid/(?P<id>[^/?#]+)' + _TEST = { + 'url': 'https://audimedia.tv/en/vid/60-seconds-of-audi-sport-104-2015-wec-bahrain-rookie-test', + 'md5': '79a8b71c46d49042609795ab59779b66', + 'info_dict': { + 'id': '1565', + 'ext': 'mp4', + 'title': '60 Seconds of Audi Sport 104/2015 - WEC Bahrain, Rookie Test', + 'description': 'md5:60e5d30a78ced725f7b8d34370762941', + 'upload_date': '20151124', + 'timestamp': 1448354940, + 'duration': 74022, + 'view_count': int, + } + } + # extracted from https://audimedia.tv/assets/embed/embedded-player.js (dataSourceAuthToken) + _AUTH_TOKEN = 'e25b42847dba18c6c8816d5d8ce94c326e06823ebf0859ed164b3ba169be97f2' + + def _real_extract(self, url): + display_id = self._match_id(url) + webpage = self._download_webpage(url, display_id) + + raw_payload = self._search_regex(r'<script[^>]+class="amtv-embed"[^>]+id="([^"]+)"', webpage, 'raw payload') + _, stage_mode, video_id, lang = raw_payload.split('-') + + # TODO: handle s and e stage_mode (live streams and ended live streams) + if stage_mode not in ('s', 'e'): + request = sanitized_Request( + 'https://audimedia.tv/api/video/v1/videos/%s?embed[]=video_versions&embed[]=thumbnail_image&where[content_language_iso]=%s' % (video_id, lang), + headers={'X-Auth-Token': self._AUTH_TOKEN}) + json_data = self._download_json(request, video_id)['results'] + formats = [] + + stream_url_hls = json_data.get('stream_url_hls') + if stream_url_hls: + formats.extend(self._extract_m3u8_formats( + stream_url_hls, video_id, 'mp4', + entry_protocol='m3u8_native', m3u8_id='hls', fatal=False)) + + stream_url_hds = json_data.get('stream_url_hds') + if stream_url_hds: + formats.extend(self._extract_f4m_formats( + stream_url_hds + '?hdcore=3.4.0', + video_id, f4m_id='hds', fatal=False)) + + for video_version in json_data.get('video_versions'): + video_version_url = video_version.get('download_url') or video_version.get('stream_url') + if not video_version_url: + continue + formats.append({ + 'url': video_version_url, + 'width': 
int_or_none(video_version.get('width')), + 'height': int_or_none(video_version.get('height')), + 'abr': int_or_none(video_version.get('audio_bitrate')), + 'vbr': int_or_none(video_version.get('video_bitrate')), + }) + self._sort_formats(formats) + + return { + 'id': video_id, + 'title': json_data['title'], + 'description': json_data.get('subtitle'), + 'thumbnail': json_data.get('thumbnail_image', {}).get('file'), + 'timestamp': parse_iso8601(json_data.get('publication_date')), + 'duration': int_or_none(json_data.get('duration')), + 'view_count': int_or_none(json_data.get('view_count')), + 'formats': formats, + } diff --git a/youtube_dl/extractor/audiomack.py b/youtube_dl/extractor/audiomack.py index 693ba22c6..3eed91279 100644 --- a/youtube_dl/extractor/audiomack.py +++ b/youtube_dl/extractor/audiomack.py @@ -56,7 +56,7 @@ class AudiomackIE(InfoExtractor): # API is inconsistent with errors if 'url' not in api_response or not api_response['url'] or 'error' in api_response: - raise ExtractorError('Invalid url %s', url) + raise ExtractorError('Invalid url %s' % url) # Audiomack wraps a lot of soundcloud tracks in their branded wrapper # if so, pass the work off to the soundcloud extractor diff --git a/youtube_dl/extractor/bambuser.py b/youtube_dl/extractor/bambuser.py index 8dff1d6e3..da986e063 100644 --- a/youtube_dl/extractor/bambuser.py +++ b/youtube_dl/extractor/bambuser.py @@ -6,13 +6,13 @@ import itertools from .common import InfoExtractor from ..compat import ( compat_urllib_parse, - compat_urllib_request, compat_str, ) from ..utils import ( ExtractorError, int_or_none, float_or_none, + sanitized_Request, ) @@ -57,7 +57,7 @@ class BambuserIE(InfoExtractor): 'pass': password, } - request = compat_urllib_request.Request( + request = sanitized_Request( self._LOGIN_URL, compat_urllib_parse.urlencode(login_form).encode('utf-8')) request.add_header('Referer', self._LOGIN_URL) response = self._download_webpage( @@ -126,7 +126,7 @@ class BambuserChannelIE(InfoExtractor): '&sort=created&access_mode=0%2C1%2C2&limit={count}' '&method=broadcast&format=json&vid_older_than={last}' ).format(user=user, count=self._STEP, last=last_id) - req = compat_urllib_request.Request(req_url) + req = sanitized_Request(req_url) # Without setting this header, we wouldn't get any result req.add_header('Referer', 'http://bambuser.com/channel/%s' % user) data = self._download_json( diff --git a/youtube_dl/extractor/bandcamp.py b/youtube_dl/extractor/bandcamp.py index 505877b77..c1ef8051d 100644 --- a/youtube_dl/extractor/bandcamp.py +++ b/youtube_dl/extractor/bandcamp.py @@ -10,6 +10,8 @@ from ..compat import ( ) from ..utils import ( ExtractorError, + float_or_none, + int_or_none, ) @@ -52,11 +54,11 @@ class BandcampIE(InfoExtractor): ext, abr_str = format_id.split('-', 1) formats.append({ 'format_id': format_id, - 'url': format_url, + 'url': self._proto_relative_url(format_url, 'http:'), 'ext': ext, 'vcodec': 'none', 'acodec': ext, - 'abr': int(abr_str), + 'abr': int_or_none(abr_str), }) self._sort_formats(formats) @@ -65,7 +67,7 @@ class BandcampIE(InfoExtractor): 'id': compat_str(data['id']), 'title': data['title'], 'formats': formats, - 'duration': float(data['duration']), + 'duration': float_or_none(data.get('duration')), } else: raise ExtractorError('No free songs found') @@ -93,8 +95,8 @@ class BandcampIE(InfoExtractor): final_url_webpage = self._download_webpage(request_url, video_id, 'Requesting download url') # If we could correctly generate the .rand field the url would be # in the "download_url" key - 
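# Several hunks in this commit (amp.py, bandcamp.py) wrap extracted links in
# self._proto_relative_url(url, 'http:') so that scheme-relative '//host/path'
# URLs found in feeds and JSON become fetchable. Roughly, the helper behaves
# like this simplified standalone sketch (not a verbatim copy of the
# youtube-dl implementation; the sample URLs are illustrative):
def proto_relative_url(url, scheme='http:'):
    if url is None:
        return url
    return scheme + url if url.startswith('//') else url

assert proto_relative_url('//popplers5.bandcamp.com/download/track?enc=mp3') == \
    'http://popplers5.bandcamp.com/download/track?enc=mp3'
assert proto_relative_url('http://example.com/a.mp3') == 'http://example.com/a.mp3'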
final_url = self._search_regex( - r'"retry_url":"(.*?)"', final_url_webpage, 'final video URL') + final_url = self._proto_relative_url(self._search_regex( + r'"retry_url":"(.+?)"', final_url_webpage, 'final video URL'), 'http:') return { 'id': video_id, diff --git a/youtube_dl/extractor/bbc.py b/youtube_dl/extractor/bbc.py index 42526357a..923273fb2 100644 --- a/youtube_dl/extractor/bbc.py +++ b/youtube_dl/extractor/bbc.py @@ -2,7 +2,6 @@ from __future__ import unicode_literals import re -import xml.etree.ElementTree from .common import InfoExtractor from ..utils import ( @@ -11,29 +10,45 @@ from ..utils import ( int_or_none, parse_duration, parse_iso8601, + remove_end, + unescapeHTML, +) +from ..compat import ( + compat_etree_fromstring, + compat_HTTPError, ) -from ..compat import compat_HTTPError class BBCCoUkIE(InfoExtractor): IE_NAME = 'bbc.co.uk' IE_DESC = 'BBC iPlayer' - _VALID_URL = r'https?://(?:www\.)?bbc\.co\.uk/(?:(?:(?:programmes|iplayer(?:/[^/]+)?/(?:episode|playlist))/)|music/clips[/#])(?P<id>[\da-z]{8})' + _ID_REGEX = r'[pb][\da-z]{7}' + _VALID_URL = r'https?://(?:www\.)?bbc\.co\.uk/(?:(?:programmes/(?!articles/)|iplayer(?:/[^/]+)?/(?:episode/|playlist/))|music/clips[/#])(?P<id>%s)' % _ID_REGEX _MEDIASELECTOR_URLS = [ + # Provides HQ HLS streams with even better quality than the pc mediaset but fails + # with geolocation in some cases when it's even not geo restricted at all (e.g. + # http://www.bbc.co.uk/programmes/b06bp7lf). Also may fail with selectionunavailable. 'http://open.live.bbc.co.uk/mediaselector/5/select/version/2.0/mediaset/iptv-all/vpid/%s', 'http://open.live.bbc.co.uk/mediaselector/5/select/version/2.0/mediaset/pc/vpid/%s', ] + _MEDIASELECTION_NS = 'http://bbc.co.uk/2008/mp/mediaselection' + _EMP_PLAYLIST_NS = 'http://bbc.co.uk/2008/emp/playlist' + + _NAMESPACES = ( + _MEDIASELECTION_NS, + _EMP_PLAYLIST_NS, + ) + _TESTS = [ { 'url': 'http://www.bbc.co.uk/programmes/b039g8p7', 'info_dict': { 'id': 'b039d07m', 'ext': 'flv', - 'title': 'Kaleidoscope, Leonard Cohen', + 'title': 'Leonard Cohen, Kaleidoscope - BBC Radio 4', 'description': 'The Canadian poet and songwriter reflects on his musical career.', - 'duration': 1740, }, 'params': { # rtmp download @@ -96,7 +111,8 @@ class BBCCoUkIE(InfoExtractor): 'params': { # rtmp download 'skip_download': True, - } + }, + 'skip': 'Episode is no longer available on BBC iPlayer Radio', }, { 'url': 'http://www.bbc.co.uk/music/clips/p02frcc3', 'note': 'Audio', @@ -154,6 +170,21 @@ class BBCCoUkIE(InfoExtractor): }, 'skip': 'geolocation', }, { + # iptv-all mediaset fails with geolocation however there is no geo restriction + # for this programme at all + 'url': 'http://www.bbc.co.uk/programmes/b06bp7lf', + 'info_dict': { + 'id': 'b06bp7kf', + 'ext': 'flv', + 'title': "Annie Mac's Friday Night, B.Traits sits in for Annie", + 'description': 'B.Traits sits in for Annie Mac with a Mini-Mix from Disclosure.', + 'duration': 10800, + }, + 'params': { + # rtmp download + 'skip_download': True, + }, + }, { 'url': 'http://www.bbc.co.uk/iplayer/playlist/p01dvks4', 'only_matching': True, }, { @@ -175,6 +206,7 @@ class BBCCoUkIE(InfoExtractor): def _extract_connection(self, connection, programme_id): formats = [] + kind = connection.get('kind') protocol = connection.get('protocol') supplier = connection.get('supplier') if protocol == 'http': @@ -191,16 +223,14 @@ class BBCCoUkIE(InfoExtractor): elif transfer_format == 'dash': pass elif transfer_format == 'hls': - m3u8_formats = self._extract_m3u8_formats( + 
formats.extend(self._extract_m3u8_formats( href, programme_id, ext='mp4', entry_protocol='m3u8_native', - m3u8_id=supplier, fatal=False) - if m3u8_formats: - formats.extend(m3u8_formats) + m3u8_id=supplier, fatal=False)) # Direct link else: formats.append({ 'url': href, - 'format_id': supplier, + 'format_id': supplier or kind or protocol, }) elif protocol == 'rtmp': application = connection.get('application', 'ondemand') @@ -220,16 +250,24 @@ class BBCCoUkIE(InfoExtractor): return formats def _extract_items(self, playlist): - return playlist.findall('./{http://bbc.co.uk/2008/emp/playlist}item') + return playlist.findall('./{%s}item' % self._EMP_PLAYLIST_NS) + + def _findall_ns(self, element, xpath): + elements = [] + for ns in self._NAMESPACES: + elements.extend(element.findall(xpath % ns)) + return elements def _extract_medias(self, media_selection): - error = media_selection.find('./{http://bbc.co.uk/2008/mp/mediaselection}error') + error = media_selection.find('./{%s}error' % self._MEDIASELECTION_NS) + if error is None: + error = media_selection.find('./{%s}error' % self._EMP_PLAYLIST_NS) if error is not None: raise BBCCoUkIE.MediaSelectionError(error.get('id')) - return media_selection.findall('./{http://bbc.co.uk/2008/mp/mediaselection}media') + return self._findall_ns(media_selection, './{%s}media') def _extract_connections(self, media): - return media.findall('./{http://bbc.co.uk/2008/mp/mediaselection}connection') + return self._findall_ns(media, './{%s}connection') def _extract_video(self, media, programme_id): formats = [] @@ -243,13 +281,14 @@ class BBCCoUkIE(InfoExtractor): conn_formats = self._extract_connection(connection, programme_id) for format in conn_formats: format.update({ - 'format_id': '%s_%s' % (service, format['format_id']), 'width': width, 'height': height, 'vbr': vbr, 'vcodec': vcodec, 'filesize': file_size, }) + if service: + format['format_id'] = '%s_%s' % (service, format['format_id']) formats.extend(conn_formats) return formats @@ -294,7 +333,7 @@ class BBCCoUkIE(InfoExtractor): return self._download_media_selector_url( mediaselector_url % programme_id, programme_id) except BBCCoUkIE.MediaSelectionError as e: - if e.id == 'notukerror': + if e.id in ('notukerror', 'geolocation', 'selectionunavailable'): last_exception = e continue self._raise_extractor_error(e) @@ -305,8 +344,8 @@ class BBCCoUkIE(InfoExtractor): media_selection = self._download_xml( url, programme_id, 'Downloading media selection XML') except ExtractorError as ee: - if isinstance(ee.cause, compat_HTTPError) and ee.cause.code == 403: - media_selection = xml.etree.ElementTree.fromstring(ee.cause.read().decode('utf-8')) + if isinstance(ee.cause, compat_HTTPError) and ee.cause.code in (403, 404): + media_selection = compat_etree_fromstring(ee.cause.read().decode('utf-8')) else: raise return self._process_media_selector(media_selection, programme_id) @@ -364,7 +403,7 @@ class BBCCoUkIE(InfoExtractor): url, playlist_id, 'Downloading legacy playlist XML') def _extract_from_legacy_playlist(self, playlist, playlist_id): - no_items = playlist.find('./{http://bbc.co.uk/2008/emp/playlist}noItems') + no_items = playlist.find('./{%s}noItems' % self._EMP_PLAYLIST_NS) if no_items is not None: reason = no_items.get('reason') if reason == 'preAvailability': @@ -381,8 +420,9 @@ class BBCCoUkIE(InfoExtractor): kind = item.get('kind') if kind != 'programme' and kind != 'radioProgramme': continue - title = playlist.find('./{http://bbc.co.uk/2008/emp/playlist}title').text - description = 
playlist.find('./{http://bbc.co.uk/2008/emp/playlist}summary').text + title = playlist.find('./{%s}title' % self._EMP_PLAYLIST_NS).text + description_el = playlist.find('./{%s}summary' % self._EMP_PLAYLIST_NS) + description = description_el.text if description_el is not None else None def get_programme_id(item): def get_from_attributes(item): @@ -391,16 +431,18 @@ class BBCCoUkIE(InfoExtractor): if value and re.match(r'^[pb][\da-z]{7}$', value): return value get_from_attributes(item) - mediator = item.find('./{http://bbc.co.uk/2008/emp/playlist}mediator') + mediator = item.find('./{%s}mediator' % self._EMP_PLAYLIST_NS) if mediator is not None: return get_from_attributes(mediator) programme_id = get_programme_id(item) duration = int_or_none(item.get('duration')) - # TODO: programme_id can be None and media items can be incorporated right inside - # playlist's item (e.g. http://www.bbc.com/turkce/haberler/2015/06/150615_telabyad_kentin_cogu) - # as f4m and m3u8 - formats, subtitles = self._download_media_selector(programme_id) + + if programme_id: + formats, subtitles = self._download_media_selector(programme_id) + else: + formats, subtitles = self._process_media_selector(item, playlist_id) + programme_id = playlist_id return programme_id, title, description, duration, formats, subtitles @@ -410,6 +452,7 @@ class BBCCoUkIE(InfoExtractor): webpage = self._download_webpage(url, group_id, 'Downloading video page') programme_id = None + duration = None tviplayer = self._search_regex( r'mediator\.bind\(({.+?})\s*,\s*document\.getElementById', @@ -422,14 +465,16 @@ class BBCCoUkIE(InfoExtractor): if not programme_id: programme_id = self._search_regex( - r'"vpid"\s*:\s*"([\da-z]{8})"', webpage, 'vpid', fatal=False, default=None) + r'"vpid"\s*:\s*"(%s)"' % self._ID_REGEX, webpage, 'vpid', fatal=False, default=None) if programme_id: formats, subtitles = self._download_media_selector(programme_id) title = self._og_search_title(webpage) description = self._search_regex( r'<p class="[^"]*medium-description[^"]*">([^<]+)</p>', - webpage, 'description', fatal=False) + webpage, 'description', default=None) + if not description: + description = self._html_search_meta('description', webpage) else: programme_id, title, description, duration, formats, subtitles = self._download_playlist(group_id) @@ -452,6 +497,9 @@ class BBCIE(BBCCoUkIE): _VALID_URL = r'https?://(?:www\.)?bbc\.(?:com|co\.uk)/(?:[^/]+/)+(?P<id>[^/#?]+)' _MEDIASELECTOR_URLS = [ + # Provides HQ HLS streams but fails with geolocation in some cases when it's + # even not geo restricted at all + 'http://open.live.bbc.co.uk/mediaselector/5/select/version/2.0/mediaset/iptv-all/vpid/%s', # Provides more formats, namely direct mp4 links, but fails on some videos with # notukerror for non UK (?) users (e.g. 
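# The bbc.py changes above swap hard-coded '{namespace}' prefixes for xpath
# templates expanded over _NAMESPACES, because the same <media>/<connection>
# structure can arrive in either the mediaselection namespace or the emp
# playlist namespace. A minimal sketch of that dual lookup over an invented
# document (the XML payload is made up for illustration):
import xml.etree.ElementTree as ET

MEDIASELECTION_NS = 'http://bbc.co.uk/2008/mp/mediaselection'
EMP_PLAYLIST_NS = 'http://bbc.co.uk/2008/emp/playlist'

def findall_ns(element, xpath_tpl):
    elements = []
    for ns in (MEDIASELECTION_NS, EMP_PLAYLIST_NS):
        elements.extend(element.findall(xpath_tpl % ns))
    return elements

doc = ET.fromstring(
    '<root><media xmlns="%s"/><media xmlns="%s"/></root>'
    % (MEDIASELECTION_NS, EMP_PLAYLIST_NS))
assert len(findall_ns(doc, './{%s}media')) == 2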
# http://www.bbc.com/travel/story/20150625-sri-lankas-spicy-secret) @@ -461,8 +509,7 @@ class BBCIE(BBCCoUkIE): ] _TESTS = [{ - # article with multiple videos embedded with data-media-meta containing - # playlist.sxml, externalId and no direct video links + # article with multiple videos embedded with data-playable containing vpids 'url': 'http://www.bbc.com/news/world-europe-32668511', 'info_dict': { 'id': 'world-europe-32668511', @@ -471,7 +518,7 @@ class BBCIE(BBCCoUkIE): }, 'playlist_count': 2, }, { - # article with multiple videos embedded with data-media-meta (more videos) + # article with multiple videos embedded with data-playable (more videos) 'url': 'http://www.bbc.com/news/business-28299555', 'info_dict': { 'id': 'business-28299555', @@ -482,6 +529,7 @@ class BBCIE(BBCCoUkIE): 'skip': 'Save time', }, { # article with multiple videos embedded with `new SMP()` + # broken 'url': 'http://www.bbc.co.uk/blogs/adamcurtis/entries/3662a707-0af9-3149-963f-47bea720b460', 'info_dict': { 'id': '3662a707-0af9-3149-963f-47bea720b460', @@ -489,12 +537,13 @@ class BBCIE(BBCCoUkIE): }, 'playlist_count': 18, }, { - # single video embedded with mediaAssetPage.init() + # single video embedded with data-playable containing vpid 'url': 'http://www.bbc.com/news/world-europe-32041533', 'info_dict': { 'id': 'p02mprgb', 'ext': 'mp4', 'title': 'Aerial footage showed the site of the crash in the Alps - courtesy BFM TV', + 'description': 'md5:2868290467291b37feda7863f7a83f54', 'duration': 47, 'timestamp': 1427219242, 'upload_date': '20150324', @@ -504,15 +553,14 @@ class BBCIE(BBCCoUkIE): 'skip_download': True, } }, { - # article with single video embedded with data-media-meta containing - # direct video links (for now these are extracted) and playlist.xml (with - # media items as f4m and m3u8 - currently unsupported) + # article with single video embedded with data-playable containing XML playlist + # with direct video links as progressiveDownloadUrl (for now these are extracted) + # and playlist with f4m and m3u8 as streamingUrl 'url': 'http://www.bbc.com/turkce/haberler/2015/06/150615_telabyad_kentin_cogu', 'info_dict': { 'id': '150615_telabyad_kentin_cogu', 'ext': 'mp4', 'title': "YPG: Tel Abyad'ın tamamı kontrolümüzde", - 'duration': 47, 'timestamp': 1434397334, 'upload_date': '20150615', }, @@ -520,13 +568,12 @@ class BBCIE(BBCCoUkIE): 'skip_download': True, } }, { - # single video embedded with mediaAssetPage.init() (regional section) + # single video embedded with data-playable containing XML playlists (regional section) 'url': 'http://www.bbc.com/mundo/video_fotos/2015/06/150619_video_honduras_militares_hospitales_corrupcion_aw', 'info_dict': { 'id': '150619_video_honduras_militares_hospitales_corrupcion_aw', 'ext': 'mp4', 'title': 'Honduras militariza sus hospitales por nuevo escándalo de corrupción', - 'duration': 87, 'timestamp': 1434713142, 'upload_date': '20150619', }, @@ -541,6 +588,7 @@ class BBCIE(BBCCoUkIE): 'ext': 'mp4', 'title': '''Judge Mindy Glazer: "I'm sorry to see you here... I always wondered what happened to you"''', 'duration': 56, + 'description': '''Judge Mindy Glazer: "I'm sorry to see you here... 
I always wondered what happened to you"''', }, 'params': { 'skip_download': True, @@ -568,21 +616,21 @@ class BBCIE(BBCCoUkIE): 'ext': 'mp4', 'title': 'Hyundai Santa Fe Sport: Rock star', 'description': 'md5:b042a26142c4154a6e472933cf20793d', - 'timestamp': 1368473503, - 'upload_date': '20130513', + 'timestamp': 1415867444, + 'upload_date': '20141113', }, 'params': { # rtmp download 'skip_download': True, } }, { - # single video with playlist.sxml URL + # single video with playlist.sxml URL in playlist param 'url': 'http://www.bbc.com/sport/0/football/33653409', 'info_dict': { 'id': 'p02xycnp', 'ext': 'mp4', 'title': 'Transfers: Cristiano Ronaldo to Man Utd, Arsenal to spend?', - 'description': 'md5:398fca0e2e701c609d726e034fa1fc89', + 'description': 'BBC Sport\'s David Ornstein has the latest transfer gossip, including rumours of a Manchester United return for Cristiano Ronaldo.', 'duration': 140, }, 'params': { @@ -590,6 +638,14 @@ class BBCIE(BBCCoUkIE): 'skip_download': True, } }, { + # article with multiple videos embedded with playlist.sxml in playlist param + 'url': 'http://www.bbc.com/sport/0/football/34475836', + 'info_dict': { + 'id': '34475836', + 'title': 'What Liverpool can expect from Klopp', + }, + 'playlist_count': 3, + }, { # single video with playlist URL from weather section 'url': 'http://www.bbc.com/weather/features/33601775', 'only_matching': True, @@ -601,7 +657,7 @@ class BBCIE(BBCCoUkIE): @classmethod def suitable(cls, url): - return False if BBCCoUkIE.suitable(url) else super(BBCIE, cls).suitable(url) + return False if BBCCoUkIE.suitable(url) or BBCCoUkArticleIE.suitable(url) else super(BBCIE, cls).suitable(url) def _extract_from_media_meta(self, media_meta, video_id): # Direct links to media in media metadata (e.g. @@ -630,40 +686,109 @@ class BBCIE(BBCCoUkIE): return [], [] + def _extract_from_playlist_sxml(self, url, playlist_id, timestamp): + programme_id, title, description, duration, formats, subtitles = \ + self._process_legacy_playlist_url(url, playlist_id) + self._sort_formats(formats) + return { + 'id': programme_id, + 'title': title, + 'description': description, + 'duration': duration, + 'timestamp': timestamp, + 'formats': formats, + 'subtitles': subtitles, + } + def _real_extract(self, url): playlist_id = self._match_id(url) webpage = self._download_webpage(url, playlist_id) - timestamp = parse_iso8601(self._search_regex( - [r'"datePublished":\s*"([^"]+)', - r'<meta[^>]+property="article:published_time"[^>]+content="([^"]+)"', - r'itemprop="datePublished"[^>]+datetime="([^"]+)"'], - webpage, 'date', default=None)) - - # single video with playlist.sxml URL (e.g. 
http://www.bbc.com/sport/0/football/33653409) - playlist = self._search_regex( - r'<param[^>]+name="playlist"[^>]+value="([^"]+)"', - webpage, 'playlist', default=None) - if playlist: - programme_id, title, description, duration, formats, subtitles = \ - self._process_legacy_playlist_url(playlist, playlist_id) - self._sort_formats(formats) - return { - 'id': programme_id, - 'title': title, - 'description': description, - 'duration': duration, - 'timestamp': timestamp, - 'formats': formats, - 'subtitles': subtitles, - } + timestamp = None + playlist_title = None + playlist_description = None + + ld = self._parse_json( + self._search_regex( + r'(?s)<script type="application/ld\+json">(.+?)</script>', + webpage, 'ld json', default='{}'), + playlist_id, fatal=False) + if ld: + timestamp = parse_iso8601(ld.get('datePublished')) + playlist_title = ld.get('headline') + playlist_description = ld.get('articleBody') + + if not timestamp: + timestamp = parse_iso8601(self._search_regex( + [r'<meta[^>]+property="article:published_time"[^>]+content="([^"]+)"', + r'itemprop="datePublished"[^>]+datetime="([^"]+)"', + r'"datePublished":\s*"([^"]+)'], + webpage, 'date', default=None)) + + entries = [] + + # article with multiple videos embedded with playlist.sxml (e.g. + # http://www.bbc.com/sport/0/football/34475836) + playlists = re.findall(r'<param[^>]+name="playlist"[^>]+value="([^"]+)"', webpage) + playlists.extend(re.findall(r'data-media-id="([^"]+/playlist\.sxml)"', webpage)) + if playlists: + entries = [ + self._extract_from_playlist_sxml(playlist_url, playlist_id, timestamp) + for playlist_url in playlists] + + # news article with multiple videos embedded with data-playable + data_playables = re.findall(r'data-playable=(["\'])({.+?})\1', webpage) + if data_playables: + for _, data_playable_json in data_playables: + data_playable = self._parse_json( + unescapeHTML(data_playable_json), playlist_id, fatal=False) + if not data_playable: + continue + settings = data_playable.get('settings', {}) + if settings: + # data-playable with video vpid in settings.playlistObject.items (e.g. + # http://www.bbc.com/news/world-us-canada-34473351) + playlist_object = settings.get('playlistObject', {}) + if playlist_object: + items = playlist_object.get('items') + if items and isinstance(items, list): + title = playlist_object['title'] + description = playlist_object.get('summary') + duration = int_or_none(items[0].get('duration')) + programme_id = items[0].get('vpid') + formats, subtitles = self._download_media_selector(programme_id) + self._sort_formats(formats) + entries.append({ + 'id': programme_id, + 'title': title, + 'description': description, + 'timestamp': timestamp, + 'duration': duration, + 'formats': formats, + 'subtitles': subtitles, + }) + else: + # data-playable without vpid but with playlist.sxml URLs + # in otherSettings.playlist (e.g. + # http://www.bbc.com/turkce/multimedya/2015/10/151010_vid_ankara_patlama_ani) + playlist = data_playable.get('otherSettings', {}).get('playlist', {}) + if playlist: + entries.append(self._extract_from_playlist_sxml( + playlist.get('progressiveDownloadUrl'), playlist_id, timestamp)) + + if entries: + playlist_title = playlist_title or remove_end(self._og_search_title(webpage), ' - BBC News') + playlist_description = playlist_description or self._og_search_description(webpage, default=None) + return self.playlist_result(entries, playlist_id, playlist_title, playlist_description) # single video story (e.g. 
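# The rewritten BBCIE._real_extract above collects every data-playable
# attribute, unescapes its HTML entities and parses it as JSON, then branches
# on settings.playlistObject (vpid items) versus otherSettings.playlist
# (legacy playlist.sxml URLs). The scraping step, sketched standalone over an
# invented page fragment:
import json
import re

try:
    from html import unescape  # Python 3
except ImportError:  # Python 2
    from HTMLParser import HTMLParser
    unescape = HTMLParser().unescape

page = ('<div data-playable=\'{"settings": {"playlistObject": '
        '{"title": "clip", "items": [{"vpid": "p02mprgb"}]}}}\'></div>')
for _, payload in re.findall(r'data-playable=(["\'])({.+?})\1', page):
    data = json.loads(unescape(payload))
    for item in data['settings']['playlistObject']['items']:
        print(item['vpid'])  # -> p02mprgb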
http://www.bbc.com/travel/story/20150625-sri-lankas-spicy-secret) programme_id = self._search_regex( - [r'data-video-player-vpid="([\da-z]{8})"', - r'<param[^>]+name="externalIdentifier"[^>]+value="([\da-z]{8})"'], + [r'data-video-player-vpid="(%s)"' % self._ID_REGEX, + r'<param[^>]+name="externalIdentifier"[^>]+value="(%s)"' % self._ID_REGEX, + r'videoId\s*:\s*["\'](%s)["\']' % self._ID_REGEX], webpage, 'vpid', default=None) + if programme_id: formats, subtitles = self._download_media_selector(programme_id) self._sort_formats(formats) @@ -696,7 +821,7 @@ class BBCIE(BBCCoUkIE): # Multiple video article (e.g. # http://www.bbc.co.uk/blogs/adamcurtis/entries/3662a707-0af9-3149-963f-47bea720b460) - EMBED_URL = r'https?://(?:www\.)?bbc\.co\.uk/(?:[^/]+/)+[\da-z]{8}(?:\b[^"]+)?' + EMBED_URL = r'https?://(?:www\.)?bbc\.co\.uk/(?:[^/]+/)+%s(?:\b[^"]+)?' % self._ID_REGEX entries = [] for match in extract_all(r'new\s+SMP\(({.+?})\)'): embed_url = match.get('playerSettings', {}).get('externalEmbedUrl') @@ -785,3 +910,33 @@ class BBCIE(BBCCoUkIE): }) return self.playlist_result(entries, playlist_id, playlist_title, playlist_description) + + +class BBCCoUkArticleIE(InfoExtractor): + _VALID_URL = 'http://www.bbc.co.uk/programmes/articles/(?P<id>[a-zA-Z0-9]+)' + IE_NAME = 'bbc.co.uk:article' + IE_DESC = 'BBC articles' + + _TEST = { + 'url': 'http://www.bbc.co.uk/programmes/articles/3jNQLTMrPlYGTBn0WV6M2MS/not-your-typical-role-model-ada-lovelace-the-19th-century-programmer', + 'info_dict': { + 'id': '3jNQLTMrPlYGTBn0WV6M2MS', + 'title': 'Calculating Ada: The Countess of Computing - Not your typical role model: Ada Lovelace the 19th century programmer - BBC Four', + 'description': 'Hannah Fry reveals some of her surprising discoveries about Ada Lovelace during filming.', + }, + 'playlist_count': 4, + 'add_ie': ['BBCCoUk'], + } + + def _real_extract(self, url): + playlist_id = self._match_id(url) + + webpage = self._download_webpage(url, playlist_id) + + title = self._og_search_title(webpage) + description = self._og_search_description(webpage).strip() + + entries = [self.url_result(programme_url) for programme_url in re.findall( + r'<div[^>]+typeof="Clip"[^>]+resource="([^"]+)"', webpage)] + + return self.playlist_result(entries, playlist_id, title, description) diff --git a/youtube_dl/extractor/beeg.py b/youtube_dl/extractor/beeg.py index b38057f2f..c8d921daf 100644 --- a/youtube_dl/extractor/beeg.py +++ b/youtube_dl/extractor/beeg.py @@ -1,65 +1,105 @@ from __future__ import unicode_literals -import re - from .common import InfoExtractor +from ..compat import ( + compat_chr, + compat_ord, + compat_urllib_parse_unquote, +) +from ..utils import ( + int_or_none, + parse_iso8601, +) class BeegIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?beeg\.com/(?P<id>\d+)' _TEST = { 'url': 'http://beeg.com/5416503', - 'md5': '1bff67111adb785c51d1b42959ec10e5', + 'md5': '46c384def73b33dbc581262e5ee67cef', 'info_dict': { 'id': '5416503', 'ext': 'mp4', 'title': 'Sultry Striptease', - 'description': 'md5:6db3c6177972822aaba18652ff59c773', - 'categories': list, # NSFW - 'thumbnail': 're:https?://.*\.jpg$', + 'description': 'md5:d22219c09da287c14bed3d6c37ce4bc2', + 'timestamp': 1391813355, + 'upload_date': '20140207', + 'duration': 383, + 'tags': list, 'age_limit': 18, } } def _real_extract(self, url): - mobj = re.match(self._VALID_URL, url) - video_id = mobj.group('id') + video_id = self._match_id(url) + + video = self._download_json( + 'http://beeg.com/api/v5/video/%s' % video_id, video_id) - webpage = 
self._download_webpage(url, video_id) + def split(o, e): + def cut(s, x): + n.append(s[:x]) + return s[x:] + n = [] + r = len(o) % e + if r > 0: + o = cut(o, r) + while len(o) > e: + o = cut(o, e) + n.append(o) + return n - quality_arr = self._search_regex( - r'(?s)var\s+qualityArr\s*=\s*{\s*(.+?)\s*}', webpage, 'quality formats') + def decrypt_key(key): + # Reverse engineered from http://static.beeg.com/cpl/1105.js + a = '5ShMcIQlssOd7zChAIOlmeTZDaUxULbJRnywYaiB' + e = compat_urllib_parse_unquote(key) + o = ''.join([ + compat_chr(compat_ord(e[n]) - compat_ord(a[n % len(a)]) % 21) + for n in range(len(e))]) + return ''.join(split(o, 3)[::-1]) - formats = [{ - 'url': fmt[1], - 'format_id': fmt[0], - 'height': int(fmt[0][:-1]), - } for fmt in re.findall(r"'([^']+)'\s*:\s*'([^']+)'", quality_arr)] + def decrypt_url(encrypted_url): + encrypted_url = self._proto_relative_url( + encrypted_url.replace('{DATA_MARKERS}', ''), 'http:') + key = self._search_regex( + r'/key=(.*?)%2Cend=', encrypted_url, 'key', default=None) + if not key: + return encrypted_url + return encrypted_url.replace(key, decrypt_key(key)) + formats = [] + for format_id, video_url in video.items(): + if not video_url: + continue + height = self._search_regex( + r'^(\d+)[pP]$', format_id, 'height', default=None) + if not height: + continue + formats.append({ + 'url': decrypt_url(video_url), + 'format_id': format_id, + 'height': int(height), + }) self._sort_formats(formats) - title = self._html_search_regex( - r'<title>([^<]+)\s*-\s*beeg\.?</title>', webpage, 'title') + title = video['title'] + video_id = video.get('id') or video_id + display_id = video.get('code') + description = video.get('desc') - description = self._html_search_regex( - r'<meta name="description" content="([^"]*)"', - webpage, 'description', fatal=False) - thumbnail = self._html_search_regex( - r'\'previewer.url\'\s*:\s*"([^"]*)"', - webpage, 'thumbnail', fatal=False) + timestamp = parse_iso8601(video.get('date'), ' ') + duration = int_or_none(video.get('duration')) - categories_str = self._html_search_regex( - r'<meta name="keywords" content="([^"]+)"', webpage, 'categories', fatal=False) - categories = ( - None if categories_str is None - else categories_str.split(',')) + tags = [tag.strip() for tag in video['tags'].split(',')] if video.get('tags') else None return { 'id': video_id, + 'display_id': display_id, 'title': title, 'description': description, - 'thumbnail': thumbnail, - 'categories': categories, + 'timestamp': timestamp, + 'duration': duration, + 'tags': tags, 'formats': formats, 'age_limit': 18, } diff --git a/youtube_dl/extractor/bild.py b/youtube_dl/extractor/bild.py index 4d8cce1ef..1a0184861 100644 --- a/youtube_dl/extractor/bild.py +++ b/youtube_dl/extractor/bild.py @@ -4,7 +4,7 @@ from __future__ import unicode_literals from .common import InfoExtractor from ..utils import ( int_or_none, - fix_xml_ampersands, + unescapeHTML, ) @@ -17,26 +17,24 @@ class BildIE(InfoExtractor): 'info_dict': { 'id': '38184146', 'ext': 'mp4', - 'title': 'BILD hat sie getestet', + 'title': 'Das können die neuen iPads', + 'description': 'md5:a4058c4fa2a804ab59c00d7244bbf62f', 'thumbnail': 're:^https?://.*\.jpg$', 'duration': 196, - 'description': 'Mit dem iPad Air 2 und dem iPad Mini 3 hat Apple zwei neue Tablet-Modelle präsentiert. BILD-Reporter Sven Stein durfte die Geräte bereits testen. 
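# decrypt_key in beeg.py above undoes the obfuscation from beeg's player JS:
# each character of the URL-unquoted key is shifted down by a
# position-dependent offset (the ord of a fixed-alphabet character, mod 21),
# and the result is cut into 3-character chunks that are reassembled in
# reverse order. A toy round-trip with made-up key material (not a real beeg
# key; URL-unquoting omitted for brevity), restating the same logic
# standalone:
ALPHABET = '5ShMcIQlssOd7zChAIOlmeTZDaUxULbJRnywYaiB'

def toy_decrypt_key(key):
    shifted = ''.join(
        chr(ord(key[n]) - ord(ALPHABET[n % len(ALPHABET)]) % 21)
        for n in range(len(key)))
    chunks, rest = [], shifted
    if len(rest) % 3:
        chunks.append(rest[:len(rest) % 3])
        rest = rest[len(rest) % 3:]
    while rest:
        chunks.append(rest[:3])
        rest = rest[3:]
    return ''.join(chunks[::-1])

# 'oyzoqm' shifts to 'defabc' (offsets 11, 20, 20, 14, 15, 10); reversing the
# 3-char chunks ['def', 'abc'] restores the original order:
assert toy_decrypt_key('oyzoqm') == 'abcdef'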
', } } def _real_extract(self, url): video_id = self._match_id(url) - xml_url = url.split(".bild.html")[0] + ",view=xml.bild.xml" - doc = self._download_xml(xml_url, video_id, transform_source=fix_xml_ampersands) - - duration = int_or_none(doc.attrib.get('duration'), scale=1000) + video_data = self._download_json( + url.split('.bild.html')[0] + ',view=json.bild.html', video_id) return { 'id': video_id, - 'title': doc.attrib['ueberschrift'], - 'description': doc.attrib.get('text'), - 'url': doc.attrib['src'], - 'thumbnail': doc.attrib.get('img'), - 'duration': duration, + 'title': unescapeHTML(video_data['title']).strip(), + 'description': unescapeHTML(video_data.get('description')), + 'url': video_data['clipList'][0]['srces'][0]['src'], + 'thumbnail': video_data.get('poster'), + 'duration': int_or_none(video_data.get('durationSec')), } diff --git a/youtube_dl/extractor/bilibili.py b/youtube_dl/extractor/bilibili.py index ecc17ebeb..59beb11bc 100644 --- a/youtube_dl/extractor/bilibili.py +++ b/youtube_dl/extractor/bilibili.py @@ -2,141 +2,109 @@ from __future__ import unicode_literals import re -import itertools -import json -import xml.etree.ElementTree as ET from .common import InfoExtractor +from ..compat import compat_str from ..utils import ( int_or_none, - unified_strdate, + unescapeHTML, ExtractorError, + xpath_text, ) class BiliBiliIE(InfoExtractor): - _VALID_URL = r'http://www\.bilibili\.(?:tv|com)/video/av(?P<id>[0-9]+)/' + _VALID_URL = r'http://www\.bilibili\.(?:tv|com)/video/av(?P<id>\d+)(?:/index_(?P<page_num>\d+).html)?' _TESTS = [{ 'url': 'http://www.bilibili.tv/video/av1074402/', 'md5': '2c301e4dab317596e837c3e7633e7d86', 'info_dict': { - 'id': '1074402_part1', + 'id': '1554319', 'ext': 'flv', 'title': '【金坷垃】金泡沫', - 'duration': 308, + 'duration': 308313, 'upload_date': '20140420', 'thumbnail': 're:^https?://.+\.jpg', + 'description': 'md5:ce18c2a2d2193f0df2917d270f2e5923', + 'timestamp': 1397983878, + 'uploader': '菊子桑', }, }, { 'url': 'http://www.bilibili.com/video/av1041170/', 'info_dict': { 'id': '1041170', 'title': '【BD1080P】刀语【诸神&异域】', + 'description': '这是个神奇的故事~每个人不留弹幕不给走哦~切利哦!~', + 'uploader': '枫叶逝去', + 'timestamp': 1396501299, }, 'playlist_count': 9, }] def _real_extract(self, url): - video_id = self._match_id(url) - webpage = self._download_webpage(url, video_id) - - if '(此视频不存在或被删除)' in webpage: - raise ExtractorError( - 'The video does not exist or was deleted', expected=True) - - if '>你没有权限浏览! 
由于版权相关问题 我们不对您所在的地区提供服务<' in webpage: - raise ExtractorError( - 'The video is not available in your region due to copyright reasons', - expected=True) - - video_code = self._search_regex( - r'(?s)<div itemprop="video".*?>(.*?)</div>', webpage, 'video code') - - title = self._html_search_meta( - 'media:title', video_code, 'title', fatal=True) - duration_str = self._html_search_meta( - 'duration', video_code, 'duration') - if duration_str is None: - duration = None - else: - duration_mobj = re.match( - r'^T(?:(?P<hours>[0-9]+)H)?(?P<minutes>[0-9]+)M(?P<seconds>[0-9]+)S$', - duration_str) - duration = ( - int_or_none(duration_mobj.group('hours'), default=0) * 3600 + - int(duration_mobj.group('minutes')) * 60 + - int(duration_mobj.group('seconds'))) - upload_date = unified_strdate(self._html_search_meta( - 'uploadDate', video_code, fatal=False)) - thumbnail = self._html_search_meta( - 'thumbnailUrl', video_code, 'thumbnail', fatal=False) - - cid = self._search_regex(r'cid=(\d+)', webpage, 'cid') - - entries = [] - - lq_page = self._download_webpage( - 'http://interface.bilibili.com/v_cdn_play?appkey=1&cid=%s' % cid, - video_id, - note='Downloading LQ video info' + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + page_num = mobj.group('page_num') or '1' + + view_data = self._download_json( + 'http://api.bilibili.com/view?type=json&appkey=8e9fc618fbd41e28&id=%s&page=%s' % (video_id, page_num), + video_id) + if 'error' in view_data: + raise ExtractorError('%s said: %s' % (self.IE_NAME, view_data['error']), expected=True) + + cid = view_data['cid'] + title = unescapeHTML(view_data['title']) + + doc = self._download_xml( + 'http://interface.bilibili.com/v_cdn_play?appkey=8e9fc618fbd41e28&cid=%s' % cid, + cid, + 'Downloading page %s/%s' % (page_num, view_data['pages']) ) - try: - err_info = json.loads(lq_page) - raise ExtractorError( - 'BiliBili said: ' + err_info['error_text'], expected=True) - except ValueError: - pass - lq_doc = ET.fromstring(lq_page) - lq_durls = lq_doc.findall('./durl') + if xpath_text(doc, './result') == 'error': + raise ExtractorError('%s said: %s' % (self.IE_NAME, xpath_text(doc, './message')), expected=True) - hq_doc = self._download_xml( - 'http://interface.bilibili.com/playurl?appkey=1&cid=%s' % cid, - video_id, - note='Downloading HQ video info', - fatal=False, - ) - if hq_doc is not False: - hq_durls = hq_doc.findall('./durl') - assert len(lq_durls) == len(hq_durls) - else: - hq_durls = itertools.repeat(None) + entries = [] - i = 1 - for lq_durl, hq_durl in zip(lq_durls, hq_durls): + for durl in doc.findall('./durl'): + size = xpath_text(durl, ['./filesize', './size']) formats = [{ - 'format_id': 'lq', - 'quality': 1, - 'url': lq_durl.find('./url').text, - 'filesize': int_or_none( - lq_durl.find('./size'), get_attr='text'), + 'url': durl.find('./url').text, + 'filesize': int_or_none(size), + 'ext': 'flv', }] - if hq_durl is not None: - formats.append({ - 'format_id': 'hq', - 'quality': 2, - 'ext': 'flv', - 'url': hq_durl.find('./url').text, - 'filesize': int_or_none( - hq_durl.find('./size'), get_attr='text'), - }) - self._sort_formats(formats) + backup_urls = durl.find('./backup_url') + if backup_urls is not None: + for backup_url in backup_urls.findall('./url'): + formats.append({'url': backup_url.text}) + formats.reverse() entries.append({ - 'id': '%s_part%d' % (video_id, i), + 'id': '%s_part%s' % (cid, xpath_text(durl, './order')), 'title': title, + 'duration': int_or_none(xpath_text(durl, './length'), 1000), 'formats': formats, - 'duration': 
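# The rewritten bilibili extractor above is a two-step API walk: the JSON
# 'view' endpoint maps the av-number and page to a cid plus metadata, and the
# XML v_cdn_play endpoint then maps that cid to <durl> nodes carrying
# url/backup_url/order/length. The request URLs, sketched with the appkey
# from this commit and ids taken from its test case:
APPKEY = '8e9fc618fbd41e28'

def view_api_url(video_id, page_num='1'):
    return ('http://api.bilibili.com/view?type=json&appkey=%s&id=%s&page=%s'
            % (APPKEY, video_id, page_num))

def cdn_play_url(cid):
    return ('http://interface.bilibili.com/v_cdn_play?appkey=%s&cid=%s'
            % (APPKEY, cid))

print(view_api_url('1074402'))  # read 'cid' out of the JSON response
print(cdn_play_url('1554319'))  # then iterate doc.findall('./durl')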
duration, - 'upload_date': upload_date, - 'thumbnail': thumbnail, }) - i += 1 - - return { - '_type': 'multi_video', - 'entries': entries, - 'id': video_id, - 'title': title + info = { + 'id': compat_str(cid), + 'title': title, + 'description': view_data.get('description'), + 'thumbnail': view_data.get('pic'), + 'uploader': view_data.get('author'), + 'timestamp': int_or_none(view_data.get('created')), + 'view_count': int_or_none(view_data.get('play')), + 'duration': int_or_none(xpath_text(doc, './timelength')), } + + if len(entries) == 1: + entries[0].update(info) + return entries[0] + else: + info.update({ + '_type': 'multi_video', + 'id': video_id, + 'entries': entries, + }) + return info diff --git a/youtube_dl/extractor/bleacherreport.py b/youtube_dl/extractor/bleacherreport.py new file mode 100644 index 000000000..38bda3af5 --- /dev/null +++ b/youtube_dl/extractor/bleacherreport.py @@ -0,0 +1,106 @@ +# coding: utf-8 +from __future__ import unicode_literals + +from .common import InfoExtractor +from .amp import AMPIE +from ..utils import ( + ExtractorError, + int_or_none, + parse_iso8601, +) + + +class BleacherReportIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?bleacherreport\.com/articles/(?P<id>\d+)' + _TESTS = [{ + 'url': 'http://bleacherreport.com/articles/2496438-fsu-stat-projections-is-jalen-ramsey-best-defensive-player-in-college-football', + 'md5': 'a3ffc3dc73afdbc2010f02d98f990f20', + 'info_dict': { + 'id': '2496438', + 'ext': 'mp4', + 'title': 'FSU Stat Projections: Is Jalen Ramsey Best Defensive Player in College Football?', + 'uploader_id': 3992341, + 'description': 'CFB, ACC, Florida State', + 'timestamp': 1434380212, + 'upload_date': '20150615', + 'uploader': 'Team Stream Now ', + }, + 'add_ie': ['Ooyala'], + }, { + 'url': 'http://bleacherreport.com/articles/2586817-aussie-golfers-get-fright-of-their-lives-after-being-chased-by-angry-kangaroo', + 'md5': 'af5f90dc9c7ba1c19d0a3eac806bbf50', + 'info_dict': { + 'id': '2586817', + 'ext': 'mp4', + 'title': 'Aussie Golfers Get Fright of Their Lives After Being Chased by Angry Kangaroo', + 'timestamp': 1446839961, + 'uploader': 'Sean Fay', + 'description': 'md5:825e94e0f3521df52fa83b2ed198fa20', + 'uploader_id': 6466954, + 'upload_date': '20151011', + }, + 'add_ie': ['Youtube'], + }] + + def _real_extract(self, url): + article_id = self._match_id(url) + + article_data = self._download_json('http://api.bleacherreport.com/api/v1/articles/%s' % article_id, article_id)['article'] + + thumbnails = [] + primary_photo = article_data.get('primaryPhoto') + if primary_photo: + thumbnails = [{ + 'url': primary_photo['url'], + 'width': primary_photo.get('width'), + 'height': primary_photo.get('height'), + }] + + info = { + '_type': 'url_transparent', + 'id': article_id, + 'title': article_data['title'], + 'uploader': article_data.get('author', {}).get('name'), + 'uploader_id': article_data.get('authorId'), + 'timestamp': parse_iso8601(article_data.get('createdAt')), + 'thumbnails': thumbnails, + 'comment_count': int_or_none(article_data.get('commentsCount')), + 'view_count': int_or_none(article_data.get('hitCount')), + } + + video = article_data.get('video') + if video: + video_type = video['type'] + if video_type == 'cms.bleacherreport.com': + info['url'] = 'http://bleacherreport.com/video_embed?id=%s' % video['id'] + elif video_type == 'ooyala.com': + info['url'] = 'ooyala:%s' % video['id'] + elif video_type == 'youtube.com': + info['url'] = video['id'] + elif video_type == 'vine.co': + info['url'] = 'https://vine.co/v/%s' % 
video['id'] + else: + info['url'] = video_type + video['id'] + return info + else: + raise ExtractorError('no video in the article', expected=True) + + +class BleacherReportCMSIE(AMPIE): + _VALID_URL = r'https?://(?:www\.)?bleacherreport\.com/video_embed\?id=(?P<id>[0-9a-f-]{36})' + _TESTS = [{ + 'url': 'http://bleacherreport.com/video_embed?id=8fd44c2f-3dc5-4821-9118-2c825a98c0e1', + 'md5': '8c2c12e3af7805152675446c905d159b', + 'info_dict': { + 'id': '8fd44c2f-3dc5-4821-9118-2c825a98c0e1', + 'ext': 'flv', + 'title': 'Cena vs. Rollins Would Expose the Heavyweight Division', + 'description': 'md5:984afb4ade2f9c0db35f3267ed88b36e', + }, + }] + + def _real_extract(self, url): + video_id = self._match_id(url) + info = self._extract_feed_info('http://cms.bleacherreport.com/media/items/%s/akamai.json' % video_id) + info['id'] = video_id + return info diff --git a/youtube_dl/extractor/bliptv.py b/youtube_dl/extractor/bliptv.py deleted file mode 100644 index c3296283d..000000000 --- a/youtube_dl/extractor/bliptv.py +++ /dev/null @@ -1,292 +0,0 @@ -from __future__ import unicode_literals - -import re - -from .common import InfoExtractor - -from ..compat import ( - compat_urllib_request, - compat_urlparse, -) -from ..utils import ( - clean_html, - int_or_none, - parse_iso8601, - unescapeHTML, - xpath_text, - xpath_with_ns, -) - - -class BlipTVIE(InfoExtractor): - _VALID_URL = r'https?://(?:\w+\.)?blip\.tv/(?:(?:.+-|rss/flash/)(?P<id>\d+)|((?:play/|api\.swf#)(?P<lookup_id>[\da-zA-Z+_]+)))' - - _TESTS = [ - { - 'url': 'http://blip.tv/cbr/cbr-exclusive-gotham-city-imposters-bats-vs-jokerz-short-3-5796352', - 'md5': '80baf1ec5c3d2019037c1c707d676b9f', - 'info_dict': { - 'id': '5779306', - 'ext': 'm4v', - 'title': 'CBR EXCLUSIVE: "Gotham City Imposters" Bats VS Jokerz Short 3', - 'description': 'md5:9bc31f227219cde65e47eeec8d2dc596', - 'timestamp': 1323138843, - 'upload_date': '20111206', - 'uploader': 'cbr', - 'uploader_id': '679425', - 'duration': 81, - } - }, - { - # https://github.com/rg3/youtube-dl/pull/2274 - 'note': 'Video with subtitles', - 'url': 'http://blip.tv/play/h6Uag5OEVgI.html', - 'md5': '309f9d25b820b086ca163ffac8031806', - 'info_dict': { - 'id': '6586561', - 'ext': 'mp4', - 'title': 'Red vs. Blue Season 11 Episode 1', - 'description': 'One-Zero-One', - 'timestamp': 1371261608, - 'upload_date': '20130615', - 'uploader': 'redvsblue', - 'uploader_id': '792887', - 'duration': 279, - } - }, - { - # https://bugzilla.redhat.com/show_bug.cgi?id=967465 - 'url': 'http://a.blip.tv/api.swf#h6Uag5KbVwI', - 'md5': '314e87b1ebe7a48fcbfdd51b791ce5a6', - 'info_dict': { - 'id': '6573122', - 'ext': 'mov', - 'upload_date': '20130520', - 'description': 'Two hapless space marines argue over what to do when they realize they have an astronomically huge problem on their hands.', - 'title': 'Red vs. 
Blue Season 11 Trailer', - 'timestamp': 1369029609, - 'uploader': 'redvsblue', - 'uploader_id': '792887', - } - }, - { - 'url': 'http://blip.tv/play/gbk766dkj4Yn', - 'md5': 'fe0a33f022d49399a241e84a8ea8b8e3', - 'info_dict': { - 'id': '1749452', - 'ext': 'mp4', - 'upload_date': '20090208', - 'description': 'Witness the first appearance of the Nostalgia Critic character, as Doug reviews the movie Transformers.', - 'title': 'Nostalgia Critic: Transformers', - 'timestamp': 1234068723, - 'uploader': 'NostalgiaCritic', - 'uploader_id': '246467', - } - }, - { - # https://github.com/rg3/youtube-dl/pull/4404 - 'note': 'Audio only', - 'url': 'http://blip.tv/hilarios-productions/weekly-manga-recap-kingdom-7119982', - 'md5': '76c0a56f24e769ceaab21fbb6416a351', - 'info_dict': { - 'id': '7103299', - 'ext': 'flv', - 'title': 'Weekly Manga Recap: Kingdom', - 'description': 'And then Shin breaks the enemy line, and he's all like HWAH! And then he slices a guy and it's all like FWASHING! And... it's really hard to describe the best parts of this series without breaking down into sound effects, okay?', - 'timestamp': 1417660321, - 'upload_date': '20141204', - 'uploader': 'The Rollo T', - 'uploader_id': '407429', - 'duration': 7251, - 'vcodec': 'none', - } - }, - { - # missing duration - 'url': 'http://blip.tv/rss/flash/6700880', - 'info_dict': { - 'id': '6684191', - 'ext': 'm4v', - 'title': 'Cowboy Bebop: Gateway Shuffle Review', - 'description': 'md5:3acc480c0f9ae157f5fe88547ecaf3f8', - 'timestamp': 1386639757, - 'upload_date': '20131210', - 'uploader': 'sfdebris', - 'uploader_id': '706520', - } - } - ] - - @staticmethod - def _extract_url(webpage): - mobj = re.search(r'<meta\s[^>]*https?://api\.blip\.tv/\w+/redirect/\w+/(\d+)', webpage) - if mobj: - return 'http://blip.tv/a/a-' + mobj.group(1) - mobj = re.search(r'<(?:iframe|embed|object)\s[^>]*(https?://(?:\w+\.)?blip\.tv/(?:play/|api\.swf#)[a-zA-Z0-9_]+)', webpage) - if mobj: - return mobj.group(1) - - def _real_extract(self, url): - mobj = re.match(self._VALID_URL, url) - lookup_id = mobj.group('lookup_id') - - # See https://github.com/rg3/youtube-dl/issues/857 and - # https://github.com/rg3/youtube-dl/issues/4197 - if lookup_id: - urlh = self._request_webpage( - 'http://blip.tv/play/%s' % lookup_id, lookup_id, 'Resolving lookup id') - url = compat_urlparse.urlparse(urlh.geturl()) - qs = compat_urlparse.parse_qs(url.query) - mobj = re.match(self._VALID_URL, qs['file'][0]) - - video_id = mobj.group('id') - - rss = self._download_xml('http://blip.tv/rss/flash/%s' % video_id, video_id, 'Downloading video RSS') - - def _x(p): - return xpath_with_ns(p, { - 'blip': 'http://blip.tv/dtd/blip/1.0', - 'media': 'http://search.yahoo.com/mrss/', - 'itunes': 'http://www.itunes.com/dtds/podcast-1.0.dtd', - }) - - item = rss.find('channel/item') - - video_id = xpath_text(item, _x('blip:item_id'), 'video id') or lookup_id - title = xpath_text(item, 'title', 'title', fatal=True) - description = clean_html(xpath_text(item, _x('blip:puredescription'), 'description')) - timestamp = parse_iso8601(xpath_text(item, _x('blip:datestamp'), 'timestamp')) - uploader = xpath_text(item, _x('blip:user'), 'uploader') - uploader_id = xpath_text(item, _x('blip:userid'), 'uploader id') - duration = int_or_none(xpath_text(item, _x('blip:runtime'), 'duration')) - media_thumbnail = item.find(_x('media:thumbnail')) - thumbnail = (media_thumbnail.get('url') if media_thumbnail is not None - else xpath_text(item, 'image', 'thumbnail')) - categories = [category.text for category in 
item.findall('category') if category is not None] - - formats = [] - subtitles_urls = {} - - media_group = item.find(_x('media:group')) - for media_content in media_group.findall(_x('media:content')): - url = media_content.get('url') - role = media_content.get(_x('blip:role')) - msg = self._download_webpage( - url + '?showplayer=20140425131715&referrer=http://blip.tv&mask=7&skin=flashvars&view=url', - video_id, 'Resolving URL for %s' % role) - real_url = compat_urlparse.parse_qs(msg.strip())['message'][0] - - media_type = media_content.get('type') - if media_type == 'text/srt' or url.endswith('.srt'): - LANGS = { - 'english': 'en', - } - lang = role.rpartition('-')[-1].strip().lower() - langcode = LANGS.get(lang, lang) - subtitles_urls[langcode] = url - elif media_type.startswith('video/'): - formats.append({ - 'url': real_url, - 'format_id': role, - 'format_note': media_type, - 'vcodec': media_content.get(_x('blip:vcodec')) or 'none', - 'acodec': media_content.get(_x('blip:acodec')), - 'filesize': media_content.get('filesize'), - 'width': int_or_none(media_content.get('width')), - 'height': int_or_none(media_content.get('height')), - }) - self._check_formats(formats, video_id) - self._sort_formats(formats) - - subtitles = self.extract_subtitles(video_id, subtitles_urls) - - return { - 'id': video_id, - 'title': title, - 'description': description, - 'timestamp': timestamp, - 'uploader': uploader, - 'uploader_id': uploader_id, - 'duration': duration, - 'thumbnail': thumbnail, - 'categories': categories, - 'formats': formats, - 'subtitles': subtitles, - } - - def _get_subtitles(self, video_id, subtitles_urls): - subtitles = {} - for lang, url in subtitles_urls.items(): - # For some weird reason, blip.tv serves a video instead of subtitles - # when we request with a common UA - req = compat_urllib_request.Request(url) - req.add_header('User-Agent', 'youtube-dl') - subtitles[lang] = [{ - # The extension is 'srt' but it's actually an 'ass' file - 'ext': 'ass', - 'data': self._download_webpage(req, None, note=False), - }] - return subtitles - - -class BlipTVUserIE(InfoExtractor): - _VALID_URL = r'(?:(?:https?://(?:\w+\.)?blip\.tv/)|bliptvuser:)(?!api\.swf)([^/]+)/*$' - _PAGE_SIZE = 12 - IE_NAME = 'blip.tv:user' - _TEST = { - 'url': 'http://blip.tv/actone', - 'info_dict': { - 'id': 'actone', - 'title': 'Act One: The Series', - }, - 'playlist_count': 5, - } - - def _real_extract(self, url): - mobj = re.match(self._VALID_URL, url) - username = mobj.group(1) - - page_base = 'http://m.blip.tv/pr/show_get_full_episode_list?users_id=%s&lite=0&esi=1' - - page = self._download_webpage(url, username, 'Downloading user page') - mobj = re.search(r'data-users-id="([^"]+)"', page) - page_base = page_base % mobj.group(1) - title = self._og_search_title(page) - - # Download video ids using BlipTV Ajax calls. Result size per - # query is limited (currently to 12 videos) so we need to query - # page by page until there are no video ids - it means we got - # all of them. - - video_ids = [] - pagenum = 1 - - while True: - url = page_base + "&page=" + str(pagenum) - page = self._download_webpage( - url, username, 'Downloading video ids from page %d' % pagenum) - - # Extract video identifiers - ids_in_page = [] - - for mobj in re.finditer(r'href="/([^"]+)"', page): - if mobj.group(1) not in ids_in_page: - ids_in_page.append(unescapeHTML(mobj.group(1))) - - video_ids.extend(ids_in_page) - - # A little optimization - if current page is not - # "full", ie. 
does not contain PAGE_SIZE video ids then - # we can assume that this page is the last one - there - # are no more ids on further pages - no need to query - # again. - - if len(ids_in_page) < self._PAGE_SIZE: - break - - pagenum += 1 - - urls = ['http://blip.tv/%s' % video_id for video_id in video_ids] - url_entries = [self.url_result(vurl, 'BlipTV') for vurl in urls] - return self.playlist_result( - url_entries, playlist_title=title, playlist_id=username) diff --git a/youtube_dl/extractor/bloomberg.py b/youtube_dl/extractor/bloomberg.py index 0dca29b71..13343bc25 100644 --- a/youtube_dl/extractor/bloomberg.py +++ b/youtube_dl/extractor/bloomberg.py @@ -6,9 +6,9 @@ from .common import InfoExtractor class BloombergIE(InfoExtractor): - _VALID_URL = r'https?://www\.bloomberg\.com/news/videos/[^/]+/(?P<id>[^/?#]+)' + _VALID_URL = r'https?://(?:www\.)?bloomberg\.com/(?:[^/]+/)*(?P<id>[^/?#]+)' - _TEST = { + _TESTS = [{ 'url': 'http://www.bloomberg.com/news/videos/b/aaeae121-5949-481e-a1ce-4562db6f5df2', # The md5 checksum changes 'info_dict': { @@ -17,22 +17,35 @@ class BloombergIE(InfoExtractor): 'title': 'Shah\'s Presentation on Foreign-Exchange Strategies', 'description': 'md5:a8ba0302912d03d246979735c17d2761', }, - } + }, { + 'url': 'http://www.bloomberg.com/news/articles/2015-11-12/five-strange-things-that-have-been-happening-in-financial-markets', + 'only_matching': True, + }, { + 'url': 'http://www.bloomberg.com/politics/videos/2015-11-25/karl-rove-on-jeb-bush-s-struggles-stopping-trump', + 'only_matching': True, + }] def _real_extract(self, url): name = self._match_id(url) webpage = self._download_webpage(url, name) - video_id = self._search_regex(r'"bmmrId":"(.+?)"', webpage, 'id') + video_id = self._search_regex( + r'["\']bmmrId["\']\s*:\s*(["\'])(?P<url>.+?)\1', + webpage, 'id', group='url') title = re.sub(': Video$', '', self._og_search_title(webpage)) embed_info = self._download_json( 'http://www.bloomberg.com/api/embed?id=%s' % video_id, video_id) formats = [] for stream in embed_info['streams']: - if stream["muxing_format"] == "TS": - formats.extend(self._extract_m3u8_formats(stream['url'], video_id)) + stream_url = stream.get('url') + if not stream_url: + continue + if stream['muxing_format'] == 'TS': + formats.extend(self._extract_m3u8_formats( + stream_url, video_id, 'mp4', m3u8_id='hls', fatal=False)) else: - formats.extend(self._extract_f4m_formats(stream['url'], video_id)) + formats.extend(self._extract_f4m_formats( + stream_url, video_id, f4m_id='hds', fatal=False)) self._sort_formats(formats) return { diff --git a/youtube_dl/extractor/br.py b/youtube_dl/extractor/br.py index 66e394e10..11cf49851 100644 --- a/youtube_dl/extractor/br.py +++ b/youtube_dl/extractor/br.py @@ -1,18 +1,21 @@ # coding: utf-8 from __future__ import unicode_literals +import re + from .common import InfoExtractor from ..utils import ( ExtractorError, int_or_none, parse_duration, + xpath_element, + xpath_text, ) class BRIE(InfoExtractor): IE_DESC = 'Bayerischer Rundfunk Mediathek' - _VALID_URL = r'https?://(?:www\.)?br\.de/(?:[a-z0-9\-_]+/)+(?P<id>[a-z0-9\-_]+)\.html' - _BASE_URL = 'http://www.br.de' + _VALID_URL = r'(?P<base_url>https?://(?:www\.)?br(?:-klassik)?\.de)/(?:[a-z0-9\-_]+/)+(?P<id>[a-z0-9\-_]+)\.html' _TESTS = [ { @@ -22,7 +25,7 @@ class BRIE(InfoExtractor): 'id': '48f656ef-287e-486f-be86-459122db22cc', 'ext': 'mp4', 'title': 'Die böse Überraschung', - 'description': 'Betriebliche Altersvorsorge: Die böse Überraschung', + 'description': 'md5:ce9ac81b466ce775b8018f6801b48ac9', 'duration': 
180, 'uploader': 'Reinhard Weber', 'upload_date': '20150422', @@ -30,23 +33,23 @@ class BRIE(InfoExtractor): }, { 'url': 'http://www.br.de/nachrichten/oberbayern/inhalt/muenchner-polizeipraesident-schreiber-gestorben-100.html', - 'md5': 'a44396d73ab6a68a69a568fae10705bb', + 'md5': 'af3a3a4aa43ff0ce6a89504c67f427ef', 'info_dict': { 'id': 'a4b83e34-123d-4b81-9f4e-c0d3121a4e05', - 'ext': 'mp4', + 'ext': 'flv', 'title': 'Manfred Schreiber ist tot', - 'description': 'Abendschau kompakt: Manfred Schreiber ist tot', + 'description': 'md5:b454d867f2a9fc524ebe88c3f5092d97', 'duration': 26, } }, { - 'url': 'http://www.br.de/radio/br-klassik/sendungen/allegro/premiere-urauffuehrung-the-land-2015-dance-festival-muenchen-100.html', + 'url': 'https://www.br-klassik.de/audio/peeping-tom-premierenkritik-dance-festival-muenchen-100.html', 'md5': '8b5b27c0b090f3b35eac4ab3f7a73d3d', 'info_dict': { 'id': '74c603c9-26d3-48bb-b85b-079aeed66e0b', 'ext': 'aac', 'title': 'Kurzweilig und sehr bewegend', - 'description': '"The Land" von Peeping Tom: Kurzweilig und sehr bewegend', + 'description': 'md5:0351996e3283d64adeb38ede91fac54e', 'duration': 296, } }, @@ -57,7 +60,7 @@ class BRIE(InfoExtractor): 'id': '6ba73750-d405-45d3-861d-1ce8c524e059', 'ext': 'mp4', 'title': 'Umweltbewusster Häuslebauer', - 'description': 'Uwe Erdelt: Umweltbewusster Häuslebauer', + 'description': 'md5:d52dae9792d00226348c1dbb13c9bae2', 'duration': 116, } }, @@ -68,7 +71,7 @@ class BRIE(InfoExtractor): 'id': 'd982c9ce-8648-4753-b358-98abb8aec43d', 'ext': 'mp4', 'title': 'Folge 1 - Metaphysik', - 'description': 'Kant für Anfänger: Folge 1 - Metaphysik', + 'description': 'md5:bb659990e9e59905c3d41e369db1fbe3', 'duration': 893, 'uploader': 'Eva Maria Steimle', 'upload_date': '20140117', @@ -77,28 +80,31 @@ class BRIE(InfoExtractor): ] def _real_extract(self, url): - display_id = self._match_id(url) + base_url, display_id = re.search(self._VALID_URL, url).groups() page = self._download_webpage(url, display_id) xml_url = self._search_regex( r"return BRavFramework\.register\(BRavFramework\('avPlayer_(?:[a-f0-9-]{36})'\)\.setup\({dataURL:'(/(?:[a-z0-9\-]+/)+[a-z0-9/~_.-]+)'}\)\);", page, 'XMLURL') - xml = self._download_xml(self._BASE_URL + xml_url, None) + xml = self._download_xml(base_url + xml_url, display_id) medias = [] for xml_media in xml.findall('video') + xml.findall('audio'): + media_id = xml_media.get('externalId') media = { - 'id': xml_media.get('externalId'), - 'title': xml_media.find('title').text, - 'duration': parse_duration(xml_media.find('duration').text), - 'formats': self._extract_formats(xml_media.find('assets')), - 'thumbnails': self._extract_thumbnails(xml_media.find('teaserImage/variants')), - 'description': ' '.join(xml_media.find('shareTitle').text.splitlines()), - 'webpage_url': xml_media.find('permalink').text + 'id': media_id, + 'title': xpath_text(xml_media, 'title', 'title', True), + 'duration': parse_duration(xpath_text(xml_media, 'duration')), + 'formats': self._extract_formats(xpath_element( + xml_media, 'assets'), media_id), + 'thumbnails': self._extract_thumbnails(xpath_element( + xml_media, 'teaserImage/variants'), base_url), + 'description': xpath_text(xml_media, 'desc'), + 'webpage_url': xpath_text(xml_media, 'permalink'), + 'uploader': xpath_text(xml_media, 'author'), } - if xml_media.find('author').text: - media['uploader'] = xml_media.find('author').text - if xml_media.find('broadcastDate').text: - media['upload_date'] = ''.join(reversed(xml_media.find('broadcastDate').text.split('.'))) + broadcast_date 
= xpath_text(xml_media, 'broadcastDate') + if broadcast_date: + media['upload_date'] = ''.join(reversed(broadcast_date.split('.'))) medias.append(media) if len(medias) > 1: @@ -109,35 +115,54 @@ class BRIE(InfoExtractor): raise ExtractorError('No media entries found') return medias[0] - def _extract_formats(self, assets): - - def text_or_none(asset, tag): - elem = asset.find(tag) - return None if elem is None else elem.text - - formats = [{ - 'url': text_or_none(asset, 'downloadUrl'), - 'ext': text_or_none(asset, 'mediaType'), - 'format_id': asset.get('type'), - 'width': int_or_none(text_or_none(asset, 'frameWidth')), - 'height': int_or_none(text_or_none(asset, 'frameHeight')), - 'tbr': int_or_none(text_or_none(asset, 'bitrateVideo')), - 'abr': int_or_none(text_or_none(asset, 'bitrateAudio')), - 'vcodec': text_or_none(asset, 'codecVideo'), - 'acodec': text_or_none(asset, 'codecAudio'), - 'container': text_or_none(asset, 'mediaType'), - 'filesize': int_or_none(text_or_none(asset, 'size')), - } for asset in assets.findall('asset') - if asset.find('downloadUrl') is not None] - + def _extract_formats(self, assets, media_id): + formats = [] + for asset in assets.findall('asset'): + format_url = xpath_text(asset, ['downloadUrl', 'url']) + asset_type = asset.get('type') + if asset_type == 'HDS': + formats.extend(self._extract_f4m_formats( + format_url + '?hdcore=3.2.0', media_id, f4m_id='hds', fatal=False)) + elif asset_type == 'HLS': + formats.extend(self._extract_m3u8_formats( + format_url, media_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)) + else: + format_info = { + 'ext': xpath_text(asset, 'mediaType'), + 'width': int_or_none(xpath_text(asset, 'frameWidth')), + 'height': int_or_none(xpath_text(asset, 'frameHeight')), + 'tbr': int_or_none(xpath_text(asset, 'bitrateVideo')), + 'abr': int_or_none(xpath_text(asset, 'bitrateAudio')), + 'vcodec': xpath_text(asset, 'codecVideo'), + 'acodec': xpath_text(asset, 'codecAudio'), + 'container': xpath_text(asset, 'mediaType'), + 'filesize': int_or_none(xpath_text(asset, 'size')), + } + format_url = self._proto_relative_url(format_url) + if format_url: + http_format_info = format_info.copy() + http_format_info.update({ + 'url': format_url, + 'format_id': 'http-%s' % asset_type, + }) + formats.append(http_format_info) + server_prefix = xpath_text(asset, 'serverPrefix') + if server_prefix: + rtmp_format_info = format_info.copy() + rtmp_format_info.update({ + 'url': server_prefix, + 'play_path': xpath_text(asset, 'fileName'), + 'format_id': 'rtmp-%s' % asset_type, + }) + formats.append(rtmp_format_info) self._sort_formats(formats) return formats - def _extract_thumbnails(self, variants): + def _extract_thumbnails(self, variants, base_url): thumbnails = [{ - 'url': self._BASE_URL + variant.find('url').text, - 'width': int_or_none(variant.find('width').text), - 'height': int_or_none(variant.find('height').text), - } for variant in variants.findall('variant')] + 'url': base_url + xpath_text(variant, 'url'), + 'width': int_or_none(xpath_text(variant, 'width')), + 'height': int_or_none(xpath_text(variant, 'height')), + } for variant in variants.findall('variant') if xpath_text(variant, 'url')] thumbnails.sort(key=lambda x: x['width'] * x['height'], reverse=True) return thumbnails
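The br.py rewrite above swaps bare .find(tag).text chains for the fatal-aware XML helpers from youtube_dl/utils.py. A minimal sketch of how those helpers behave; the sample document here is invented for illustration, the real input is the avPlayer XML the extractor downloads:

    from xml.etree import ElementTree
    from youtube_dl.utils import xpath_element, xpath_text

    # Invented stand-in for the downloaded media XML.
    xml_media = ElementTree.fromstring('<video><title>Folge 1</title><assets/></video>')

    xpath_text(xml_media, 'title')                 # 'Folge 1'
    xpath_text(xml_media, 'duration')              # None: a missing node is non-fatal by default
    xpath_text(xml_media, 'title', 'title', True)  # fatal=True raises ExtractorError when absent
    xpath_element(xml_media, 'assets')             # the Element itself, ready for .findall('asset')

diff --git a/youtube_dl/extractor/brightcove.py b/youtube_dl/extractor/brightcove.py index 4721c2293..c947337f9 100644 --- a/youtube_dl/extractor/brightcove.py +++ b/youtube_dl/extractor/brightcove.py @@ -3,15 +3,14 @@ from __future__ import unicode_literals import re import 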
json -import xml.etree.ElementTree from .common import InfoExtractor from ..compat import ( + compat_etree_fromstring, compat_parse_qs, compat_str, compat_urllib_parse, compat_urllib_parse_urlparse, - compat_urllib_request, compat_urlparse, compat_xml_parse_error, ) @@ -20,12 +19,18 @@ from ..utils import ( ExtractorError, find_xpath_attr, fix_xml_ampersands, + float_or_none, + js_to_json, + int_or_none, + parse_iso8601, + sanitized_Request, unescapeHTML, unsmuggle_url, ) -class BrightcoveIE(InfoExtractor): +class BrightcoveLegacyIE(InfoExtractor): + IE_NAME = 'brightcove:legacy' _VALID_URL = r'(?:https?://.*brightcove\.com/(services|viewer).*?\?|brightcove:)(?P<query>.*)' _FEDERATED_URL_TEMPLATE = 'http://c.brightcove.com/services/viewer/htmlFederated?%s' @@ -119,7 +124,7 @@ class BrightcoveIE(InfoExtractor): object_str = fix_xml_ampersands(object_str) try: - object_doc = xml.etree.ElementTree.fromstring(object_str.encode('utf-8')) + object_doc = compat_etree_fromstring(object_str.encode('utf-8')) except compat_xml_parse_error: return @@ -245,7 +250,7 @@ class BrightcoveIE(InfoExtractor): def _get_video_info(self, video_id, query_str, query, referer=None): request_url = self._FEDERATED_URL_TEMPLATE % query_str - req = compat_urllib_request.Request(request_url) + req = sanitized_Request(request_url) linkBase = query.get('linkBaseURL') if linkBase is not None: referer = linkBase[0] @@ -346,3 +351,181 @@ class BrightcoveIE(InfoExtractor): if 'url' not in info and not info.get('formats'): raise ExtractorError('Unable to extract video url for %s' % info['id']) return info + + +class BrightcoveNewIE(InfoExtractor): + IE_NAME = 'brightcove:new' + _VALID_URL = r'https?://players\.brightcove\.net/(?P<account_id>\d+)/(?P<player_id>[^/]+)_(?P<embed>[^/]+)/index\.html\?.*videoId=(?P<video_id>(?:ref:)?\d+)' + _TESTS = [{ + 'url': 'http://players.brightcove.net/929656772001/e41d32dc-ec74-459e-a845-6c69f7b724ea_default/index.html?videoId=4463358922001', + 'md5': 'c8100925723840d4b0d243f7025703be', + 'info_dict': { + 'id': '4463358922001', + 'ext': 'mp4', + 'title': 'Meet the man behind Popcorn Time', + 'description': 'md5:eac376a4fe366edc70279bfb681aea16', + 'duration': 165.768, + 'timestamp': 1441391203, + 'upload_date': '20150904', + 'uploader_id': '929656772001', + 'formats': 'mincount:22', + }, + }, { + # with rtmp streams + 'url': 'http://players.brightcove.net/4036320279001/5d112ed9-283f-485f-a7f9-33f42e8bc042_default/index.html?videoId=4279049078001', + 'info_dict': { + 'id': '4279049078001', + 'ext': 'mp4', + 'title': 'Titansgrave: Chapter 0', + 'description': 'Titansgrave: Chapter 0', + 'duration': 1242.058, + 'timestamp': 1433556729, + 'upload_date': '20150606', + 'uploader_id': '4036320279001', + 'formats': 'mincount:41', + }, + 'params': { + 'skip_download': True, + } + }, { + # ref: prefixed video id + 'url': 'http://players.brightcove.net/3910869709001/21519b5c-4b3b-4363-accb-bdc8f358f823_default/index.html?videoId=ref:7069442', + 'only_matching': True, + }] + + @staticmethod + def _extract_url(webpage): + urls = BrightcoveNewIE._extract_urls(webpage) + return urls[0] if urls else None + + @staticmethod + def _extract_urls(webpage): + # Reference: + # 1. http://docs.brightcove.com/en/video-cloud/brightcove-player/guides/publish-video.html#setvideoiniframe + # 2. http://docs.brightcove.com/en/video-cloud/brightcove-player/guides/publish-video.html#setvideousingjavascript + # 3. http://docs.brightcove.com/en/video-cloud/brightcove-player/guides/embed-in-page.html + # 4. 
https://support.brightcove.com/en/video-cloud/docs/dynamically-assigning-videos-player + + entries = [] + + # Look for iframe embeds [1] + for _, url in re.findall( + r'<iframe[^>]+src=(["\'])((?:https?:)//players\.brightcove\.net/\d+/[^/]+/index\.html.+?)\1', webpage): + entries.append(url) + + # Look for embed_in_page embeds [2] + for video_id, account_id, player_id, embed in re.findall( + # According to examples from [3] it's unclear whether video id + # may be optional and what to do when it is + # According to [4] data-video-id may be prefixed with ref: + r'''(?sx) + <video[^>]+ + data-video-id=["\']((?:ref:)?\d+)["\'][^>]*>.*? + </video>.*? + <script[^>]+ + src=["\'](?:https?:)?//players\.brightcove\.net/ + (\d+)/([\da-f-]+)_([^/]+)/index\.min\.js + ''', webpage): + entries.append( + 'http://players.brightcove.net/%s/%s_%s/index.html?videoId=%s' + % (account_id, player_id, embed, video_id)) + + return entries + + def _real_extract(self, url): + account_id, player_id, embed, video_id = re.match(self._VALID_URL, url).groups() + + webpage = self._download_webpage( + 'http://players.brightcove.net/%s/%s_%s/index.min.js' + % (account_id, player_id, embed), video_id) + + policy_key = None + + catalog = self._search_regex( + r'catalog\(({.+?})\);', webpage, 'catalog', default=None) + if catalog: + catalog = self._parse_json( + js_to_json(catalog), video_id, fatal=False) + if catalog: + policy_key = catalog.get('policyKey') + + if not policy_key: + policy_key = self._search_regex( + r'policyKey\s*:\s*(["\'])(?P<pk>.+?)\1', + webpage, 'policy key', group='pk') + + req = sanitized_Request( + 'https://edge.api.brightcove.com/playback/v1/accounts/%s/videos/%s' + % (account_id, video_id), + headers={'Accept': 'application/json;pk=%s' % policy_key}) + json_data = self._download_json(req, video_id) + + title = json_data['name'] + + formats = [] + for source in json_data.get('sources', []): + source_type = source.get('type') + src = source.get('src') + if source_type == 'application/x-mpegURL': + if not src: + continue + formats.extend(self._extract_m3u8_formats( + src, video_id, 'mp4', entry_protocol='m3u8_native', + m3u8_id='hls', fatal=False)) + else: + streaming_src = source.get('streaming_src') + stream_name, app_name = source.get('stream_name'), source.get('app_name') + if not src and not streaming_src and (not stream_name or not app_name): + continue + tbr = float_or_none(source.get('avg_bitrate'), 1000) + height = int_or_none(source.get('height')) + f = { + 'tbr': tbr, + 'width': int_or_none(source.get('width')), + 'height': height, + 'filesize': int_or_none(source.get('size')), + 'container': source.get('container'), + 'vcodec': source.get('codec'), + 'ext': source.get('container').lower(), + } + + def build_format_id(kind): + format_id = kind + if tbr: + format_id += '-%dk' % int(tbr) + if height: + format_id += '-%dp' % height + return format_id + + if src or streaming_src: + f.update({ + 'url': src or streaming_src, + 'format_id': build_format_id('http' if src else 'http-streaming'), + 'preference': 2 if src else 1, + }) + else: + f.update({ + 'url': app_name, + 'play_path': stream_name, + 'format_id': build_format_id('rtmp'), + }) + formats.append(f) + self._sort_formats(formats) + + description = json_data.get('description') + thumbnail = json_data.get('thumbnail') + timestamp = parse_iso8601(json_data.get('published_at')) + duration = float_or_none(json_data.get('duration'), 1000) + tags = json_data.get('tags', []) + + return { + 'id': video_id, + 'title': title, + 'description': 
description, + 'thumbnail': thumbnail, + 'duration': duration, + 'timestamp': timestamp, + 'uploader_id': account_id, + 'formats': formats, + 'tags': tags, + } diff --git a/youtube_dl/extractor/byutv.py b/youtube_dl/extractor/byutv.py index 3b2de517e..dda98059e 100644 --- a/youtube_dl/extractor/byutv.py +++ b/youtube_dl/extractor/byutv.py @@ -14,9 +14,10 @@ class BYUtvIE(InfoExtractor): 'info_dict': { 'id': 'studio-c-season-5-episode-5', 'ext': 'mp4', - 'description': 'md5:5438d33774b6bdc662f9485a340401cc', + 'description': 'md5:e07269172baff037f8e8bf9956bc9747', 'title': 'Season 5 Episode 5', - 'thumbnail': 're:^https?://.*\.jpg$' + 'thumbnail': 're:^https?://.*\.jpg$', + 'duration': 1486.486, }, 'params': { 'skip_download': True, diff --git a/youtube_dl/extractor/canal13cl.py b/youtube_dl/extractor/canal13cl.py deleted file mode 100644 index 93241fefe..000000000 --- a/youtube_dl/extractor/canal13cl.py +++ /dev/null @@ -1,48 +0,0 @@ -# coding: utf-8 -from __future__ import unicode_literals - -import re - -from .common import InfoExtractor - - -class Canal13clIE(InfoExtractor): - _VALID_URL = r'^http://(?:www\.)?13\.cl/(?:[^/?#]+/)*(?P<id>[^/?#]+)' - _TEST = { - 'url': 'http://www.13.cl/t13/nacional/el-circulo-de-hierro-de-michelle-bachelet-en-su-regreso-a-la-moneda', - 'md5': '4cb1fa38adcad8fea88487a078831755', - 'info_dict': { - 'id': '1403022125', - 'display_id': 'el-circulo-de-hierro-de-michelle-bachelet-en-su-regreso-a-la-moneda', - 'ext': 'mp4', - 'title': 'El "círculo de hierro" de Michelle Bachelet en su regreso a La Moneda', - 'description': '(Foto: Agencia Uno) En nueve días más, Michelle Bachelet va a asumir por segunda vez como presidenta de la República. Entre aquellos que la acompañarán hay caras que se repiten y otras que se consolidan en su entorno de colaboradores más cercanos.', - } - } - - def _real_extract(self, url): - mobj = re.match(self._VALID_URL, url) - display_id = mobj.group('id') - - webpage = self._download_webpage(url, display_id) - - title = self._html_search_meta( - 'twitter:title', webpage, 'title', fatal=True) - description = self._html_search_meta( - 'twitter:description', webpage, 'description') - url = self._html_search_regex( - r'articuloVideo = \"(.*?)\"', webpage, 'url') - real_id = self._search_regex( - r'[^0-9]([0-9]{7,})[^0-9]', url, 'id', default=display_id) - thumbnail = self._html_search_regex( - r'articuloImagen = \"(.*?)\"', webpage, 'thumbnail') - - return { - 'id': real_id, - 'display_id': display_id, - 'url': url, - 'title': title, - 'description': description, - 'ext': 'mp4', - 'thumbnail': thumbnail, - } diff --git a/youtube_dl/extractor/canalc2.py b/youtube_dl/extractor/canalc2.py index c4fefefe4..f6a1ff381 100644 --- a/youtube_dl/extractor/canalc2.py +++ b/youtube_dl/extractor/canalc2.py @@ -4,38 +4,53 @@ from __future__ import unicode_literals import re from .common import InfoExtractor +from ..utils import parse_duration class Canalc2IE(InfoExtractor): IE_NAME = 'canalc2.tv' - _VALID_URL = r'http://.*?\.canalc2\.tv/video\.asp\?.*?idVideo=(?P<id>\d+)' + _VALID_URL = r'https?://(?:www\.)?canalc2\.tv/video/(?P<id>\d+)' _TEST = { - 'url': 'http://www.canalc2.tv/video.asp?idVideo=12163&voir=oui', + 'url': 'http://www.canalc2.tv/video/12163', 'md5': '060158428b650f896c542dfbb3d6487f', 'info_dict': { 'id': '12163', - 'ext': 'mp4', - 'title': 'Terrasses du Numérique' + 'ext': 'flv', + 'title': 'Terrasses du Numérique', + 'duration': 122, + }, + 'params': { + 'skip_download': True, # Requires rtmpdump } } def _real_extract(self, url): - 
video_id = re.match(self._VALID_URL, url).group('id') - # We need to set the voir field for getting the file name - url = 'http://www.canalc2.tv/video.asp?idVideo=%s&voir=oui' % video_id + video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) - file_name = self._search_regex( - r"so\.addVariable\('file','(.*?)'\);", - webpage, 'file name') - video_url = 'http://vod-flash.u-strasbg.fr:8080/' + file_name + video_url = self._search_regex( + r'jwplayer\((["\'])Player\1\)\.setup\({[^}]*file\s*:\s*(["\'])(?P<file>.+?)\2', + webpage, 'video_url', group='file') + formats = [{'url': video_url}] + if video_url.startswith('rtmp://'): + rtmp = re.search(r'^(?P<url>rtmp://[^/]+/(?P<app>.+/))(?P<play_path>mp4:.+)$', video_url) + formats[0].update({ + 'url': rtmp.group('url'), + 'ext': 'flv', + 'app': rtmp.group('app'), + 'play_path': rtmp.group('play_path'), + 'page_url': url, + }) title = self._html_search_regex( - r'class="evenement8">(.*?)</a>', webpage, 'title') + r'(?s)class="[^"]*col_description[^"]*">.*?<h3>(.*?)</h3>', webpage, 'title') + duration = parse_duration(self._search_regex( + r'id=["\']video_duree["\'][^>]*>([^<]+)', + webpage, 'duration', fatal=False)) return { 'id': video_id, - 'ext': 'mp4', - 'url': video_url, 'title': title, + 'duration': duration, + 'formats': formats, } diff --git a/youtube_dl/extractor/canalplus.py b/youtube_dl/extractor/canalplus.py index 57e0cda2c..004372f8d 100644 --- a/youtube_dl/extractor/canalplus.py +++ b/youtube_dl/extractor/canalplus.py @@ -78,7 +78,8 @@ class CanalplusIE(InfoExtractor): if video_id is None: webpage = self._download_webpage(url, display_id) video_id = self._search_regex( - r'<canal:player[^>]+?videoId="(\d+)"', webpage, 'video id') + [r'<canal:player[^>]+?videoId=(["\'])(?P<id>\d+)', r'id=["\']canal_video_player(?P<id>\d+)'], + webpage, 'video id', group='id') info_url = self._VIDEO_INFO_TEMPLATE % (site_id, video_id) doc = self._download_xml(info_url, video_id, 'Downloading video XML') diff --git a/youtube_dl/extractor/cbs.py b/youtube_dl/extractor/cbs.py index 75fffb156..40d07ab18 100644 --- a/youtube_dl/extractor/cbs.py +++ b/youtube_dl/extractor/cbs.py @@ -1,6 +1,10 @@ from __future__ import unicode_literals from .common import InfoExtractor +from ..utils import ( + sanitized_Request, + smuggle_url, +) class CBSIE(InfoExtractor): @@ -46,13 +50,19 @@ class CBSIE(InfoExtractor): def _real_extract(self, url): display_id = self._match_id(url) - webpage = self._download_webpage(url, display_id) + request = sanitized_Request(url) + # Android UA is served with higher quality (720p) streams (see + # https://github.com/rg3/youtube-dl/issues/7490) + request.add_header('User-Agent', 'Mozilla/5.0 (Linux; Android 4.4; Nexus 5)') + webpage = self._download_webpage(request, display_id) real_id = self._search_regex( [r"video\.settings\.pid\s*=\s*'([^']+)';", r"cbsplayer\.pid\s*=\s*'([^']+)';"], webpage, 'real video ID') return { '_type': 'url_transparent', 'ie_key': 'ThePlatform', - 'url': 'theplatform:%s' % real_id, + 'url': smuggle_url( + 'http://link.theplatform.com/s/dJ5BDC/%s?mbr=true&manifest=m3u' % real_id, + {'force_smil_url': True}), 'display_id': display_id, } diff --git a/youtube_dl/extractor/cbsnews.py b/youtube_dl/extractor/cbsnews.py index 52e61d85b..f9a64a0a2 100644 --- a/youtube_dl/extractor/cbsnews.py +++ b/youtube_dl/extractor/cbsnews.py @@ -67,9 +67,12 @@ class CBSNewsIE(InfoExtractor): 'format_id': format_id, } if uri.startswith('rtmp'): + play_path = re.sub( + r'{slistFilePath}', '', + 
uri.split('<break>')[-1].split('{break}')[-1]) fmt.update({ 'app': 'ondemand?auth=cbs', - 'play_path': 'mp4:' + uri.split('<break>')[-1], + 'play_path': 'mp4:' + play_path, 'player_url': 'http://www.cbsnews.com/[[IMPORT]]/vidtech.cbsinteractive.com/player/3_3_0/CBSI_PLAYER_HD.swf', 'page_url': 'http://www.cbsnews.com', 'ext': 'flv', diff --git a/youtube_dl/extractor/ceskatelevize.py b/youtube_dl/extractor/ceskatelevize.py index e857e66f4..6f7b2a70d 100644 --- a/youtube_dl/extractor/ceskatelevize.py +++ b/youtube_dl/extractor/ceskatelevize.py @@ -5,7 +5,6 @@ import re from .common import InfoExtractor from ..compat import ( - compat_urllib_request, compat_urllib_parse, compat_urllib_parse_unquote, compat_urllib_parse_urlparse, @@ -13,6 +12,7 @@ from ..compat import ( from ..utils import ( ExtractorError, float_or_none, + sanitized_Request, ) @@ -100,7 +100,7 @@ class CeskaTelevizeIE(InfoExtractor): 'requestSource': 'iVysilani', } - req = compat_urllib_request.Request( + req = sanitized_Request( 'http://www.ceskatelevize.cz/ivysilani/ajax/get-client-playlist', data=compat_urllib_parse.urlencode(data)) @@ -115,7 +115,7 @@ class CeskaTelevizeIE(InfoExtractor): if playlist_url == 'error_region': raise ExtractorError(NOT_AVAILABLE_STRING, expected=True) - req = compat_urllib_request.Request(compat_urllib_parse_unquote(playlist_url)) + req = sanitized_Request(compat_urllib_parse_unquote(playlist_url)) req.add_header('Referer', url) playlist_title = self._og_search_title(webpage) diff --git a/youtube_dl/extractor/channel9.py b/youtube_dl/extractor/channel9.py index 3dfc24f5b..c74553dcf 100644 --- a/youtube_dl/extractor/channel9.py +++ b/youtube_dl/extractor/channel9.py @@ -3,7 +3,11 @@ from __future__ import unicode_literals import re from .common import InfoExtractor -from ..utils import ExtractorError +from ..utils import ( + ExtractorError, + parse_filesize, + qualities, +) class Channel9IE(InfoExtractor): @@ -28,7 +32,7 @@ class Channel9IE(InfoExtractor): 'title': 'Developer Kick-Off Session: Stuff We Love', 'description': 'md5:c08d72240b7c87fcecafe2692f80e35f', 'duration': 4576, - 'thumbnail': 'http://video.ch9.ms/ch9/9d51/03902f2d-fc97-4d3c-b195-0bfe15a19d51/KOS002_220.jpg', + 'thumbnail': 're:http://.*\.jpg', 'session_code': 'KOS002', 'session_day': 'Day 1', 'session_room': 'Arena 1A', @@ -44,31 +48,29 @@ class Channel9IE(InfoExtractor): 'title': 'Self-service BI with Power BI - nuclear testing', 'description': 'md5:d1e6ecaafa7fb52a2cacdf9599829f5b', 'duration': 1540, - 'thumbnail': 'http://video.ch9.ms/ch9/87e1/0300391f-a455-4c72-bec3-4422f19287e1/selfservicenuk_512.jpg', + 'thumbnail': 're:http://.*\.jpg', 'authors': ['Mike Wilmot'], }, + }, + { + # low quality mp4 is best + 'url': 'https://channel9.msdn.com/Events/CPP/CppCon-2015/Ranges-for-the-Standard-Library', + 'info_dict': { + 'id': 'Events/CPP/CppCon-2015/Ranges-for-the-Standard-Library', + 'ext': 'mp4', + 'title': 'Ranges for the Standard Library', + 'description': 'md5:2e6b4917677af3728c5f6d63784c4c5d', + 'duration': 5646, + 'thumbnail': 're:http://.*\.jpg', + }, + 'params': { + 'skip_download': True, + }, } ] _RSS_URL = 'http://channel9.msdn.com/%s/RSS' - # Sorted by quality - _known_formats = ['MP3', 'MP4', 'Mid Quality WMV', 'Mid Quality MP4', 'High Quality WMV', 'High Quality MP4'] - - def _restore_bytes(self, formatted_size): - if not formatted_size: - return 0 - m = re.match(r'^(?P<size>\d+(?:\.\d+)?)\s+(?P<units>[a-zA-Z]+)', formatted_size) - if not m: - return 0 - units = m.group('units') - try: - exponent = ['B', 'KB', 
'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB'].index(units.upper()) - except ValueError: - return 0 - size = float(m.group('size')) - return int(size * (1024 ** exponent)) - def _formats_from_html(self, html): FORMAT_REGEX = r''' (?x) @@ -78,16 +80,20 @@ class Channel9IE(InfoExtractor): <h3>File\s+size</h3>\s*(?P<filesize>.*?)\s* </div>)? # File size part may be missing ''' - # Extract known formats + quality = qualities(( + 'MP3', 'MP4', + 'Low Quality WMV', 'Low Quality MP4', + 'Mid Quality WMV', 'Mid Quality MP4', + 'High Quality WMV', 'High Quality MP4')) formats = [{ 'url': x.group('url'), 'format_id': x.group('quality'), 'format_note': x.group('note'), 'format': '%s (%s)' % (x.group('quality'), x.group('note')), - 'filesize': self._restore_bytes(x.group('filesize')), # File size is approximate - 'preference': self._known_formats.index(x.group('quality')), + 'filesize_approx': parse_filesize(x.group('filesize')), + 'quality': quality(x.group('quality')), 'vcodec': 'none' if x.group('note') == 'Audio only' else None, - } for x in list(re.finditer(FORMAT_REGEX, html)) if x.group('quality') in self._known_formats] + } for x in list(re.finditer(FORMAT_REGEX, html))] self._sort_formats(formats) @@ -158,7 +164,7 @@ class Channel9IE(InfoExtractor): def _extract_session_day(self, html): m = re.search(r'<li class="day">\s*<a href="/Events/[^"]+">(?P<day>[^<]+)</a>\s*</li>', html) - return m.group('day') if m is not None else None + return m.group('day').strip() if m is not None else None def _extract_session_room(self, html): m = re.search(r'<li class="room">\s*(?P<room>.+?)\s*</li>', html) @@ -224,12 +230,12 @@ class Channel9IE(InfoExtractor): if contents is None: return contents - authors = self._extract_authors(html) + if len(contents) > 1: + raise ExtractorError('Got more than one entry') + result = contents[0] + result['authors'] = self._extract_authors(html) - for content in contents: - content['authors'] = authors - - return contents + return result def _extract_session(self, html, content_path): contents = self._extract_content(html, content_path) diff --git a/youtube_dl/extractor/chaturbate.py b/youtube_dl/extractor/chaturbate.py new file mode 100644 index 000000000..242fba311 --- /dev/null +++ b/youtube_dl/extractor/chaturbate.py @@ -0,0 +1,59 @@ +from __future__ import unicode_literals + +from .common import InfoExtractor +from ..utils import ExtractorError + + +class ChaturbateIE(InfoExtractor): + _VALID_URL = r'https?://(?:[^/]+\.)?chaturbate\.com/(?P<id>[^/?#]+)' + _TESTS = [{ + 'url': 'https://www.chaturbate.com/siswet19/', + 'info_dict': { + 'id': 'siswet19', + 'ext': 'mp4', + 'title': 're:^siswet19 [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$', + 'age_limit': 18, + 'is_live': True, + }, + 'params': { + 'skip_download': True, + } + }, { + 'url': 'https://en.chaturbate.com/siswet19/', + 'only_matching': True, + }] + + _ROOM_OFFLINE = 'Room is currently offline' + + def _real_extract(self, url): + video_id = self._match_id(url) + + webpage = self._download_webpage(url, video_id) + + m3u8_url = self._search_regex( + r'src=(["\'])(?P<url>http.+?\.m3u8.*?)\1', webpage, + 'playlist', default=None, group='url') + + if not m3u8_url: + error = self._search_regex( + [r'<span[^>]+class=(["\'])desc_span\1[^>]*>(?P<error>[^<]+)</span>', + r'<div[^>]+id=(["\'])defchat\1[^>]*>\s*<p><strong>(?P<error>[^<]+)<'], + webpage, 'error', group='error', default=None) + if not error: + if any(p not in webpage for p in ( + self._ROOM_OFFLINE, 'offline_tipping', 'tip_offline')): + error = self._ROOM_OFFLINE 
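+ # At this point error holds either the banner text scraped above or the generic offline notice; raising it with expected=True keeps it a plain user-facing message rather than a bug-report traceback.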
+ if error: + raise ExtractorError(error, expected=True) + raise ExtractorError('Unable to find stream URL') + + formats = self._extract_m3u8_formats(m3u8_url, video_id, ext='mp4') + + return { + 'id': video_id, + 'title': self._live_title(video_id), + 'thumbnail': 'https://cdn-s.highwebmedia.com/uHK3McUtGCG3SMFcd4ZJsRv8/roomimage/%s.jpg' % video_id, + 'age_limit': self._rta_search(webpage), + 'is_live': True, + 'formats': formats, + } diff --git a/youtube_dl/extractor/cinemassacre.py b/youtube_dl/extractor/cinemassacre.py index fd1770dac..6d9cd8abd 100644 --- a/youtube_dl/extractor/cinemassacre.py +++ b/youtube_dl/extractor/cinemassacre.py @@ -5,7 +5,6 @@ import re from .common import InfoExtractor from ..utils import ExtractorError -from .bliptv import BlipTVIE from .screenwavemedia import ScreenwaveMediaIE @@ -34,18 +33,17 @@ class CinemassacreIE(InfoExtractor): }, }, { - # blip.tv embedded video + # Youtube embedded video 'url': 'http://cinemassacre.com/2006/12/07/chronologically-confused-about-bad-movie-and-video-game-sequel-titles/', - 'md5': 'ca9b3c8dd5a66f9375daeb5135f5a3de', + 'md5': 'df4cf8a1dcedaec79a73d96d83b99023', 'info_dict': { - 'id': '4065369', - 'ext': 'flv', + 'id': 'OEVzPCY2T-g', + 'ext': 'mp4', 'title': 'AVGN: Chronologically Confused about Bad Movie and Video Game Sequel Titles', 'upload_date': '20061207', - 'uploader': 'cinemassacre', - 'uploader_id': '250778', - 'timestamp': 1283233867, - 'description': 'md5:0a108c78d130676b207d0f6d029ecffd', + 'uploader': 'Cinemassacre', + 'uploader_id': 'JamesNintendoNerd', + 'description': 'md5:784734696c2b8b7f4b8625cc799e07f6', } }, { @@ -89,8 +87,6 @@ class CinemassacreIE(InfoExtractor): ], webpage, 'player data URL', default=None, group='url') if not playerdata_url: - playerdata_url = BlipTVIE._extract_url(webpage) - if not playerdata_url: raise ExtractorError('Unable to find player data') video_title = self._html_search_regex( diff --git a/youtube_dl/extractor/clipfish.py b/youtube_dl/extractor/clipfish.py index 7af903571..3a47f6fa4 100644 --- a/youtube_dl/extractor/clipfish.py +++ b/youtube_dl/extractor/clipfish.py @@ -1,14 +1,9 @@ from __future__ import unicode_literals -import re - from .common import InfoExtractor from ..utils import ( - determine_ext, int_or_none, - js_to_json, - parse_iso8601, - remove_end, + unified_strdate, ) @@ -21,48 +16,47 @@ class ClipfishIE(InfoExtractor): 'id': '3966754', 'ext': 'mp4', 'title': 'FIFA 14 - E3 2013 Trailer', - 'timestamp': 1370938118, + 'description': 'Video zu FIFA 14: E3 2013 Trailer', 'upload_date': '20130611', 'duration': 82, + 'view_count': int, } } def _real_extract(self, url): video_id = self._match_id(url) - webpage = self._download_webpage(url, video_id) - - video_info = self._parse_json( - js_to_json(self._html_search_regex( - '(?s)videoObject\s*=\s*({.+?});', webpage, 'video object')), - video_id) + video_info = self._download_json( + 'http://www.clipfish.de/devapi/id/%s?format=json&apikey=hbbtv' % video_id, + video_id)['items'][0] formats = [] - for video_url in re.findall(r'var\s+videourl\s*=\s*"([^"]+)"', webpage): - ext = determine_ext(video_url) - if ext == 'm3u8': - formats.append({ - 'url': video_url.replace('de.hls.fra.clipfish.de', 'hls.fra.clipfish.de'), - 'ext': 'mp4', - 'format_id': 'hls', - }) - else: - formats.append({ - 'url': video_url, - 'format_id': ext, - }) - self._sort_formats(formats) - title = remove_end(self._og_search_title(webpage), ' - Video') - thumbnail = self._og_search_thumbnail(webpage) - duration = int_or_none(video_info.get('length')) - 
timestamp = parse_iso8601(self._html_search_meta('uploadDate', webpage, 'upload date')) + m3u8_url = video_info.get('media_videourl_hls') + if m3u8_url: + formats.append({ + 'url': m3u8_url.replace('de.hls.fra.clipfish.de', 'hls.fra.clipfish.de'), + 'ext': 'mp4', + 'format_id': 'hls', + }) + + mp4_url = video_info.get('media_videourl') + if mp4_url: + formats.append({ + 'url': mp4_url, + 'format_id': 'mp4', + 'width': int_or_none(video_info.get('width')), + 'height': int_or_none(video_info.get('height')), + 'tbr': int_or_none(video_info.get('bitrate')), + }) return { 'id': video_id, - 'title': title, + 'title': video_info['title'], + 'description': video_info.get('descr'), 'formats': formats, - 'thumbnail': thumbnail, - 'duration': duration, - 'timestamp': timestamp, + 'thumbnail': video_info.get('media_content_thumbnail_large') or video_info.get('media_thumbnail'), + 'duration': int_or_none(video_info.get('media_length')), + 'upload_date': unified_strdate(video_info.get('pubDate')), + 'view_count': int_or_none(video_info.get('media_views')) } diff --git a/youtube_dl/extractor/cliphunter.py b/youtube_dl/extractor/cliphunter.py index d46592cc5..2996b6b09 100644 --- a/youtube_dl/extractor/cliphunter.py +++ b/youtube_dl/extractor/cliphunter.py @@ -1,7 +1,7 @@ from __future__ import unicode_literals from .common import InfoExtractor -from ..utils import determine_ext +from ..utils import int_or_none _translation_table = { @@ -42,31 +42,26 @@ class CliphunterIE(InfoExtractor): video_title = self._search_regex( r'mediaTitle = "([^"]+)"', webpage, 'title') - fmts = {} - for fmt in ('mp4', 'flv'): - fmt_list = self._parse_json(self._search_regex( - r'var %sjson\s*=\s*(\[.*?\]);' % fmt, webpage, '%s formats' % fmt), video_id) - for f in fmt_list: - fmts[f['fname']] = _decode(f['sUrl']) - - qualities = self._parse_json(self._search_regex( - r'var player_btns\s*=\s*(.*?);\n', webpage, 'quality info'), video_id) + gexo_files = self._parse_json( + self._search_regex( + r'var\s+gexoFiles\s*=\s*({.+?});', webpage, 'gexo files'), + video_id) formats = [] - for fname, url in fmts.items(): - f = { - 'url': url, - } - if fname in qualities: - qual = qualities[fname] - f.update({ - 'format_id': '%s_%sp' % (determine_ext(url), qual['h']), - 'width': qual['w'], - 'height': qual['h'], - 'tbr': qual['br'], - }) - formats.append(f) - + for format_id, f in gexo_files.items(): + video_url = f.get('url') + if not video_url: + continue + fmt = f.get('fmt') + height = f.get('h') + format_id = '%s_%sp' % (fmt, height) if fmt and height else format_id + formats.append({ + 'url': _decode(video_url), + 'format_id': format_id, + 'width': int_or_none(f.get('w')), + 'height': int_or_none(height), + 'tbr': int_or_none(f.get('br')), + }) self._sort_formats(formats) thumbnail = self._search_regex( diff --git a/youtube_dl/extractor/clyp.py b/youtube_dl/extractor/clyp.py new file mode 100644 index 000000000..57e643799 --- /dev/null +++ b/youtube_dl/extractor/clyp.py @@ -0,0 +1,57 @@ +from __future__ import unicode_literals + +from .common import InfoExtractor +from ..utils import ( + float_or_none, + parse_iso8601, +) + + +class ClypIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?clyp\.it/(?P<id>[a-z0-9]+)' + _TEST = { + 'url': 'https://clyp.it/ojz2wfah', + 'md5': '1d4961036c41247ecfdcc439c0cddcbb', + 'info_dict': { + 'id': 'ojz2wfah', + 'ext': 'mp3', + 'title': 'Krisson80 - bits wip wip', + 'description': '#Krisson80BitsWipWip #chiptune\n#wip', + 'duration': 263.21, + 'timestamp': 1443515251, + 'upload_date': '20150929', + 
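+ # Expected values mirror the https://api.clyp.it/<audio_id> JSON fields (Title, Description, Duration, DateCreated) parsed in _real_extract below.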
}, + } + + def _real_extract(self, url): + audio_id = self._match_id(url) + + metadata = self._download_json( + 'https://api.clyp.it/%s' % audio_id, audio_id) + + formats = [] + for secure in ('', 'Secure'): + for ext in ('Ogg', 'Mp3'): + format_id = '%s%s' % (secure, ext) + format_url = metadata.get('%sUrl' % format_id) + if format_url: + formats.append({ + 'url': format_url, + 'format_id': format_id, + 'vcodec': 'none', + }) + self._sort_formats(formats) + + title = metadata['Title'] + description = metadata.get('Description') + duration = float_or_none(metadata.get('Duration')) + timestamp = parse_iso8601(metadata.get('DateCreated')) + + return { + 'id': audio_id, + 'title': title, + 'description': description, + 'duration': duration, + 'timestamp': timestamp, + 'formats': formats, + } diff --git a/youtube_dl/extractor/cmt.py b/youtube_dl/extractor/cmt.py index e96c59f71..f1311b14f 100644 --- a/youtube_dl/extractor/cmt.py +++ b/youtube_dl/extractor/cmt.py @@ -4,7 +4,7 @@ from .mtv import MTVIE class CMTIE(MTVIE): IE_NAME = 'cmt.com' - _VALID_URL = r'https?://www\.cmt\.com/videos/.+?/(?P<videoid>[^/]+)\.jhtml' + _VALID_URL = r'https?://www\.cmt\.com/(?:videos|shows)/(?:[^/]+/)*(?P<videoid>\d+)' _FEED_URL = 'http://www.cmt.com/sitewide/apps/player/embed/rss/' _TESTS = [{ @@ -16,4 +16,7 @@ class CMTIE(MTVIE): 'title': 'Garth Brooks - "The Call (featuring Trisha Yearwood)"', 'description': 'Blame It All On My Roots', }, + }, { + 'url': 'http://www.cmt.com/shows/party-down-south/party-down-south-ep-407-gone-girl/1738172/playlist/#id=1738172', + 'only_matching': True, }] diff --git a/youtube_dl/extractor/cnet.py b/youtube_dl/extractor/cnet.py index 5dd69bff7..5c3908f72 100644 --- a/youtube_dl/extractor/cnet.py +++ b/youtube_dl/extractor/cnet.py @@ -1,15 +1,11 @@ # coding: utf-8 from __future__ import unicode_literals -import json +from .theplatform import ThePlatformIE +from ..utils import int_or_none -from .common import InfoExtractor -from ..utils import ( - ExtractorError, -) - -class CNETIE(InfoExtractor): +class CNETIE(ThePlatformIE): _VALID_URL = r'https?://(?:www\.)?cnet\.com/videos/(?P<id>[^/]+)/' _TESTS = [{ 'url': 'http://www.cnet.com/videos/hands-on-with-microsofts-windows-8-1-update/', @@ -18,25 +14,20 @@ class CNETIE(InfoExtractor): 'ext': 'flv', 'title': 'Hands-on with Microsoft Windows 8.1 Update', 'description': 'The new update to the Windows 8 OS brings improved performance for mouse and keyboard users.', - 'thumbnail': 're:^http://.*/flmswindows8.jpg$', 'uploader_id': '6085384d-619e-11e3-b231-14feb5ca9861', 'uploader': 'Sarah Mitroff', + 'duration': 70, }, - 'params': { - 'skip_download': 'requires rtmpdump', - } }, { 'url': 'http://www.cnet.com/videos/whiny-pothole-tweets-at-local-government-when-hit-by-cars-tomorrow-daily-187/', 'info_dict': { 'id': '56527b93-d25d-44e3-b738-f989ce2e49ba', 'ext': 'flv', + 'title': 'Whiny potholes tweet at local government when hit by cars (Tomorrow Daily 187)', 'description': 'Khail and Ashley wonder what other civic woes can be solved by self-tweeting objects, investigate a new kind of VR camera and watch an origami robot self-assemble, walk, climb, dig and dissolve. 
#TDPothole', 'uploader_id': 'b163284d-6b73-44fc-b3e6-3da66c392d40', 'uploader': 'Ashley Esqueda', - 'title': 'Whiny potholes tweet at local government when hit by cars (Tomorrow Daily 187)', - }, - 'params': { - 'skip_download': True, # requires rtmpdump + 'duration': 1482, }, }] @@ -45,26 +36,13 @@ class CNETIE(InfoExtractor): webpage = self._download_webpage(url, display_id) data_json = self._html_search_regex( - r"<div class=\"cnetVideoPlayer\"\s+.*?data-cnet-video-options='([^']+)'", + r"data-cnet-video(?:-uvp)?-options='([^']+)'", webpage, 'data json') - data = json.loads(data_json) - vdata = data['video'] - if not vdata: - vdata = data['videos'][0] - if not vdata: - raise ExtractorError('Cannot find video data') - - mpx_account = data['config']['players']['default']['mpx_account'] - vid = vdata['files'].get('rtmp', vdata['files']['hds']) - tp_link = 'http://link.theplatform.com/s/%s/%s' % (mpx_account, vid) + data = self._parse_json(data_json, display_id) + vdata = data.get('video') or data['videos'][0] video_id = vdata['id'] - title = vdata.get('headline') - if title is None: - title = vdata.get('title') - if title is None: - raise ExtractorError('Cannot find title!') - thumbnail = vdata.get('image', {}).get('path') + title = vdata['title'] author = vdata.get('author') if author: uploader = '%s %s' % (author['firstName'], author['lastName']) @@ -73,13 +51,34 @@ class CNETIE(InfoExtractor): uploader = None uploader_id = None + mpx_account = data['config']['uvpConfig']['default']['mpx_account'] + + metadata = self.get_metadata('%s/%s' % (mpx_account, list(vdata['files'].values())[0]), video_id) + description = vdata.get('description') or metadata.get('description') + duration = int_or_none(vdata.get('duration')) or metadata.get('duration') + + formats = [] + subtitles = {} + for (fkey, vid) in vdata['files'].items(): + if fkey == 'hls_phone' and 'hls_tablet' in vdata['files']: + continue + release_url = 'http://link.theplatform.com/s/%s/%s?format=SMIL&mbr=true' % (mpx_account, vid) + if fkey == 'hds': + release_url += '&manifest=f4m' + tp_formats, tp_subtitles = self._extract_theplatform_smil(release_url, video_id, 'Downloading %s SMIL data' % fkey) + formats.extend(tp_formats) + subtitles = self._merge_subtitles(subtitles, tp_subtitles) + self._sort_formats(formats) + return { - '_type': 'url_transparent', - 'url': tp_link, 'id': video_id, 'display_id': display_id, 'title': title, + 'description': description, + 'thumbnail': metadata.get('thumbnail'), + 'duration': duration, 'uploader': uploader, 'uploader_id': uploader_id, - 'thumbnail': thumbnail, + 'subtitles': subtitles, + 'formats': formats, } diff --git a/youtube_dl/extractor/collegerama.py b/youtube_dl/extractor/collegerama.py index fedd48490..40667a0f1 100644 --- a/youtube_dl/extractor/collegerama.py +++ b/youtube_dl/extractor/collegerama.py @@ -3,10 +3,10 @@ from __future__ import unicode_literals import json from .common import InfoExtractor -from ..compat import compat_urllib_request from ..utils import ( float_or_none, int_or_none, + sanitized_Request, ) @@ -52,7 +52,7 @@ class CollegeRamaIE(InfoExtractor): } } - request = compat_urllib_request.Request( + request = sanitized_Request( 'http://collegerama.tudelft.nl/Mediasite/PlayerService/PlayerService.svc/json/GetPlayerOptions', json.dumps(player_options_request)) request.add_header('Content-Type', 'application/json') diff --git a/youtube_dl/extractor/comcarcoff.py b/youtube_dl/extractor/comcarcoff.py index 81f3d7697..2efa200b5 100644 --- 
a/youtube_dl/extractor/comcarcoff.py +++ b/youtube_dl/extractor/comcarcoff.py @@ -1,10 +1,12 @@ # encoding: utf-8 from __future__ import unicode_literals -import json - from .common import InfoExtractor -from ..utils import parse_iso8601 +from ..utils import ( + int_or_none, + parse_duration, + parse_iso8601, +) class ComCarCoffIE(InfoExtractor): @@ -16,6 +18,7 @@ class ComCarCoffIE(InfoExtractor): 'ext': 'mp4', 'upload_date': '20141127', 'timestamp': 1417107600, + 'duration': 1232, 'title': 'Happy Thanksgiving Miranda', 'description': 'Jerry Seinfeld and his special guest Miranda Sings cruise around town in search of coffee, complaining and apologizing along the way.', 'thumbnail': 'http://ccc.crackle.com/images/s5e4_thumb.jpg', @@ -31,9 +34,10 @@ class ComCarCoffIE(InfoExtractor): display_id = 'comediansincarsgettingcoffee.com' webpage = self._download_webpage(url, display_id) - full_data = json.loads(self._search_regex( - r'<script type="application/json" id="videoData">(?P<json>.+?)</script>', - webpage, 'full data json')) + full_data = self._parse_json( + self._search_regex( + r'window\.app\s*=\s*({.+?});\n', webpage, 'full data json'), + display_id)['videoData'] video_id = full_data['activeVideo']['video'] video_data = full_data.get('videos', {}).get(video_id) or full_data['singleshots'][video_id] @@ -45,12 +49,18 @@ class ComCarCoffIE(InfoExtractor): formats = self._extract_m3u8_formats( video_data['mediaUrl'], video_id, ext='mp4') + timestamp = int_or_none(video_data.get('pubDateTime')) or parse_iso8601( + video_data.get('pubDate')) + duration = int_or_none(video_data.get('durationSeconds')) or parse_duration( + video_data.get('duration')) + return { 'id': video_id, 'display_id': display_id, 'title': video_data['title'], 'description': video_data.get('description'), - 'timestamp': parse_iso8601(video_data.get('pubDate')), + 'timestamp': timestamp, + 'duration': duration, 'thumbnails': thumbnails, 'formats': formats, 'webpage_url': 'http://comediansincarsgettingcoffee.com/%s' % (video_data.get('urlSlug', video_data.get('slug'))), diff --git a/youtube_dl/extractor/comedycentral.py b/youtube_dl/extractor/comedycentral.py index 91ebb0ce5..3e4bd10b6 100644 --- a/youtube_dl/extractor/comedycentral.py +++ b/youtube_dl/extractor/comedycentral.py @@ -151,12 +151,7 @@ class ComedyCentralShowsIE(MTVServicesInfoExtractor): mobj = re.match(self._VALID_URL, url) if mobj.group('shortname'): - if mobj.group('shortname') in ('tds', 'thedailyshow'): - url = 'http://thedailyshow.cc.com/full-episodes/' - else: - url = 'http://thecolbertreport.cc.com/full-episodes/' - mobj = re.match(self._VALID_URL, url, re.VERBOSE) - assert mobj is not None + return self.url_result('http://www.cc.com/shows/the-daily-show-with-trevor-noah/full-episodes') if mobj.group('clip'): if mobj.group('videotitle'): diff --git a/youtube_dl/extractor/common.py b/youtube_dl/extractor/common.py index 1e7db8a9b..655207447 100644 --- a/youtube_dl/extractor/common.py +++ b/youtube_dl/extractor/common.py @@ -10,20 +10,17 @@ import re import socket import sys import time -import xml.etree.ElementTree from ..compat import ( compat_cookiejar, compat_cookies, compat_getpass, - compat_HTTPError, compat_http_client, compat_urllib_error, compat_urllib_parse, - compat_urllib_parse_urlparse, - compat_urllib_request, compat_urlparse, compat_str, + compat_etree_fromstring, ) from ..utils import ( NO_DEFAULT, @@ -32,16 +29,20 @@ from ..utils import ( clean_html, compiled_regex_type, determine_ext, + error_to_compat_str, ExtractorError, 
fix_xml_ampersands, float_or_none, int_or_none, RegexNotFoundError, sanitize_filename, + sanitized_Request, unescapeHTML, + unified_strdate, url_basename, xpath_text, xpath_with_ns, + determine_protocol, ) @@ -152,6 +153,7 @@ class InfoExtractor(object): description: Full video description. uploader: Full name of the video uploader. creator: The main artist who created the video. + release_date: The date (YYYYMMDD) when the video was released. timestamp: UNIX timestamp of the moment the video became available. upload_date: Video upload date (YYYYMMDD). If not explicitly set, calculated from timestamp. @@ -163,12 +165,14 @@ class InfoExtractor(object): with the "ext" entry and one of: * "data": The subtitles file contents * "url": A URL pointing to the subtitles file + "ext" will be calculated from URL if missing automatic_captions: Like 'subtitles', used by the YoutubeIE for automatically generated captions - duration: Length of the video in seconds, as an integer. + duration: Length of the video in seconds, as an integer or float. view_count: How many users have watched the video on the platform. like_count: Number of positive ratings of the video dislike_count: Number of negative ratings of the video + repost_count: Number of reposts of the video average_rating: Average rating give by users, the scale used depends on the webpage comment_count: Number of comments on the video comments: A list of comments, each with one or more of the following @@ -307,11 +311,11 @@ class InfoExtractor(object): @classmethod def ie_key(cls): """A string for getting the InfoExtractor with get_info_extractor""" - return cls.__name__[:-2] + return compat_str(cls.__name__[:-2]) @property def IE_NAME(self): - return type(self).__name__[:-2] + return compat_str(type(self).__name__[:-2]) def _request_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True): """ Returns the response handle """ @@ -329,7 +333,8 @@ class InfoExtractor(object): return False if errnote is None: errnote = 'Unable to download webpage' - errmsg = '%s: %s' % (errnote, compat_str(err)) + + errmsg = '%s: %s' % (errnote, error_to_compat_str(err)) if fatal: raise ExtractorError(errmsg, sys.exc_info()[2], cause=err) else: @@ -458,7 +463,7 @@ class InfoExtractor(object): return xml_string if transform_source: xml_string = transform_source(xml_string) - return xml.etree.ElementTree.fromstring(xml_string.encode('utf-8')) + return compat_etree_fromstring(xml_string.encode('utf-8')) def _download_json(self, url_or_request, video_id, note='Downloading JSON metadata', @@ -619,7 +624,7 @@ class InfoExtractor(object): else: raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE) except (IOError, netrc.NetrcParseError) as err: - self._downloader.report_warning('parsing .netrc: %s' % compat_str(err)) + self._downloader.report_warning('parsing .netrc: %s' % error_to_compat_str(err)) return (username, password) @@ -642,8 +647,9 @@ class InfoExtractor(object): # Helper functions for extracting OpenGraph info @staticmethod def _og_regexes(prop): - content_re = r'content=(?:"([^>]+?)"|\'([^>]+?)\')' - property_re = r'(?:name|property)=[\'"]og:%s[\'"]' % re.escape(prop) + content_re = r'content=(?:"([^"]+?)"|\'([^\']+?)\'|\s*([^\s"\'=<>`]+?))' + property_re = (r'(?:name|property)=(?:\'og:%(prop)s\'|"og:%(prop)s"|\s*og:%(prop)s\b)' + % {'prop': re.escape(prop)}) template = r'<meta[^>]+?%s[^>]+?%s' return [ template % (property_re, content_re), @@ -772,14 +778,12 @@ class InfoExtractor(object): preference = 
f.get('preference') if preference is None: - proto = f.get('protocol') - if proto is None: - proto = compat_urllib_parse_urlparse(f.get('url', '')).scheme - - preference = 0 if proto in ['http', 'https'] else -0.1 + preference = 0 if f.get('ext') in ['f4f', 'f4m']: # Not yet supported preference -= 0.5 + proto_preference = 0 if determine_protocol(f) in ['http', 'https'] else -0.1 + if f.get('vcodec') == 'none': # audio only if self._downloader.params.get('prefer_free_formats'): ORDER = ['aac', 'mp3', 'm4a', 'webm', 'ogg', 'opus'] @@ -810,6 +814,7 @@ class InfoExtractor(object): f.get('vbr') if f.get('vbr') is not None else -1, f.get('height') if f.get('height') is not None else -1, f.get('width') if f.get('width') is not None else -1, + proto_preference, ext_preference, f.get('abr') if f.get('abr') is not None else -1, audio_ext_preference, @@ -837,7 +842,7 @@ class InfoExtractor(object): self._request_webpage(url, video_id, 'Checking %s URL' % item) return True except ExtractorError as e: - if isinstance(e.cause, compat_HTTPError): + if isinstance(e.cause, compat_urllib_error.URLError): self.to_screen( '%s: %s URL is invalid, skipping' % (video_id, item)) return False @@ -868,13 +873,18 @@ class InfoExtractor(object): time.sleep(timeout) def _extract_f4m_formats(self, manifest_url, video_id, preference=None, f4m_id=None, - transform_source=lambda s: fix_xml_ampersands(s).strip()): + transform_source=lambda s: fix_xml_ampersands(s).strip(), + fatal=True): manifest = self._download_xml( manifest_url, video_id, 'Downloading f4m manifest', 'Unable to download f4m manifest', # Some manifests may be malformed, e.g. prosiebensat1 generated manifests # (see https://github.com/rg3/youtube-dl/issues/6215#issuecomment-121704244) - transform_source=transform_source) + transform_source=transform_source, + fatal=fatal) + + if manifest is False: + return [] formats = [] manifest_version = '1.0' @@ -882,6 +892,11 @@ class InfoExtractor(object): if not media_nodes: manifest_version = '2.0' media_nodes = manifest.findall('{http://ns.adobe.com/f4m/2.0}media') + base_url = xpath_text( + manifest, ['{http://ns.adobe.com/f4m/1.0}baseURL', '{http://ns.adobe.com/f4m/2.0}baseURL'], + 'base URL', default=None) + if base_url: + base_url = base_url.strip() for i, media_el in enumerate(media_nodes): if manifest_version == '2.0': media_url = media_el.attrib.get('href') or media_el.attrib.get('url') @@ -889,13 +904,14 @@ class InfoExtractor(object): continue manifest_url = ( media_url if media_url.startswith('http://') or media_url.startswith('https://') - else ('/'.join(manifest_url.split('/')[:-1]) + '/' + media_url)) + else ((base_url or '/'.join(manifest_url.split('/')[:-1])) + '/' + media_url)) # If media_url is itself a f4m manifest do the recursive extraction # since bitrates in parent manifest (this one) and media_url manifest # may differ leading to inability to resolve the format by requested # bitrate in f4m downloader if determine_ext(manifest_url) == 'f4m': - formats.extend(self._extract_f4m_formats(manifest_url, video_id, preference, f4m_id)) + formats.extend(self._extract_f4m_formats( + manifest_url, video_id, preference, f4m_id, fatal=fatal)) continue tbr = int_or_none(media_el.attrib.get('bitrate')) formats.append({ @@ -931,13 +947,15 @@ class InfoExtractor(object): if re.match(r'^https?://', u) else compat_urlparse.urljoin(m3u8_url, u)) - m3u8_doc = self._download_webpage( + res = self._download_webpage_handle( m3u8_url, video_id, note=note or 'Downloading m3u8 information', errnote=errnote or 'Failed 
to download m3u8 information', fatal=fatal) - if m3u8_doc is False: - return m3u8_doc + if res is False: + return [] + m3u8_doc, urlh = res + m3u8_url = urlh.geturl() last_info = None last_media = None kv_rex = re.compile( @@ -1043,6 +1061,7 @@ class InfoExtractor(object): video_id = os.path.splitext(url_basename(smil_url))[0] title = None description = None + upload_date = None for meta in smil.findall(self._xpath_ns('./head/meta', namespace)): name = meta.attrib.get('name') content = meta.attrib.get('content') @@ -1052,11 +1071,22 @@ class InfoExtractor(object): title = content elif not description and name in ('description', 'abstract'): description = content + elif not upload_date and name == 'date': + upload_date = unified_strdate(content) + + thumbnails = [{ + 'id': image.get('type'), + 'url': image.get('src'), + 'width': int_or_none(image.get('width')), + 'height': int_or_none(image.get('height')), + } for image in smil.findall(self._xpath_ns('.//image', namespace)) if image.get('src')] return { 'id': video_id, 'title': title or video_id, 'description': description, + 'upload_date': upload_date, + 'thumbnails': thumbnails, 'formats': formats, 'subtitles': subtitles, } @@ -1083,7 +1113,7 @@ class InfoExtractor(object): if not src: continue - bitrate = int_or_none(video.get('system-bitrate') or video.get('systemBitrate'), 1000) + bitrate = float_or_none(video.get('system-bitrate') or video.get('systemBitrate'), 1000) filesize = int_or_none(video.get('size') or video.get('fileSize')) width = int_or_none(video.get('width')) height = int_or_none(video.get('height')) @@ -1116,7 +1146,7 @@ class InfoExtractor(object): if proto == 'm3u8' or src_ext == 'm3u8': formats.extend(self._extract_m3u8_formats( - src_url, video_id, ext or 'mp4', m3u8_id='hls')) + src_url, video_id, ext or 'mp4', m3u8_id='hls', fatal=False)) continue if src_ext == 'f4m': @@ -1128,10 +1158,10 @@ class InfoExtractor(object): } f4m_url += '&' if '?' in f4m_url else '?' f4m_url += compat_urllib_parse.urlencode(f4m_params) - formats.extend(self._extract_f4m_formats(f4m_url, video_id, f4m_id='hds')) + formats.extend(self._extract_f4m_formats(f4m_url, video_id, f4m_id='hds', fatal=False)) continue - if src_url.startswith('http'): + if src_url.startswith('http') and self._is_valid_url(src, video_id): http_count += 1 formats.append({ 'url': src_url, @@ -1250,7 +1280,7 @@ class InfoExtractor(object): def _get_cookies(self, url): """ Return a compat_cookies.SimpleCookie with the cookies for the url """ - req = compat_urllib_request.Request(url) + req = sanitized_Request(url) self._downloader.cookiejar.add_cookie_header(req) return compat_cookies.SimpleCookie(req.get_header('Cookie')) diff --git a/youtube_dl/extractor/condenast.py b/youtube_dl/extractor/condenast.py index d6949ca28..6f92ae2ed 100644 --- a/youtube_dl/extractor/condenast.py +++ b/youtube_dl/extractor/condenast.py @@ -11,6 +11,7 @@ from ..compat import ( ) from ..utils import ( orderedSet, + remove_end, ) @@ -44,12 +45,12 @@ class CondeNastIE(InfoExtractor): 'wmagazine': 'W Magazine', } - _VALID_URL = r'http://(?:video|www|player)\.(?P<site>%s)\.com/(?P<type>watch|series|video|embed)/(?P<id>[^/?#]+)' % '|'.join(_SITES.keys()) + _VALID_URL = r'http://(?:video|www|player)\.(?P<site>%s)\.com/(?P<type>watch|series|video|embed(?:js)?)/(?P<id>[^/?#]+)' % '|'.join(_SITES.keys()) IE_DESC = 'Condé Nast media group: %s' % ', '.join(sorted(_SITES.values())) - EMBED_URL = r'(?:https?:)?//player\.(?P<site>%s)\.com/(?P<type>embed)/.+?' 
% '|'.join(_SITES.keys()) + EMBED_URL = r'(?:https?:)?//player\.(?P<site>%s)\.com/(?P<type>embed(?:js)?)/.+?' % '|'.join(_SITES.keys()) - _TEST = { + _TESTS = [{ 'url': 'http://video.wired.com/watch/3d-printed-speakers-lit-with-led', 'md5': '1921f713ed48aabd715691f774c451f7', 'info_dict': { @@ -58,7 +59,16 @@ class CondeNastIE(InfoExtractor): 'title': '3D Printed Speakers Lit With LED', 'description': 'Check out these beautiful 3D printed LED speakers. You can\'t actually buy them, but LumiGeek is working on a board that will let you make you\'re own.', } - } + }, { + # JS embed + 'url': 'http://player.cnevids.com/embedjs/55f9cf8b61646d1acf00000c/5511d76261646d5566020000.js', + 'md5': 'f1a6f9cafb7083bab74a710f65d08999', + 'info_dict': { + 'id': '55f9cf8b61646d1acf00000c', + 'ext': 'mp4', + 'title': '3D printed TSA Travel Sentry keys really do open TSA locks', + } + }] def _extract_series(self, url, webpage): title = self._html_search_regex(r'<div class="cne-series-info">.*?<h1>(.+?)</h1>', @@ -122,6 +132,13 @@ class CondeNastIE(InfoExtractor): url_type = mobj.group('type') item_id = mobj.group('id') + # Convert JS embed to regular embed + if url_type == 'embedjs': + parsed_url = compat_urlparse.urlparse(url) + url = compat_urlparse.urlunparse(parsed_url._replace( + path=remove_end(parsed_url.path, '.js').replace('/embedjs/', '/embed/'))) + url_type = 'embed' + self.to_screen('Extracting from %s with the Condé Nast extractor' % self._SITES[site]) webpage = self._download_webpage(url, item_id) diff --git a/youtube_dl/extractor/criterion.py b/youtube_dl/extractor/criterion.py index 4fb178165..dedb810a0 100644 --- a/youtube_dl/extractor/criterion.py +++ b/youtube_dl/extractor/criterion.py @@ -27,9 +27,7 @@ class CriterionIE(InfoExtractor): final_url = self._search_regex( r'so.addVariable\("videoURL", "(.+?)"\)\;', webpage, 'video url') title = self._og_search_title(webpage) - description = self._html_search_regex( - r'<meta name="description" content="(.+?)" />', - webpage, 'video description') + description = self._html_search_meta('description', webpage) thumbnail = self._search_regex( r'so.addVariable\("thumbnailURL", "(.+?)"\)\;', webpage, 'thumbnail url') diff --git a/youtube_dl/extractor/crunchyroll.py b/youtube_dl/extractor/crunchyroll.py index 95952bc29..00d943f77 100644 --- a/youtube_dl/extractor/crunchyroll.py +++ b/youtube_dl/extractor/crunchyroll.py @@ -5,12 +5,12 @@ import re import json import base64 import zlib -import xml.etree.ElementTree from hashlib import sha1 from math import pow, sqrt, floor from .common import InfoExtractor from ..compat import ( + compat_etree_fromstring, compat_urllib_parse, compat_urllib_parse_unquote, compat_urllib_request, @@ -21,7 +21,9 @@ from ..utils import ( bytes_to_intlist, intlist_to_bytes, int_or_none, + lowercase_escape, remove_end, + sanitized_Request, unified_strdate, urlencode_postdata, xpath_text, @@ -32,9 +34,29 @@ from ..aes import ( class CrunchyrollBaseIE(InfoExtractor): + _NETRC_MACHINE = 'crunchyroll' + + def _login(self): + (username, password) = self._get_login_info() + if username is None: + return + self.report_login() + login_url = 'https://www.crunchyroll.com/?a=formhandler' + data = urlencode_postdata({ + 'formname': 'RpcApiUser_Login', + 'name': username, + 'password': password, + }) + login_request = sanitized_Request(login_url, data) + login_request.add_header('Content-Type', 'application/x-www-form-urlencoded') + self._download_webpage(login_request, None, False, 'Wrong login info') + + def _real_initialize(self): + 
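# _real_initialize() is invoked once per extractor instance before any URL is
# processed; in this base class it simply triggers the form login defined
# above, which POSTs the RpcApiUser_Login form (built with urlencode_postdata)
# to the formhandler endpoint with an explicit
# application/x-www-form-urlencoded Content-Type header.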
self._login() + def _download_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, tries=1, timeout=5, encoding=None): request = (url_or_request if isinstance(url_or_request, compat_urllib_request.Request) - else compat_urllib_request.Request(url_or_request)) + else sanitized_Request(url_or_request)) # Accept-Language must be set explicitly to accept any language to avoid issues # similar to https://github.com/rg3/youtube-dl/issues/6797. # Along with IP address Crunchyroll uses Accept-Language to guess whether georestriction @@ -46,10 +68,22 @@ class CrunchyrollBaseIE(InfoExtractor): return super(CrunchyrollBaseIE, self)._download_webpage( request, video_id, note, errnote, fatal, tries, timeout, encoding) + @staticmethod + def _add_skip_wall(url): + parsed_url = compat_urlparse.urlparse(url) + qs = compat_urlparse.parse_qs(parsed_url.query) + # Always force skip_wall to bypass maturity wall, namely 18+ confirmation message: + # > This content may be inappropriate for some people. + # > Are you sure you want to continue? + # since it's not disabled by default in crunchyroll account's settings. + # See https://github.com/rg3/youtube-dl/issues/7202. + qs['skip_wall'] = ['1'] + return compat_urlparse.urlunparse( + parsed_url._replace(query=compat_urllib_parse.urlencode(qs, True))) + class CrunchyrollIE(CrunchyrollBaseIE): _VALID_URL = r'https?://(?:(?P<prefix>www|m)\.)?(?P<url>crunchyroll\.(?:com|fr)/(?:media(?:-|/\?id=)|[^/]*/[^/?&]*?)(?P<video_id>[0-9]+))(?:[/?&]|$)' - _NETRC_MACHINE = 'crunchyroll' _TESTS = [{ 'url': 'http://www.crunchyroll.com/wanna-be-the-strongest-in-the-world/episode-1-an-idol-wrestler-is-born-645513', 'info_dict': { @@ -72,7 +106,7 @@ class CrunchyrollIE(CrunchyrollBaseIE): 'id': '589804', 'ext': 'flv', 'title': 'Culture Japan Episode 1 – Rebuilding Japan after the 3.11', - 'description': 'md5:fe2743efedb49d279552926d0bd0cd9e', + 'description': 'md5:2fbc01f90b87e8e9137296f37b461c12', 'thumbnail': 're:^https?://.*\.jpg$', 'uploader': 'Danny Choo Network', 'upload_date': '20120213', @@ -81,10 +115,13 @@ class CrunchyrollIE(CrunchyrollBaseIE): # rtmp 'skip_download': True, }, - }, { 'url': 'http://www.crunchyroll.fr/girl-friend-beta/episode-11-goodbye-la-mode-661697', 'only_matching': True, + }, { + # geo-restricted (US), 18+ maturity wall, non-premium available + 'url': 'http://www.crunchyroll.com/cosplay-complex-ova/episode-1-the-birth-of-the-cosplay-club-565617', + 'only_matching': True, }] _FORMAT_IDS = { @@ -94,24 +131,6 @@ class CrunchyrollIE(CrunchyrollBaseIE): '1080': ('80', '108'), } - def _login(self): - (username, password) = self._get_login_info() - if username is None: - return - self.report_login() - login_url = 'https://www.crunchyroll.com/?a=formhandler' - data = urlencode_postdata({ - 'formname': 'RpcApiUser_Login', - 'name': username, - 'password': password, - }) - login_request = compat_urllib_request.Request(login_url, data) - login_request.add_header('Content-Type', 'application/x-www-form-urlencoded') - self._download_webpage(login_request, None, False, 'Wrong login info') - - def _real_initialize(self): - self._login() - def _decrypt_subtitles(self, data, iv, id): data = bytes_to_intlist(base64.b64decode(data.encode('utf-8'))) iv = bytes_to_intlist(base64.b64decode(iv.encode('utf-8'))) @@ -217,7 +236,7 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text return output def _extract_subtitles(self, subtitle): - sub_root = xml.etree.ElementTree.fromstring(subtitle) + sub_root = 
compat_etree_fromstring(subtitle) return [{ 'ext': 'srt', 'data': self._convert_subtitles_to_srt(sub_root), @@ -228,7 +247,7 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text def _get_subtitles(self, video_id, webpage): subtitles = {} - for sub_id, sub_name in re.findall(r'\?ssid=([0-9]+)" title="([^"]+)', webpage): + for sub_id, sub_name in re.findall(r'\bssid=([0-9]+)"[^>]+?\btitle="([^"]+)', webpage): sub_page = self._download_webpage( 'http://www.crunchyroll.com/xml/?req=RpcApiSubtitle_GetXml&subtitle_script_id=' + sub_id, video_id, note='Downloading subtitles for ' + sub_name) @@ -254,7 +273,7 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text else: webpage_url = 'http://www.' + mobj.group('url') - webpage = self._download_webpage(webpage_url, video_id, 'Downloading webpage') + webpage = self._download_webpage(self._add_skip_wall(webpage_url), video_id, 'Downloading webpage') note_m = self._html_search_regex( r'<div class="showmedia-trailer-notice">(.+?)</div>', webpage, 'trailer-notice', default='') @@ -270,11 +289,15 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text if 'To view this, please log in to verify you are 18 or older.' in webpage: self.raise_login_required() - video_title = self._html_search_regex(r'<h1[^>]*>(.+?)</h1>', webpage, 'video_title', flags=re.DOTALL) + video_title = self._html_search_regex( + r'(?s)<h1[^>]*>((?:(?!<h1).)*?<span[^>]+itemprop=["\']title["\'][^>]*>(?:(?!<h1).)+?)</h1>', + webpage, 'video_title') video_title = re.sub(r' {2,}', ' ', video_title) - video_description = self._html_search_regex(r'"description":"([^"]+)', webpage, 'video_description', default='') - if not video_description: - video_description = None + video_description = self._html_search_regex( + r'<script[^>]*>\s*.+?\[media_id=%s\].+?"description"\s*:\s*"([^"]+)' % video_id, + webpage, 'description', default=None) + if video_description: + video_description = lowercase_escape(video_description.replace(r'\r\n', '\n')) video_upload_date = self._html_search_regex( [r'<div>Availability for free users:(.+?)</div>', r'<div>[^<>]+<span>\s*(.+?\d{4})\s*</span></div>'], webpage, 'video_upload_date', fatal=False, flags=re.DOTALL) @@ -285,7 +308,7 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text 'video_uploader', fatal=False) playerdata_url = compat_urllib_parse_unquote(self._html_search_regex(r'"config_url":"([^"]+)', webpage, 'playerdata_url')) - playerdata_req = compat_urllib_request.Request(playerdata_url) + playerdata_req = sanitized_Request(playerdata_url) playerdata_req.data = compat_urllib_parse.urlencode({'current_page': webpage_url}) playerdata_req.add_header('Content-Type', 'application/x-www-form-urlencoded') playerdata = self._download_webpage(playerdata_req, video_id, note='Downloading media info') @@ -297,7 +320,7 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text for fmt in re.findall(r'showmedia\.([0-9]{3,4})p', webpage): stream_quality, stream_format = self._FORMAT_IDS[fmt] video_format = fmt + 'p' - streamdata_req = compat_urllib_request.Request( + streamdata_req = sanitized_Request( 'http://www.crunchyroll.com/xml/?req=RpcApiVideoPlayer_GetStandardConfig&media_id=%s&video_format=%s&video_quality=%s' % (stream_id, stream_format, stream_quality), compat_urllib_parse.urlencode({'current_page': url}).encode('utf-8')) @@ -352,7 +375,7 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text 
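The _add_skip_wall helper introduced above (and used by both Crunchyroll extractors in this changeset) is plain query-string surgery. A minimal standalone sketch of the same technique, written against the Python 3 stdlib rather than the compat_* shims the diff uses:

from urllib.parse import parse_qs, urlencode, urlparse, urlunparse

def add_skip_wall(url):
    # Force skip_wall=1 so the 18+ maturity wall does not interrupt
    # extraction (see https://github.com/rg3/youtube-dl/issues/7202).
    parsed = urlparse(url)
    qs = parse_qs(parsed.query)
    qs['skip_wall'] = ['1']
    # doseq=True flattens the {key: [value, ...]} mapping produced by
    # parse_qs back into repeated key=value pairs.
    return urlunparse(parsed._replace(query=urlencode(qs, doseq=True)))

print(add_skip_wall('http://www.crunchyroll.com/ladies-versus-butlers'))
# http://www.crunchyroll.com/ladies-versus-butlers?skip_wall=1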
class CrunchyrollShowPlaylistIE(CrunchyrollBaseIE): IE_NAME = "crunchyroll:playlist" - _VALID_URL = r'https?://(?:(?P<prefix>www|m)\.)?(?P<url>crunchyroll\.com/(?!(?:news|anime-news|library|forum|launchcalendar|lineup|store|comics|freetrial|login))(?P<id>[\w\-]+))/?$' + _VALID_URL = r'https?://(?:(?P<prefix>www|m)\.)?(?P<url>crunchyroll\.com/(?!(?:news|anime-news|library|forum|launchcalendar|lineup|store|comics|freetrial|login))(?P<id>[\w\-]+))/?(?:\?|$)' _TESTS = [{ 'url': 'http://www.crunchyroll.com/a-bridge-to-the-starry-skies-hoshizora-e-kakaru-hashi', @@ -361,12 +384,25 @@ class CrunchyrollShowPlaylistIE(CrunchyrollBaseIE): 'title': 'A Bridge to the Starry Skies - Hoshizora e Kakaru Hashi' }, 'playlist_count': 13, + }, { + # geo-restricted (US), 18+ maturity wall, non-premium available + 'url': 'http://www.crunchyroll.com/cosplay-complex-ova', + 'info_dict': { + 'id': 'cosplay-complex-ova', + 'title': 'Cosplay Complex OVA' + }, + 'playlist_count': 3, + 'skip': 'Georestricted', + }, { + # geo-restricted (US), 18+ maturity wall, non-premium will be available since 2015.11.14 + 'url': 'http://www.crunchyroll.com/ladies-versus-butlers?skip_wall=1', + 'only_matching': True, }] def _real_extract(self, url): show_id = self._match_id(url) - webpage = self._download_webpage(url, show_id) + webpage = self._download_webpage(self._add_skip_wall(url), show_id) title = self._html_search_regex( r'(?s)<h1[^>]*>\s*<span itemprop="name">(.*?)</span>', webpage, 'title') diff --git a/youtube_dl/extractor/cspan.py b/youtube_dl/extractor/cspan.py index fbefd37d0..b3ee67018 100644 --- a/youtube_dl/extractor/cspan.py +++ b/youtube_dl/extractor/cspan.py @@ -9,6 +9,7 @@ from ..utils import ( find_xpath_attr, smuggle_url, determine_ext, + ExtractorError, ) from .senateisvp import SenateISVPIE @@ -18,33 +19,32 @@ class CSpanIE(InfoExtractor): IE_DESC = 'C-SPAN' _TESTS = [{ 'url': 'http://www.c-span.org/video/?313572-1/HolderonV', - 'md5': '8e44ce11f0f725527daccc453f553eb0', + 'md5': '94b29a4f131ff03d23471dd6f60b6a1d', 'info_dict': { 'id': '315139', 'ext': 'mp4', 'title': 'Attorney General Eric Holder on Voting Rights Act Decision', - 'description': 'Attorney General Eric Holder spoke to reporters following the Supreme Court decision in Shelby County v. Holder in which the court ruled that the preclearance provisions of the Voting Rights Act could not be enforced until Congress established new guidelines for review.', + 'description': 'Attorney General Eric Holder speaks to reporters following the Supreme Court decision in [Shelby County v. 
Holder], in which the court ruled that the preclearance provisions of the Voting Rights Act could not be enforced.', }, 'skip': 'Regularly fails on travis, for unknown reasons', }, { 'url': 'http://www.c-span.org/video/?c4486943/cspan-international-health-care-models', - # For whatever reason, the served video alternates between - # two different ones + 'md5': '8e5fbfabe6ad0f89f3012a7943c1287b', 'info_dict': { - 'id': '340723', + 'id': 'c4486943', 'ext': 'mp4', - 'title': 'International Health Care Models', + 'title': 'CSPAN - International Health Care Models', 'description': 'md5:7a985a2d595dba00af3d9c9f0783c967', } }, { 'url': 'http://www.c-span.org/video/?318608-1/gm-ignition-switch-recall', - 'md5': '446562a736c6bf97118e389433ed88d4', + 'md5': '2ae5051559169baadba13fc35345ae74', 'info_dict': { 'id': '342759', 'ext': 'mp4', 'title': 'General Motors Ignition Switch Recall', 'duration': 14848, - 'description': 'md5:70c7c3b8fa63fa60d42772440596034c' + 'description': 'md5:118081aedd24bf1d3b68b3803344e7f3' }, }, { # Video from senate.gov @@ -57,67 +57,82 @@ class CSpanIE(InfoExtractor): }] def _real_extract(self, url): - mobj = re.match(self._VALID_URL, url) - page_id = mobj.group('id') - webpage = self._download_webpage(url, page_id) - video_id = self._search_regex(r'progid=\'?([0-9]+)\'?>', webpage, 'video id') + video_id = self._match_id(url) + video_type = None + webpage = self._download_webpage(url, video_id) + # We first look for clipid, because clipprog always appears before + patterns = [r'id=\'clip(%s)\'\s*value=\'([0-9]+)\'' % t for t in ('id', 'prog')] + results = list(filter(None, (re.search(p, webpage) for p in patterns))) + if results: + matches = results[0] + video_type, video_id = matches.groups() + video_type = 'clip' if video_type == 'id' else 'program' + else: + senate_isvp_url = SenateISVPIE._search_iframe_url(webpage) + if senate_isvp_url: + title = self._og_search_title(webpage) + surl = smuggle_url(senate_isvp_url, {'force_title': title}) + return self.url_result(surl, 'SenateISVP', video_id, title) + if video_type is None or video_id is None: + raise ExtractorError('unable to find video id and type') - description = self._html_search_regex( - [ - # The full description - r'<div class=\'expandable\'>(.*?)<a href=\'#\'', - # If the description is small enough the other div is not - # present, otherwise this is a stripped version - r'<p class=\'initial\'>(.*?)</p>' - ], - webpage, 'description', flags=re.DOTALL, default=None) + def get_text_attr(d, attr): + return d.get(attr, {}).get('#text') - info_url = 'http://c-spanvideo.org/videoLibrary/assets/player/ajax-player.php?os=android&html5=program&id=' + video_id - data = self._download_json(info_url, video_id) + data = self._download_json( + 'http://www.c-span.org/assets/player/ajax-player.php?os=android&html5=%s&id=%s' % (video_type, video_id), + video_id)['video'] + if data['@status'] != 'Success': + raise ExtractorError('%s said: %s' % (self.IE_NAME, get_text_attr(data, 'error')), expected=True) doc = self._download_xml( - 'http://www.c-span.org/common/services/flashXml.php?programid=' + video_id, + 'http://www.c-span.org/common/services/flashXml.php?%sid=%s' % (video_type, video_id), video_id) + description = self._html_search_meta('description', webpage) + title = find_xpath_attr(doc, './/string', 'name', 'title').text thumbnail = find_xpath_attr(doc, './/string', 'name', 'poster').text - senate_isvp_url = SenateISVPIE._search_iframe_url(webpage) - if senate_isvp_url: - surl = smuggle_url(senate_isvp_url, 
{'force_title': title}) - return self.url_result(surl, 'SenateISVP', video_id, title) - - files = data['video']['files'] - try: - capfile = data['video']['capfile']['#text'] - except KeyError: - capfile = None + files = data['files'] + capfile = get_text_attr(data, 'capfile') - entries = [{ - 'id': '%s_%d' % (video_id, partnum + 1), - 'title': ( - title if len(files) == 1 else - '%s part %d' % (title, partnum + 1)), - 'url': unescapeHTML(f['path']['#text']), - 'description': description, - 'thumbnail': thumbnail, - 'duration': int_or_none(f.get('length', {}).get('#text')), - 'subtitles': { - 'en': [{ - 'url': capfile, - 'ext': determine_ext(capfile, 'dfxp') - }], - } if capfile else None, - } for partnum, f in enumerate(files)] + entries = [] + for partnum, f in enumerate(files): + formats = [] + for quality in f['qualities']: + formats.append({ + 'format_id': '%s-%sp' % (get_text_attr(quality, 'bitrate'), get_text_attr(quality, 'height')), + 'url': unescapeHTML(get_text_attr(quality, 'file')), + 'height': int_or_none(get_text_attr(quality, 'height')), + 'tbr': int_or_none(get_text_attr(quality, 'bitrate')), + }) + self._sort_formats(formats) + entries.append({ + 'id': '%s_%d' % (video_id, partnum + 1), + 'title': ( + title if len(files) == 1 else + '%s part %d' % (title, partnum + 1)), + 'formats': formats, + 'description': description, + 'thumbnail': thumbnail, + 'duration': int_or_none(get_text_attr(f, 'length')), + 'subtitles': { + 'en': [{ + 'url': capfile, + 'ext': determine_ext(capfile, 'dfxp') + }], + } if capfile else None, + }) if len(entries) == 1: entry = dict(entries[0]) - entry['id'] = video_id + entry['id'] = 'c' + video_id if video_type == 'clip' else video_id return entry else: return { '_type': 'playlist', 'entries': entries, 'title': title, - 'id': video_id, + 'id': 'c' + video_id if video_type == 'clip' else video_id, } diff --git a/youtube_dl/extractor/dailymotion.py b/youtube_dl/extractor/dailymotion.py index 2d90b2224..439fd42e8 100644 --- a/youtube_dl/extractor/dailymotion.py +++ b/youtube_dl/extractor/dailymotion.py @@ -7,15 +7,13 @@ import itertools from .common import InfoExtractor -from ..compat import ( - compat_str, - compat_urllib_request, -) from ..utils import ( - ExtractorError, determine_ext, + error_to_compat_str, + ExtractorError, int_or_none, parse_iso8601, + sanitized_Request, str_to_int, unescapeHTML, ) @@ -25,7 +23,7 @@ class DailymotionBaseInfoExtractor(InfoExtractor): @staticmethod def _build_request(url): """Build a request with the family filter disabled""" - request = compat_urllib_request.Request(url) + request = sanitized_Request(url) request.add_header('Cookie', 'family_filter=off; ff=off') return request @@ -96,6 +94,16 @@ class DailymotionIE(DailymotionBaseInfoExtractor): 'uploader': 'HotWaves1012', 'age_limit': 18, } + }, + # geo-restricted, player v5 + { + 'url': 'http://www.dailymotion.com/video/xhza0o', + 'only_matching': True, + }, + # with subtitles + { + 'url': 'http://www.dailymotion.com/video/x20su5f_the-power-of-nightmares-1-the-rise-of-the-politics-of-fear-bbc-2004_news', + 'only_matching': True, } ] @@ -119,11 +127,16 @@ class DailymotionIE(DailymotionBaseInfoExtractor): webpage, 'comment count', fatal=False)) player_v5 = self._search_regex( - r'playerV5\s*=\s*dmp\.create\([^,]+?,\s*({.+?})\);', + [r'buildPlayer\(({.+?})\);\n', # See https://github.com/rg3/youtube-dl/issues/7826 + r'playerV5\s*=\s*dmp\.create\([^,]+?,\s*({.+?})\);', + r'buildPlayer\(({.+?})\);'], webpage, 'player v5', default=None) if player_v5: player = 
self._parse_json(player_v5, video_id) metadata = player['metadata'] + + self._check_error(metadata) + formats = [] for quality, media_list in metadata['qualities'].items(): for media in media_list: @@ -133,9 +146,13 @@ class DailymotionIE(DailymotionBaseInfoExtractor): type_ = media.get('type') if type_ == 'application/vnd.lumberjack.manifest': continue - if type_ == 'application/x-mpegURL' or determine_ext(media_url) == 'm3u8': + ext = determine_ext(media_url) + if type_ == 'application/x-mpegURL' or ext == 'm3u8': formats.extend(self._extract_m3u8_formats( - media_url, video_id, 'mp4', m3u8_id='hls')) + media_url, video_id, 'mp4', m3u8_id='hls', fatal=False)) + elif type_ == 'application/f4m' or ext == 'f4m': + formats.extend(self._extract_f4m_formats( + media_url, video_id, preference=-1, f4m_id='hds', fatal=False)) else: f = { 'url': media_url, @@ -158,11 +175,13 @@ class DailymotionIE(DailymotionBaseInfoExtractor): uploader_id = metadata.get('owner', {}).get('id') subtitles = {} - for subtitle_lang, subtitle in metadata.get('subtitles', {}).get('data', {}).items(): - subtitles[subtitle_lang] = [{ - 'ext': determine_ext(subtitle_url), - 'url': subtitle_url, - } for subtitle_url in subtitle.get('urls', [])] + subtitles_data = metadata.get('subtitles', {}).get('data', {}) + if subtitles_data and isinstance(subtitles_data, dict): + for subtitle_lang, subtitle in subtitles_data.items(): + subtitles[subtitle_lang] = [{ + 'ext': determine_ext(subtitle_url), + 'url': subtitle_url, + } for subtitle_url in subtitle.get('urls', [])] return { 'id': video_id, @@ -201,9 +220,7 @@ class DailymotionIE(DailymotionBaseInfoExtractor): 'video info', flags=re.MULTILINE), video_id) - if info.get('error') is not None: - msg = 'Couldn\'t get video, Dailymotion says: %s' % info['error']['title'] - raise ExtractorError(msg, expected=True) + self._check_error(info) formats = [] for (key, format_id) in self._FORMATS: @@ -246,13 +263,18 @@ class DailymotionIE(DailymotionBaseInfoExtractor): 'duration': info['duration'] } + def _check_error(self, info): + if info.get('error') is not None: + raise ExtractorError( + '%s said: %s' % (self.IE_NAME, info['error']['title']), expected=True) + def _get_subtitles(self, video_id, webpage): try: sub_list = self._download_webpage( 'https://api.dailymotion.com/video/%s/subtitles?fields=id,language,url' % video_id, video_id, note=False) except ExtractorError as err: - self._downloader.report_warning('unable to download video subtitles: %s' % compat_str(err)) + self._downloader.report_warning('unable to download video subtitles: %s' % error_to_compat_str(err)) return {} info = json.loads(sub_list) if (info['total'] > 0): diff --git a/youtube_dl/extractor/daum.py b/youtube_dl/extractor/daum.py index 934da765e..f08f57157 100644 --- a/youtube_dl/extractor/daum.py +++ b/youtube_dl/extractor/daum.py @@ -2,57 +2,51 @@ from __future__ import unicode_literals -import re - from .common import InfoExtractor -from ..compat import ( - compat_urllib_parse, +from ..compat import compat_urllib_parse +from ..utils import ( + int_or_none, + str_to_int, + xpath_text, ) class DaumIE(InfoExtractor): - _VALID_URL = r'https?://(?:m\.)?tvpot\.daum\.net/(?:v/|.*?clipid=)(?P<id>[^?#&]+)' + _VALID_URL = r'https?://(?:m\.)?tvpot\.daum\.net/v/(?P<id>[^?#&]+)' IE_NAME = 'daum.net' _TESTS = [{ - 'url': 'http://tvpot.daum.net/clip/ClipView.do?clipid=52554690', + 'url': 'http://tvpot.daum.net/v/vab4dyeDBysyBssyukBUjBz', 'info_dict': { - 'id': '52554690', + 'id': 'vab4dyeDBysyBssyukBUjBz', 'ext': 'mp4', - 
'title': 'DOTA 2GETHER 시즌2 6회 - 2부', - 'description': 'DOTA 2GETHER 시즌2 6회 - 2부', - 'upload_date': '20130831', - 'duration': 3868, + 'title': '마크 헌트 vs 안토니오 실바', + 'description': 'Mark Hunt vs Antonio Silva', + 'upload_date': '20131217', + 'duration': 2117, + 'view_count': int, + 'comment_count': int, }, }, { - 'url': 'http://tvpot.daum.net/v/vab4dyeDBysyBssyukBUjBz', - 'only_matching': True, - }, { 'url': 'http://tvpot.daum.net/v/07dXWRka62Y%24', 'only_matching': True, }] def _real_extract(self, url): - mobj = re.match(self._VALID_URL, url) - video_id = mobj.group('id') - canonical_url = 'http://tvpot.daum.net/v/%s' % video_id - webpage = self._download_webpage(canonical_url, video_id) - full_id = self._search_regex( - r'src=["\']http://videofarm\.daum\.net/controller/video/viewer/Video\.html\?.*?vid=(.+?)[&"\']', - webpage, 'full id') - query = compat_urllib_parse.urlencode({'vid': full_id}) + video_id = self._match_id(url) + query = compat_urllib_parse.urlencode({'vid': video_id}) info = self._download_xml( 'http://tvpot.daum.net/clip/ClipInfoXml.do?' + query, video_id, 'Downloading video info') - urls = self._download_xml( - 'http://videofarm.daum.net/controller/api/open/v1_2/MovieData.apixml?' + query, + movie_data = self._download_json( + 'http://videofarm.daum.net/controller/api/closed/v1_2/IntegratedMovieData.json?' + query, video_id, 'Downloading video formats info') formats = [] - for format_el in urls.findall('result/output_list/output_list'): - profile = format_el.attrib['profile'] + for format_el in movie_data['output_list']['output_list']: + profile = format_el['profile'] format_query = compat_urllib_parse.urlencode({ - 'vid': full_id, + 'vid': video_id, 'profile': profile, }) url_doc = self._download_xml( @@ -62,14 +56,57 @@ class DaumIE(InfoExtractor): formats.append({ 'url': format_url, 'format_id': profile, + 'width': int_or_none(format_el.get('width')), + 'height': int_or_none(format_el.get('height')), + 'filesize': int_or_none(format_el.get('filesize')), }) + self._sort_formats(formats) return { 'id': video_id, 'title': info.find('TITLE').text, 'formats': formats, - 'thumbnail': self._og_search_thumbnail(webpage), - 'description': info.find('CONTENTS').text, - 'duration': int(info.find('DURATION').text), + 'thumbnail': xpath_text(info, 'THUMB_URL'), + 'description': xpath_text(info, 'CONTENTS'), + 'duration': int_or_none(xpath_text(info, 'DURATION')), 'upload_date': info.find('REGDTTM').text[:8], + 'view_count': str_to_int(xpath_text(info, 'PLAY_CNT')), + 'comment_count': str_to_int(xpath_text(info, 'COMMENT_CNT')), + } + + +class DaumClipIE(InfoExtractor): + _VALID_URL = r'https?://(?:m\.)?tvpot\.daum\.net/(?:clip/ClipView.do|mypot/View.do)\?.*?clipid=(?P<id>\d+)' + IE_NAME = 'daum.net:clip' + + _TESTS = [{ + 'url': 'http://tvpot.daum.net/clip/ClipView.do?clipid=52554690', + 'info_dict': { + 'id': '52554690', + 'ext': 'mp4', + 'title': 'DOTA 2GETHER 시즌2 6회 - 2부', + 'description': 'DOTA 2GETHER 시즌2 6회 - 2부', + 'upload_date': '20130831', + 'duration': 3868, + 'view_count': int, + }, + }] + + def _real_extract(self, url): + video_id = self._match_id(url) + clip_info = self._download_json( + 'http://tvpot.daum.net/mypot/json/GetClipInfo.do?clipid=%s' % video_id, + video_id, 'Downloading clip info')['clip_bean'] + + return { + '_type': 'url_transparent', + 'id': video_id, + 'url': 'http://tvpot.daum.net/v/%s' % clip_info['vid'], + 'title': clip_info['title'], + 'thumbnail': clip_info.get('thumb_url'), + 'description': clip_info.get('contents'), + 'duration': 
int_or_none(clip_info.get('duration')), + 'upload_date': clip_info.get('up_date')[:8], + 'view_count': int_or_none(clip_info.get('play_count')), + 'ie_key': 'Daum', } diff --git a/youtube_dl/extractor/dbtv.py b/youtube_dl/extractor/dbtv.py index 212217625..133cdc50b 100644 --- a/youtube_dl/extractor/dbtv.py +++ b/youtube_dl/extractor/dbtv.py @@ -13,8 +13,8 @@ from ..utils import ( class DBTVIE(InfoExtractor): - _VALID_URL = r'http://dbtv\.no/(?P<id>[0-9]+)#(?P<display_id>.+)' - _TEST = { + _VALID_URL = r'https?://(?:www\.)?dbtv\.no/(?:(?:lazyplayer|player)/)?(?P<id>[0-9]+)(?:#(?P<display_id>.+))?' + _TESTS = [{ 'url': 'http://dbtv.no/3649835190001#Skulle_teste_ut_fornøyelsespark,_men_kollegaen_var_bare_opptatt_av_bikinikroppen', 'md5': 'b89953ed25dacb6edb3ef6c6f430f8bc', 'info_dict': { @@ -30,12 +30,18 @@ class DBTVIE(InfoExtractor): 'view_count': int, 'categories': list, } - } + }, { + 'url': 'http://dbtv.no/3649835190001', + 'only_matching': True, + }, { + 'url': 'http://www.dbtv.no/lazyplayer/4631135248001', + 'only_matching': True, + }] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') - display_id = mobj.group('display_id') + display_id = mobj.group('display_id') or video_id data = self._download_json( 'http://api.dbtv.no/discovery/%s' % video_id, display_id) diff --git a/youtube_dl/extractor/dcn.py b/youtube_dl/extractor/dcn.py index 6f2fea5ff..8f48571de 100644 --- a/youtube_dl/extractor/dcn.py +++ b/youtube_dl/extractor/dcn.py @@ -1,28 +1,87 @@ # coding: utf-8 from __future__ import unicode_literals +import re +import base64 + from .common import InfoExtractor -from ..compat import ( - compat_urllib_parse, - compat_urllib_request, -) +from ..compat import compat_urllib_parse from ..utils import ( int_or_none, parse_iso8601, + sanitized_Request, + smuggle_url, + unsmuggle_url, ) class DCNIE(InfoExtractor): - _VALID_URL = r'https?://(?:www\.)?dcndigital\.ae/(?:#/)?(?:video/.+|show/\d+/.+?)/(?P<id>\d+)' + _VALID_URL = r'https?://(?:www\.)?dcndigital\.ae/(?:#/)?show/(?P<show_id>\d+)/[^/]+(?:/(?P<video_id>\d+)/(?P<season_id>\d+))?' 
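The DCNIE rewrite that follows turns the old extractor into a pure router: it hands off to DCNVideoIE or DCNSeasonIE, and uses smuggle_url/unsmuggle_url to carry show_id alongside the season URL. youtube-dl smuggles such data through the URL fragment as JSON; the sketch below reproduces the round trip from memory, so the exact marker string and encoding are assumptions:

import json
from urllib.parse import quote, unquote

def smuggle_url(url, data):
    # Piggy-back extractor-to-extractor data on the fragment; the receiving
    # extractor strips it off again before any HTTP request is made.
    return url + '#__youtubedl_smuggle=' + quote(json.dumps(data))

def unsmuggle_url(smug_url, default=None):
    if '#__youtubedl_smuggle=' not in smug_url:
        return smug_url, default
    url, _, payload = smug_url.partition('#__youtubedl_smuggle=')
    return url, json.loads(unquote(payload))

url = smuggle_url('http://www.dcndigital.ae/program/season/7910',
                  {'show_id': '205024'})
print(unsmuggle_url(url, {}))
# ('http://www.dcndigital.ae/program/season/7910', {'show_id': '205024'})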
+ + def _real_extract(self, url): + show_id, video_id, season_id = re.match(self._VALID_URL, url).groups() + if video_id and int(video_id) > 0: + return self.url_result( + 'http://www.dcndigital.ae/media/%s' % video_id, 'DCNVideo') + elif season_id and int(season_id) > 0: + return self.url_result(smuggle_url( + 'http://www.dcndigital.ae/program/season/%s' % season_id, + {'show_id': show_id}), 'DCNSeason') + else: + return self.url_result( + 'http://www.dcndigital.ae/program/%s' % show_id, 'DCNSeason') + + +class DCNBaseIE(InfoExtractor): + def _extract_video_info(self, video_data, video_id, is_live): + title = video_data.get('title_en') or video_data['title_ar'] + img = video_data.get('img') + thumbnail = 'http://admin.mangomolo.com/analytics/%s' % img if img else None + duration = int_or_none(video_data.get('duration')) + description = video_data.get('description_en') or video_data.get('description_ar') + timestamp = parse_iso8601(video_data.get('create_time'), ' ') + + return { + 'id': video_id, + 'title': self._live_title(title) if is_live else title, + 'description': description, + 'thumbnail': thumbnail, + 'duration': duration, + 'timestamp': timestamp, + 'is_live': is_live, + } + + def _extract_video_formats(self, webpage, video_id, entry_protocol): + formats = [] + m3u8_url = self._html_search_regex( + r'file\s*:\s*"([^"]+)', webpage, 'm3u8 url', fatal=False) + if m3u8_url: + formats.extend(self._extract_m3u8_formats( + m3u8_url, video_id, 'mp4', entry_protocol, m3u8_id='hls', fatal=None)) + + rtsp_url = self._search_regex( + r'<a[^>]+href="(rtsp://[^"]+)"', webpage, 'rtsp url', fatal=False) + if rtsp_url: + formats.append({ + 'url': rtsp_url, + 'format_id': 'rtsp', + }) + + self._sort_formats(formats) + return formats + + +class DCNVideoIE(DCNBaseIE): + IE_NAME = 'dcn:video' + _VALID_URL = r'https?://(?:www\.)?dcndigital\.ae/(?:#/)?(?:video/[^/]+|media|catchup/[^/]+/[^/]+)/(?P<id>\d+)' _TEST = { - 'url': 'http://www.dcndigital.ae/#/show/199074/%D8%B1%D8%AD%D9%84%D8%A9-%D8%A7%D9%84%D8%B9%D9%85%D8%B1-%D8%A7%D9%84%D8%AD%D9%84%D9%82%D8%A9-1/17375/6887', + 'url': 'http://www.dcndigital.ae/#/video/%D8%B1%D8%AD%D9%84%D8%A9-%D8%A7%D9%84%D8%B9%D9%85%D8%B1-%D8%A7%D9%84%D8%AD%D9%84%D9%82%D8%A9-1/17375', 'info_dict': { 'id': '17375', 'ext': 'mp4', 'title': 'رحلة العمر : الحلقة 1', 'description': 'md5:0156e935d870acb8ef0a66d24070c6d6', - 'thumbnail': 're:^https?://.*\.jpg$', 'duration': 2041, 'timestamp': 1227504126, 'upload_date': '20081124', @@ -36,49 +95,98 @@ class DCNIE(InfoExtractor): def _real_extract(self, url): video_id = self._match_id(url) - request = compat_urllib_request.Request( + request = sanitized_Request( 'http://admin.mangomolo.com/analytics/index.php/plus/video?id=%s' % video_id, headers={'Origin': 'http://www.dcndigital.ae'}) - - video = self._download_json(request, video_id) - title = video.get('title_en') or video['title_ar'] + video_data = self._download_json(request, video_id) + info = self._extract_video_info(video_data, video_id, False) webpage = self._download_webpage( 'http://admin.mangomolo.com/analytics/index.php/customers/embed/video?' 
+ compat_urllib_parse.urlencode({ - 'id': video['id'], - 'user_id': video['user_id'], - 'signature': video['signature'], + 'id': video_data['id'], + 'user_id': video_data['user_id'], + 'signature': video_data['signature'], 'countries': 'Q0M=', 'filter': 'DENY', }), video_id) + info['formats'] = self._extract_video_formats(webpage, video_id, 'm3u8_native') + return info - m3u8_url = self._html_search_regex(r'file:\s*"([^"]+)', webpage, 'm3u8 url') - formats = self._extract_m3u8_formats( - m3u8_url, video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls') - rtsp_url = self._search_regex( - r'<a[^>]+href="(rtsp://[^"]+)"', webpage, 'rtsp url', fatal=False) - if rtsp_url: - formats.append({ - 'url': rtsp_url, - 'format_id': 'rtsp', +class DCNLiveIE(DCNBaseIE): + IE_NAME = 'dcn:live' + _VALID_URL = r'https?://(?:www\.)?dcndigital\.ae/(?:#/)?live/(?P<id>\d+)' + + def _real_extract(self, url): + channel_id = self._match_id(url) + + request = sanitized_Request( + 'http://admin.mangomolo.com/analytics/index.php/plus/getchanneldetails?channel_id=%s' % channel_id, + headers={'Origin': 'http://www.dcndigital.ae'}) + + channel_data = self._download_json(request, channel_id) + info = self._extract_video_info(channel_data, channel_id, True) + + webpage = self._download_webpage( + 'http://admin.mangomolo.com/analytics/index.php/customers/embed/index?' + + compat_urllib_parse.urlencode({ + 'id': base64.b64encode(channel_data['user_id'].encode()).decode(), + 'channelid': base64.b64encode(channel_data['id'].encode()).decode(), + 'signature': channel_data['signature'], + 'countries': 'Q0M=', + 'filter': 'DENY', + }), channel_id) + info['formats'] = self._extract_video_formats(webpage, channel_id, 'm3u8') + return info + + +class DCNSeasonIE(InfoExtractor): + IE_NAME = 'dcn:season' + _VALID_URL = r'https?://(?:www\.)?dcndigital\.ae/(?:#/)?program/(?:(?P<show_id>\d+)|season/(?P<season_id>\d+))' + _TEST = { + 'url': 'http://dcndigital.ae/#/program/205024/%D9%85%D8%AD%D8%A7%D8%B6%D8%B1%D8%A7%D8%AA-%D8%A7%D9%84%D8%B4%D9%8A%D8%AE-%D8%A7%D9%84%D8%B4%D8%B9%D8%B1%D8%A7%D9%88%D9%8A', + 'info_dict': + { + 'id': '7910', + 'title': 'محاضرات الشيخ الشعراوي', + }, + 'playlist_mincount': 27, + } + + def _real_extract(self, url): + url, smuggled_data = unsmuggle_url(url, {}) + show_id, season_id = re.match(self._VALID_URL, url).groups() + + data = {} + if season_id: + data['season'] = season_id + show_id = smuggled_data.get('show_id') + if show_id is None: + request = sanitized_Request( + 'http://admin.mangomolo.com/analytics/index.php/plus/season_info?id=%s' % season_id, + headers={'Origin': 'http://www.dcndigital.ae'}) + season = self._download_json(request, season_id) + show_id = season['id'] + data['show_id'] = show_id + request = sanitized_Request( + 'http://admin.mangomolo.com/analytics/index.php/plus/show', + compat_urllib_parse.urlencode(data), + { + 'Origin': 'http://www.dcndigital.ae', + 'Content-Type': 'application/x-www-form-urlencoded' }) - self._sort_formats(formats) + show = self._download_json(request, show_id) + if not season_id: + season_id = show['default_season'] + for season in show['seasons']: + if season['id'] == season_id: + title = season.get('title_en') or season['title_ar'] - img = video.get('img') - thumbnail = 'http://admin.mangomolo.com/analytics/%s' % img if img else None - duration = int_or_none(video.get('duration')) - description = video.get('description_en') or video.get('description_ar') - timestamp = parse_iso8601(video.get('create_time') or video.get('update_time'), ' ') + entries = 
[] + for video in show['videos']: + entries.append(self.url_result( + 'http://www.dcndigital.ae/media/%s' % video['id'], 'DCNVideo')) - return { - 'id': video_id, - 'title': title, - 'description': description, - 'thumbnail': thumbnail, - 'duration': duration, - 'timestamp': timestamp, - 'formats': formats, - } + return self.playlist_result(entries, season_id, title) diff --git a/youtube_dl/extractor/democracynow.py b/youtube_dl/extractor/democracynow.py new file mode 100644 index 000000000..6cd395e11 --- /dev/null +++ b/youtube_dl/extractor/democracynow.py @@ -0,0 +1,88 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import re +import os.path + +from .common import InfoExtractor +from ..compat import compat_urlparse +from ..utils import ( + url_basename, + remove_start, +) + + +class DemocracynowIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?democracynow.org/(?P<id>[^\?]*)' + IE_NAME = 'democracynow' + _TESTS = [{ + 'url': 'http://www.democracynow.org/shows/2015/7/3', + 'md5': 'fbb8fe3d7a56a5e12431ce2f9b2fab0d', + 'info_dict': { + 'id': '2015-0703-001', + 'ext': 'mp4', + 'title': 'July 03, 2015 - Democracy Now!', + 'description': 'A daily independent global news hour with Amy Goodman & Juan González "What to the Slave is 4th of July?": James Earl Jones Reads Frederick Douglass\u2019 Historic Speech : "This Flag Comes Down Today": Bree Newsome Scales SC Capitol Flagpole, Takes Down Confederate Flag : "We Shall Overcome": Remembering Folk Icon, Activist Pete Seeger in His Own Words & Songs', + }, + }, { + 'url': 'http://www.democracynow.org/2015/7/3/this_flag_comes_down_today_bree', + 'md5': 'fbb8fe3d7a56a5e12431ce2f9b2fab0d', + 'info_dict': { + 'id': '2015-0703-001', + 'ext': 'mp4', + 'title': '"This Flag Comes Down Today": Bree Newsome Scales SC Capitol Flagpole, Takes Down Confederate Flag', + 'description': 'md5:4d2bc4f0d29f5553c2210a4bc7761a21', + }, + }] + + def _real_extract(self, url): + display_id = self._match_id(url) + webpage = self._download_webpage(url, display_id) + description = self._og_search_description(webpage) + + json_data = self._parse_json(self._search_regex( + r'<script[^>]+type="text/json"[^>]*>\s*({[^>]+})', webpage, 'json'), + display_id) + video_id = None + formats = [] + + default_lang = 'en' + + subtitles = {} + + def add_subtitle_item(lang, info_dict): + if lang not in subtitles: + subtitles[lang] = [] + subtitles[lang].append(info_dict) + + # chapter_file are not subtitles + if 'caption_file' in json_data: + add_subtitle_item(default_lang, { + 'url': compat_urlparse.urljoin(url, json_data['caption_file']), + }) + + for subtitle_item in json_data.get('captions', []): + lang = subtitle_item.get('language', '').lower() or default_lang + add_subtitle_item(lang, { + 'url': compat_urlparse.urljoin(url, subtitle_item['url']), + }) + + for key in ('file', 'audio', 'video'): + media_url = json_data.get(key, '') + if not media_url: + continue + media_url = re.sub(r'\?.*', '', compat_urlparse.urljoin(url, media_url)) + video_id = video_id or remove_start(os.path.splitext(url_basename(media_url))[0], 'dn') + formats.append({ + 'url': media_url, + }) + + self._sort_formats(formats) + + return { + 'id': video_id or display_id, + 'title': json_data['title'], + 'description': description, + 'subtitles': subtitles, + 'formats': formats, + } diff --git a/youtube_dl/extractor/dplay.py b/youtube_dl/extractor/dplay.py new file mode 100644 index 000000000..6cda56a7f --- /dev/null +++ b/youtube_dl/extractor/dplay.py @@ -0,0 +1,51 @@ +# encoding: utf-8 +from 
__future__ import unicode_literals + +import time + +from .common import InfoExtractor +from ..utils import int_or_none + + +class DPlayIE(InfoExtractor): + _VALID_URL = r'http://www\.dplay\.se/[^/]+/(?P<id>[^/?#]+)' + + _TEST = { + 'url': 'http://www.dplay.se/nugammalt-77-handelser-som-format-sverige/season-1-svensken-lar-sig-njuta-av-livet/', + 'info_dict': { + 'id': '3172', + 'ext': 'mp4', + 'display_id': 'season-1-svensken-lar-sig-njuta-av-livet', + 'title': 'Svensken lär sig njuta av livet', + 'duration': 2650, + }, + } + + def _real_extract(self, url): + display_id = self._match_id(url) + webpage = self._download_webpage(url, display_id) + video_id = self._search_regex( + r'data-video-id="(\d+)"', webpage, 'video id') + + info = self._download_json( + 'http://www.dplay.se/api/v2/ajax/videos?video_id=' + video_id, + video_id)['data'][0] + + self._set_cookie( + 'secure.dplay.se', 'dsc-geo', + '{"countryCode":"NL","expiry":%d}' % ((time.time() + 20 * 60) * 1000)) + # TODO: consider adding support for 'stream_type=hds', it seems to + # require setting some cookies + manifest_url = self._download_json( + 'https://secure.dplay.se/secure/api/v2/user/authorization/stream/%s?stream_type=hls' % video_id, + video_id, 'Getting manifest url for hls stream')['hls'] + formats = self._extract_m3u8_formats( + manifest_url, video_id, ext='mp4', entry_protocol='m3u8_native') + + return { + 'id': video_id, + 'display_id': display_id, + 'title': info['title'], + 'formats': formats, + 'duration': int_or_none(info.get('video_metadata_length'), scale=1000), + } diff --git a/youtube_dl/extractor/dramafever.py b/youtube_dl/extractor/dramafever.py index 38e6597c8..60ed438f8 100644 --- a/youtube_dl/extractor/dramafever.py +++ b/youtube_dl/extractor/dramafever.py @@ -3,23 +3,20 @@ from __future__ import unicode_literals import itertools -from .common import InfoExtractor +from .amp import AMPIE from ..compat import ( compat_HTTPError, compat_urllib_parse, - compat_urllib_request, compat_urlparse, ) from ..utils import ( ExtractorError, clean_html, - determine_ext, - int_or_none, - parse_iso8601, + sanitized_Request, ) -class DramaFeverBaseIE(InfoExtractor): +class DramaFeverBaseIE(AMPIE): _LOGIN_URL = 'https://www.dramafever.com/accounts/login/' _NETRC_MACHINE = 'dramafever' @@ -51,7 +48,7 @@ class DramaFeverBaseIE(InfoExtractor): 'password': password, } - request = compat_urllib_request.Request( + request = sanitized_Request( self._LOGIN_URL, compat_urllib_parse.urlencode(login_form).encode('utf-8')) response = self._download_webpage( request, None, 'Logging in as %s' % username) @@ -80,60 +77,25 @@ class DramaFeverIE(DramaFeverBaseIE): 'timestamp': 1404336058, 'upload_date': '20140702', 'duration': 343, - } + }, + 'params': { + # m3u8 download + 'skip_download': True, + }, } def _real_extract(self, url): video_id = self._match_id(url).replace('/', '.') try: - feed = self._download_json( - 'http://www.dramafever.com/amp/episode/feed.json?guid=%s' % video_id, - video_id, 'Downloading episode JSON')['channel']['item'] + info = self._extract_feed_info( + 'http://www.dramafever.com/amp/episode/feed.json?guid=%s' % video_id) except ExtractorError as e: if isinstance(e.cause, compat_HTTPError): raise ExtractorError( 'Currently unavailable in your country.', expected=True) raise - media_group = feed.get('media-group', {}) - - formats = [] - for media_content in media_group['media-content']: - src = media_content.get('@attributes', {}).get('url') - if not src: - continue - ext = determine_ext(src) - if ext == 'f4m': - 
formats.extend(self._extract_f4m_formats( - src, video_id, f4m_id='hds')) - elif ext == 'm3u8': - formats.extend(self._extract_m3u8_formats( - src, video_id, 'mp4', m3u8_id='hls')) - else: - formats.append({ - 'url': src, - }) - self._sort_formats(formats) - - title = media_group.get('media-title') - description = media_group.get('media-description') - duration = int_or_none(media_group['media-content'][0].get('@attributes', {}).get('duration')) - thumbnail = self._proto_relative_url( - media_group.get('media-thumbnail', {}).get('@attributes', {}).get('url')) - timestamp = parse_iso8601(feed.get('pubDate'), ' ') - - subtitles = {} - for media_subtitle in media_group.get('media-subTitle', []): - lang = media_subtitle.get('@attributes', {}).get('lang') - href = media_subtitle.get('@attributes', {}).get('href') - if not lang or not href: - continue - subtitles[lang] = [{ - 'ext': 'ttml', - 'url': href, - }] - series_id, episode_number = video_id.split('.') episode_info = self._download_json( # We only need a single episode info, so restricting page size to one episode @@ -146,21 +108,12 @@ class DramaFeverIE(DramaFeverBaseIE): if value: subfile = value[0].get('subfile') or value[0].get('new_subfile') if subfile and subfile != 'http://www.dramafever.com/st/': - subtitles.setdefault('English', []).append({ + info['subtitiles'].setdefault('English', []).append({ 'ext': 'srt', 'url': subfile, }) - return { - 'id': video_id, - 'title': title, - 'description': description, - 'thumbnail': thumbnail, - 'timestamp': timestamp, - 'duration': duration, - 'formats': formats, - 'subtitles': subtitles, - } + return info class DramaFeverSeriesIE(DramaFeverBaseIE): diff --git a/youtube_dl/extractor/dumpert.py b/youtube_dl/extractor/dumpert.py index 1f00386fe..e5aadcd25 100644 --- a/youtube_dl/extractor/dumpert.py +++ b/youtube_dl/extractor/dumpert.py @@ -2,14 +2,17 @@ from __future__ import unicode_literals import base64 +import re from .common import InfoExtractor -from ..compat import compat_urllib_request -from ..utils import qualities +from ..utils import ( + qualities, + sanitized_Request, +) class DumpertIE(InfoExtractor): - _VALID_URL = r'https?://(?:www\.)?dumpert\.nl/(?:mediabase|embed)/(?P<id>[0-9]+/[0-9a-zA-Z]+)' + _VALID_URL = r'(?P<protocol>https?)://(?:www\.)?dumpert\.nl/(?:mediabase|embed)/(?P<id>[0-9]+/[0-9a-zA-Z]+)' _TESTS = [{ 'url': 'http://www.dumpert.nl/mediabase/6646981/951bc60f/', 'md5': '1b9318d7d5054e7dcb9dc7654f21d643', @@ -26,10 +29,12 @@ class DumpertIE(InfoExtractor): }] def _real_extract(self, url): - video_id = self._match_id(url) + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + protocol = mobj.group('protocol') - url = 'https://www.dumpert.nl/mediabase/' + video_id - req = compat_urllib_request.Request(url) + url = '%s://www.dumpert.nl/mediabase/%s' % (protocol, video_id) + req = sanitized_Request(url) req.add_header('Cookie', 'nsfw=1; cpc=10') webpage = self._download_webpage(req, video_id) diff --git a/youtube_dl/extractor/eagleplatform.py b/youtube_dl/extractor/eagleplatform.py index a1ee51568..7bbf617d4 100644 --- a/youtube_dl/extractor/eagleplatform.py +++ b/youtube_dl/extractor/eagleplatform.py @@ -21,7 +21,7 @@ class EaglePlatformIE(InfoExtractor): _TESTS = [{ # http://lenta.ru/news/2015/03/06/navalny/ 'url': 'http://lentaru.media.eagleplatform.com/index/player?player=new&record_id=227304&player_template_id=5201', - 'md5': '0b7994faa2bd5c0f69a3db6db28d078d', + 'md5': '70f5187fb620f2c1d503b3b22fd4efe3', 'info_dict': { 'id': '227304', 'ext': 'mp4', 
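The compat_urllib_request.Request -> sanitized_Request swap just made in dumpert.py is the same one applied to Crunchyroll, DCN, DramaFever and the other extractors in this changeset. The helper itself lives in youtube_dl/utils.py and is not part of this diff; roughly, it cleans the URL before building a stock Request, so the body below is an illustrative guess, not the actual implementation:

from urllib.request import Request

def sanitize_url(url):
    # Representative cleanup only: upgrade scheme-relative URLs so urllib
    # accepts them. The real helper performs more escaping than this.
    return 'http:' + url if url.startswith('//') else url

def sanitized_Request(url, *args, **kwargs):
    # Same signature as urllib's Request, with the URL cleaned first.
    return Request(sanitize_url(url), *args, **kwargs)

req = sanitized_Request('//www.dumpert.nl/mediabase/6646981/951bc60f/')
req.add_header('Cookie', 'nsfw=1; cpc=10')  # opt in to NSFW, as DumpertIE does
print(req.full_url)  # http://www.dumpert.nl/mediabase/6646981/951bc60f/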
@@ -36,7 +36,7 @@ class EaglePlatformIE(InfoExtractor): # http://muz-tv.ru/play/7129/ # http://media.clipyou.ru/index/player?record_id=12820&width=730&height=415&autoplay=true 'url': 'eagleplatform:media.clipyou.ru:12820', - 'md5': '6c2ebeab03b739597ce8d86339d5a905', + 'md5': '90b26344ba442c8e44aa4cf8f301164a', 'info_dict': { 'id': '12820', 'ext': 'mp4', @@ -48,7 +48,8 @@ class EaglePlatformIE(InfoExtractor): 'skip': 'Georestricted', }] - def _handle_error(self, response): + @staticmethod + def _handle_error(response): status = int_or_none(response.get('status', 200)) if status != 200: raise ExtractorError(' '.join(response['errors']), expected=True) @@ -58,6 +59,9 @@ class EaglePlatformIE(InfoExtractor): self._handle_error(response) return response + def _get_video_url(self, url_or_request, video_id, note='Downloading JSON metadata'): + return self._download_json(url_or_request, video_id, note)['data'][0] + def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) host, video_id = mobj.group('custom_host') or mobj.group('host'), mobj.group('id') @@ -69,7 +73,7 @@ class EaglePlatformIE(InfoExtractor): title = media['title'] description = media.get('description') - thumbnail = media.get('snapshot') + thumbnail = self._proto_relative_url(media.get('snapshot'), 'http:') duration = int_or_none(media.get('duration')) view_count = int_or_none(media.get('views')) @@ -78,13 +82,20 @@ class EaglePlatformIE(InfoExtractor): if age_restriction: age_limit = 0 if age_restriction == 'allow_all' else 18 - m3u8_data = self._download_json( - self._proto_relative_url(media['sources']['secure_m3u8']['auto'], 'http:'), - video_id, 'Downloading m3u8 JSON') + secure_m3u8 = self._proto_relative_url(media['sources']['secure_m3u8']['auto'], 'http:') + m3u8_url = self._get_video_url(secure_m3u8, video_id, 'Downloading m3u8 JSON') formats = self._extract_m3u8_formats( - m3u8_data['data'][0], video_id, - 'mp4', entry_protocol='m3u8_native') + m3u8_url, video_id, + 'mp4', entry_protocol='m3u8_native', m3u8_id='hls') + + mp4_url = self._get_video_url( + # Secure mp4 URL is constructed according to Player.prototype.mp4 from + # http://lentaru.media.eagleplatform.com/player/player.js + re.sub(r'm3u8|hlsvod|hls|f4m', 'mp4', secure_m3u8), + video_id, 'Downloading mp4 JSON') + formats.append({'url': mp4_url, 'format_id': 'mp4'}) + self._sort_formats(formats) return { diff --git a/youtube_dl/extractor/eitb.py b/youtube_dl/extractor/eitb.py index 2cba82532..713cb7b32 100644 --- a/youtube_dl/extractor/eitb.py +++ b/youtube_dl/extractor/eitb.py @@ -1,39 +1,88 @@ # encoding: utf-8 from __future__ import unicode_literals -import re - from .common import InfoExtractor -from .brightcove import BrightcoveIE -from ..utils import ExtractorError +from ..utils import ( + float_or_none, + int_or_none, + parse_iso8601, + sanitized_Request, +) class EitbIE(InfoExtractor): IE_NAME = 'eitb.tv' - _VALID_URL = r'https?://www\.eitb\.tv/(eu/bideoa|es/video)/[^/]+/(?P<playlist_id>\d+)/(?P<chapter_id>\d+)' + _VALID_URL = r'https?://(?:www\.)?eitb\.tv/(?:eu/bideoa|es/video)/[^/]+/\d+/(?P<id>\d+)' _TEST = { - 'add_ie': ['Brightcove'], - 'url': 'http://www.eitb.tv/es/video/60-minutos-60-minutos-2013-2014/2677100210001/2743577154001/lasa-y-zabala-30-anos/', + 'url': 'http://www.eitb.tv/es/video/60-minutos-60-minutos-2013-2014/4104995148001/4090227752001/lasa-y-zabala-30-anos/', 'md5': 'edf4436247185adee3ea18ce64c47998', 'info_dict': { - 'id': '2743577154001', + 'id': '4090227752001', 'ext': 'mp4', 'title': '60 minutos (Lasa y Zabala, 30 
años)', - # All videos from eitb has this description in the brightcove info - 'description': '.', - 'uploader': 'Euskal Telebista', + 'description': 'Programa de reportajes de actualidad.', + 'duration': 3996.76, + 'timestamp': 1381789200, + 'upload_date': '20131014', + 'tags': list, }, } def _real_extract(self, url): - mobj = re.match(self._VALID_URL, url) - chapter_id = mobj.group('chapter_id') - webpage = self._download_webpage(url, chapter_id) - bc_url = BrightcoveIE._extract_brightcove_url(webpage) - if bc_url is None: - raise ExtractorError('Could not extract the Brightcove url') - # The BrightcoveExperience object doesn't contain the video id, we set - # it manually - bc_url += '&%40videoPlayer={0}'.format(chapter_id) - return self.url_result(bc_url, BrightcoveIE.ie_key()) + video_id = self._match_id(url) + + video = self._download_json( + 'http://mam.eitb.eus/mam/REST/ServiceMultiweb/Video/MULTIWEBTV/%s/' % video_id, + video_id, 'Downloading video JSON') + + media = video['web_media'][0] + + formats = [] + for rendition in media['RENDITIONS']: + video_url = rendition.get('PMD_URL') + if not video_url: + continue + tbr = float_or_none(rendition.get('ENCODING_RATE'), 1000) + format_id = 'http' + if tbr: + format_id += '-%d' % int(tbr) + formats.append({ + 'url': rendition['PMD_URL'], + 'format_id': format_id, + 'width': int_or_none(rendition.get('FRAME_WIDTH')), + 'height': int_or_none(rendition.get('FRAME_HEIGHT')), + 'tbr': tbr, + }) + + hls_url = media.get('HLS_SURL') + if hls_url: + request = sanitized_Request( + 'http://mam.eitb.eus/mam/REST/ServiceMultiweb/DomainRestrictedSecurity/TokenAuth/', + headers={'Referer': url}) + token_data = self._download_json( + request, video_id, 'Downloading auth token', fatal=False) + if token_data: + token = token_data.get('token') + if token: + formats.extend(self._extract_m3u8_formats( + '%s?hdnts=%s' % (hls_url, token), video_id, m3u8_id='hls', fatal=False)) + + hds_url = media.get('HDS_SURL') + if hds_url: + formats.extend(self._extract_f4m_formats( + '%s?hdcore=3.7.0' % hds_url.replace('euskalsvod', 'euskalvod'), + video_id, f4m_id='hds', fatal=False)) + + self._sort_formats(formats) + + return { + 'id': video_id, + 'title': media.get('NAME_ES') or media.get('name') or media['NAME_EU'], + 'description': media.get('SHORT_DESC_ES') or video.get('desc_group') or media.get('SHORT_DESC_EU'), + 'thumbnail': media.get('STILL_URL') or media.get('THUMBNAIL_URL'), + 'duration': float_or_none(media.get('LENGTH'), 1000), + 'timestamp': parse_iso8601(media.get('BROADCST_DATE'), ' '), + 'tags': media.get('TAGS'), + 'formats': formats, + } diff --git a/youtube_dl/extractor/ellentv.py b/youtube_dl/extractor/ellentv.py index 02c6a4615..476cce2d0 100644 --- a/youtube_dl/extractor/ellentv.py +++ b/youtube_dl/extractor/ellentv.py @@ -13,12 +13,12 @@ class EllenTVIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?(?:ellentv|ellentube)\.com/videos/(?P<id>[a-z0-9_-]+)' _TEST = { 'url': 'http://www.ellentv.com/videos/0-ipq1gsai/', - 'md5': '8e3c576bf2e9bfff4d76565f56f94c9c', + 'md5': '4294cf98bc165f218aaa0b89e0fd8042', 'info_dict': { 'id': '0_ipq1gsai', - 'ext': 'mp4', + 'ext': 'mov', 'title': 'Fast Fingers of Fate', - 'description': 'md5:587e79fbbd0d73b148bc596d99ce48e6', + 'description': 'md5:3539013ddcbfa64b2a6d1b38d910868a', 'timestamp': 1428035648, 'upload_date': '20150403', 'uploader_id': 'batchUser', diff --git a/youtube_dl/extractor/engadget.py b/youtube_dl/extractor/engadget.py index 4ea37ebd9..e4180701d 100644 --- a/youtube_dl/extractor/engadget.py 
+++ b/youtube_dl/extractor/engadget.py @@ -10,7 +10,7 @@ from ..utils import ( class EngadgetIE(InfoExtractor): _VALID_URL = r'''(?x)https?://www.engadget.com/ - (?:video/5min/(?P<id>\d+)| + (?:video(?:/5min)?/(?P<id>\d+)| [\d/]+/.*?) ''' diff --git a/youtube_dl/extractor/escapist.py b/youtube_dl/extractor/escapist.py index c85b4c458..a3d7bbbcb 100644 --- a/youtube_dl/extractor/escapist.py +++ b/youtube_dl/extractor/escapist.py @@ -3,13 +3,12 @@ from __future__ import unicode_literals import json from .common import InfoExtractor -from ..compat import compat_urllib_request - from ..utils import ( determine_ext, clean_html, int_or_none, float_or_none, + sanitized_Request, ) @@ -75,7 +74,7 @@ class EscapistIE(InfoExtractor): video_id = ims_video['videoID'] key = ims_video['hash'] - config_req = compat_urllib_request.Request( + config_req = sanitized_Request( 'http://www.escapistmagazine.com/videos/' 'vidconfig.php?videoID=%s&hash=%s' % (video_id, key)) config_req.add_header('Referer', url) diff --git a/youtube_dl/extractor/esri.py b/youtube_dl/extractor/esri.py index bf5d2019f..d4205d7fb 100644 --- a/youtube_dl/extractor/esri.py +++ b/youtube_dl/extractor/esri.py @@ -61,7 +61,7 @@ class EsriVideoIE(InfoExtractor): webpage, 'duration', fatal=False)) upload_date = unified_strdate(self._html_search_meta( - 'last-modified', webpage, 'upload date', fatal=None)) + 'last-modified', webpage, 'upload date', fatal=False)) return { 'id': video_id, diff --git a/youtube_dl/extractor/europa.py b/youtube_dl/extractor/europa.py new file mode 100644 index 000000000..adc43919e --- /dev/null +++ b/youtube_dl/extractor/europa.py @@ -0,0 +1,93 @@ +# coding: utf-8 +from __future__ import unicode_literals + +from .common import InfoExtractor +from ..compat import compat_urlparse +from ..utils import ( + int_or_none, + orderedSet, + parse_duration, + qualities, + unified_strdate, + xpath_text +) + + +class EuropaIE(InfoExtractor): + _VALID_URL = r'https?://ec\.europa\.eu/avservices/(?:video/player|audio/audioDetails)\.cfm\?.*?\bref=(?P<id>[A-Za-z0-9-]+)' + _TESTS = [{ + 'url': 'http://ec.europa.eu/avservices/video/player.cfm?ref=I107758', + 'md5': '574f080699ddd1e19a675b0ddf010371', + 'info_dict': { + 'id': 'I107758', + 'ext': 'mp4', + 'title': 'TRADE - Wikileaks on TTIP', + 'description': 'NEW LIVE EC Midday press briefing of 11/08/2015', + 'thumbnail': 're:^https?://.*\.jpg$', + 'upload_date': '20150811', + 'duration': 34, + 'view_count': int, + 'formats': 'mincount:3', + } + }, { + 'url': 'http://ec.europa.eu/avservices/video/player.cfm?sitelang=en&ref=I107786', + 'only_matching': True, + }, { + 'url': 'http://ec.europa.eu/avservices/audio/audioDetails.cfm?ref=I-109295&sitelang=en', + 'only_matching': True, + }] + + def _real_extract(self, url): + video_id = self._match_id(url) + + playlist = self._download_xml( + 'http://ec.europa.eu/avservices/video/player/playlist.cfm?ID=%s' % video_id, video_id) + + def get_item(type_, preference): + items = {} + for item in playlist.findall('./info/%s/item' % type_): + lang, label = xpath_text(item, 'lg', default=None), xpath_text(item, 'label', default=None) + if lang and label: + items[lang] = label.strip() + for p in preference: + if items.get(p): + return items[p] + + query = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query) + preferred_lang = query.get('sitelang', ('en', ))[0] + + preferred_langs = orderedSet((preferred_lang, 'en', 'int')) + + title = get_item('title', preferred_langs) or video_id + description = get_item('description', preferred_langs) + 
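A note on the language handling in the new EuropaIE above: preferred_langs is ordered most-preferred first, while the qualities() helper from youtube_dl/utils.py scores a value by its position in the list it is built from (later entries score higher), which is why the list is reversed before being turned into the language_preference scorer just below. A minimal standalone sketch, with qualities() condensed from utils.py and illustrative sample values:

    def qualities(quality_ids):
        # score = index in the list, so later entries win;
        # values not in the list get -1 and sort last
        def q(qid):
            try:
                return quality_ids.index(qid)
            except ValueError:
                return -1
        return q

    preferred_langs = ['fr', 'en', 'int']  # most preferred first (sample values)
    language_preference = qualities(preferred_langs[::-1])
    assert language_preference('fr') > language_preference('en') > language_preference('int')
    assert language_preference('de') == -1  # unlisted languages sort last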
thumbnail = xpath_text(playlist, './info/thumburl', 'thumbnail')
+        upload_date = unified_strdate(xpath_text(playlist, './info/date', 'upload date'))
+        duration = parse_duration(xpath_text(playlist, './info/duration', 'duration'))
+        view_count = int_or_none(xpath_text(playlist, './info/views', 'views'))
+
+        language_preference = qualities(preferred_langs[::-1])
+
+        formats = []
+        for file_ in playlist.findall('./files/file'):
+            video_url = xpath_text(file_, './url')
+            if not video_url:
+                continue
+            lang = xpath_text(file_, './lg')
+            formats.append({
+                'url': video_url,
+                'format_id': lang,
+                'format_note': xpath_text(file_, './lglabel'),
+                'language_preference': language_preference(lang)
+            })
+        self._sort_formats(formats)
+
+        return {
+            'id': video_id,
+            'title': title,
+            'description': description,
+            'thumbnail': thumbnail,
+            'upload_date': upload_date,
+            'duration': duration,
+            'view_count': view_count,
+            'formats': formats
+        }
diff --git a/youtube_dl/extractor/everyonesmixtape.py b/youtube_dl/extractor/everyonesmixtape.py
index d872d828f..493d38af8 100644
--- a/youtube_dl/extractor/everyonesmixtape.py
+++ b/youtube_dl/extractor/everyonesmixtape.py
@@ -3,11 +3,9 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_urllib_request,
-)
 from ..utils import (
     ExtractorError,
+    sanitized_Request,
 )
 
 
@@ -42,7 +40,7 @@ class EveryonesMixtapeIE(InfoExtractor):
         playlist_id = mobj.group('id')
 
         pllist_url = 'http://everyonesmixtape.com/mixtape.php?a=getMixes&u=-1&linked=%s&explore=' % playlist_id
-        pllist_req = compat_urllib_request.Request(pllist_url)
+        pllist_req = sanitized_Request(pllist_url)
         pllist_req.add_header('X-Requested-With', 'XMLHttpRequest')
 
         playlist_list = self._download_json(
@@ -55,7 +53,7 @@ class EveryonesMixtapeIE(InfoExtractor):
             raise ExtractorError('Playlist id not found')
 
         pl_url = 'http://everyonesmixtape.com/mixtape.php?a=getMix&id=%s&userId=null&code=' % playlist_no
-        pl_req = compat_urllib_request.Request(pl_url)
+        pl_req = sanitized_Request(pl_url)
         pl_req.add_header('X-Requested-With', 'XMLHttpRequest')
         playlist = self._download_json(
             pl_req, playlist_id, note='Downloading playlist info')
diff --git a/youtube_dl/extractor/expotv.py b/youtube_dl/extractor/expotv.py
index a38b773e8..1585a03bb 100644
--- a/youtube_dl/extractor/expotv.py
+++ b/youtube_dl/extractor/expotv.py
@@ -33,20 +33,27 @@ class ExpoTVIE(InfoExtractor):
         webpage = self._download_webpage(url, video_id)
         player_key = self._search_regex(
             r'<param name="playerKey" value="([^"]+)"', webpage, 'player key')
-        config_url = 'http://client.expotv.com/video/config/%s/%s' % (
-            video_id, player_key)
         config = self._download_json(
-            config_url, video_id,
-            note='Downloading video configuration')
+            'http://client.expotv.com/video/config/%s/%s' % (video_id, player_key),
+            video_id, 'Downloading video configuration')
 
-        formats = [{
-            'url': fcfg['file'],
-            'height': int_or_none(fcfg.get('height')),
-            'format_note': fcfg.get('label'),
-            'ext': self._search_regex(
-                r'filename=.*\.([a-z0-9_A-Z]+)&', fcfg['file'],
-                'file extension', default=None),
-        } for fcfg in config['sources']]
+        formats = []
+        for fcfg in config['sources']:
+            media_url = fcfg.get('file')
+            if not media_url:
+                continue
+            if fcfg.get('type') == 'm3u8':
+                formats.extend(self._extract_m3u8_formats(
+                    media_url, video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls'))
+            else:
+                formats.append({
+                    'url': media_url,
+                    'height': int_or_none(fcfg.get('height')),
+                    'format_id': 
fcfg.get('label'), + 'ext': self._search_regex( + r'filename=.*\.([a-z0-9_A-Z]+)&', media_url, + 'file extension', default=None) or fcfg.get('type'), + }) self._sort_formats(formats) title = self._og_search_title(webpage) diff --git a/youtube_dl/extractor/extremetube.py b/youtube_dl/extractor/extremetube.py index c826a5404..3403581fd 100644 --- a/youtube_dl/extractor/extremetube.py +++ b/youtube_dl/extractor/extremetube.py @@ -3,23 +3,20 @@ from __future__ import unicode_literals import re from .common import InfoExtractor -from ..compat import ( - compat_parse_qs, - compat_urllib_request, -) from ..utils import ( - qualities, + int_or_none, + sanitized_Request, str_to_int, ) class ExtremeTubeIE(InfoExtractor): - _VALID_URL = r'https?://(?:www\.)?(?P<url>extremetube\.com/.*?video/.+?(?P<id>[0-9]+))(?:[/?&]|$)' + _VALID_URL = r'https?://(?:www\.)?extremetube\.com/(?:[^/]+/)?video/(?P<id>[^/#?&]+)' _TESTS = [{ 'url': 'http://www.extremetube.com/video/music-video-14-british-euro-brit-european-cumshots-swallow-652431', 'md5': '344d0c6d50e2f16b06e49ca011d8ac69', 'info_dict': { - 'id': '652431', + 'id': 'music-video-14-british-euro-brit-european-cumshots-swallow-652431', 'ext': 'mp4', 'title': 'Music Video 14 british euro brit european cumshots swallow', 'uploader': 'unknown', @@ -29,14 +26,18 @@ class ExtremeTubeIE(InfoExtractor): }, { 'url': 'http://www.extremetube.com/gay/video/abcde-1234', 'only_matching': True, + }, { + 'url': 'http://www.extremetube.com/video/latina-slut-fucked-by-fat-black-dick', + 'only_matching': True, + }, { + 'url': 'http://www.extremetube.com/video/652431', + 'only_matching': True, }] def _real_extract(self, url): - mobj = re.match(self._VALID_URL, url) - video_id = mobj.group('id') - url = 'http://www.' + mobj.group('url') + video_id = self._match_id(url) - req = compat_urllib_request.Request(url) + req = sanitized_Request(url) req.add_header('Cookie', 'age_verified=1') webpage = self._download_webpage(req, video_id) @@ -49,20 +50,36 @@ class ExtremeTubeIE(InfoExtractor): r'Views:\s*</strong>\s*<span>([\d,\.]+)</span>', webpage, 'view count', fatal=False)) - flash_vars = compat_parse_qs(self._search_regex( - r'<param[^>]+?name="flashvars"[^>]+?value="([^"]+)"', webpage, 'flash vars')) + flash_vars = self._parse_json( + self._search_regex( + r'var\s+flashvars\s*=\s*({.+?});', webpage, 'flash vars'), + video_id) formats = [] - quality = qualities(['180p', '240p', '360p', '480p', '720p', '1080p']) - for k, vals in flash_vars.items(): - m = re.match(r'quality_(?P<quality>[0-9]+p)$', k) - if m is not None: - formats.append({ - 'format_id': m.group('quality'), - 'quality': quality(m.group('quality')), - 'url': vals[0], + for quality_key, video_url in flash_vars.items(): + height = int_or_none(self._search_regex( + r'quality_(\d+)[pP]$', quality_key, 'height', default=None)) + if not height: + continue + f = { + 'url': video_url, + } + mobj = re.search( + r'/(?P<height>\d{3,4})[pP]_(?P<bitrate>\d+)[kK]_\d+', video_url) + if mobj: + height = int(mobj.group('height')) + bitrate = int(mobj.group('bitrate')) + f.update({ + 'format_id': '%dp-%dk' % (height, bitrate), + 'height': height, + 'tbr': bitrate, }) - + else: + f.update({ + 'format_id': '%dp' % height, + 'height': height, + }) + formats.append(f) self._sort_formats(formats) return { diff --git a/youtube_dl/extractor/facebook.py b/youtube_dl/extractor/facebook.py index 178a7ca4c..5e43f2359 100644 --- a/youtube_dl/extractor/facebook.py +++ b/youtube_dl/extractor/facebook.py @@ -7,15 +7,14 @@ import socket from .common 
import InfoExtractor
 from ..compat import (
     compat_http_client,
-    compat_str,
     compat_urllib_error,
     compat_urllib_parse_unquote,
-    compat_urllib_request,
 )
 from ..utils import (
+    error_to_compat_str,
     ExtractorError,
-    int_or_none,
     limit_length,
+    sanitized_Request,
     urlencode_postdata,
     get_element_by_id,
     clean_html,
@@ -74,8 +73,8 @@ class FacebookIE(InfoExtractor):
         if useremail is None:
             return
 
-        login_page_req = compat_urllib_request.Request(self._LOGIN_URL)
-        login_page_req.add_header('Cookie', 'locale=en_US')
+        login_page_req = sanitized_Request(self._LOGIN_URL)
+        self._set_cookie('facebook.com', 'locale', 'en_US')
         login_page = self._download_webpage(login_page_req, None,
                                             note='Downloading login page',
                                             errnote='Unable to download login page')
@@ -95,29 +94,41 @@ class FacebookIE(InfoExtractor):
             'timezone': '-60',
             'trynum': '1',
         }
-        request = compat_urllib_request.Request(self._LOGIN_URL, urlencode_postdata(login_form))
+        request = sanitized_Request(self._LOGIN_URL, urlencode_postdata(login_form))
         request.add_header('Content-Type', 'application/x-www-form-urlencoded')
         try:
             login_results = self._download_webpage(request, None,
                                                    note='Logging in', errnote='unable to fetch login page')
             if re.search(r'<form(.*)name="login"(.*)</form>', login_results) is not None:
+                error = self._html_search_regex(
+                    r'(?s)<div[^>]+class=(["\']).*?login_error_box.*?\1[^>]*><div[^>]*>.*?</div><div[^>]*>(?P<error>.+?)</div>',
+                    login_results, 'login error', default=None, group='error')
+                if error:
+                    raise ExtractorError('Unable to login: %s' % error, expected=True)
                 self._downloader.report_warning('unable to log in: bad username/password, or exceeded login rate limit (~3/min). Check credentials or wait.')
                 return
 
+            fb_dtsg = self._search_regex(
+                r'name="fb_dtsg" value="(.+?)"', login_results, 'fb_dtsg', default=None)
+            h = self._search_regex(
+                r'name="h"\s+(?:\w+="[^"]+"\s+)*?value="([^"]+)"', login_results, 'h', default=None)
+
+            if not fb_dtsg or not h:
+                return
+
             check_form = {
-                'fb_dtsg': self._search_regex(r'name="fb_dtsg" value="(.+?)"', login_results, 'fb_dtsg'),
-                'h': self._search_regex(
-                    r'name="h"\s+(?:\w+="[^"]+"\s+)*?value="([^"]+)"', login_results, 'h'),
+                'fb_dtsg': fb_dtsg,
+                'h': h,
                 'name_action_selected': 'dont_save',
             }
-            check_req = compat_urllib_request.Request(self._CHECKPOINT_URL, urlencode_postdata(check_form))
+            check_req = sanitized_Request(self._CHECKPOINT_URL, urlencode_postdata(check_form))
             check_req.add_header('Content-Type', 'application/x-www-form-urlencoded')
             check_response = self._download_webpage(check_req, None,
                                                     note='Confirming login')
             if re.search(r'id="checkpointSubmitButton"', check_response) is not None:
                 self._downloader.report_warning('Unable to confirm login, you have to log in in your browser and authorize the login.')
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-            self._downloader.report_warning('unable to log in: %s' % compat_str(err))
+            self._downloader.report_warning('unable to log in: %s' % error_to_compat_str(err))
             return
 
     def _real_initialize(self):
@@ -142,16 +153,20 @@ class FacebookIE(InfoExtractor):
         data = dict(json.loads(m.group(1)))
         params_raw = compat_urllib_parse_unquote(data['params'])
         params = json.loads(params_raw)
-        video_data = params['video_data'][0]
 
         formats = []
-        for quality in ['sd', 'hd']:
-            src = video_data.get('%s_src' % quality)
-            if src is not None:
-                formats.append({
-                    'format_id': quality,
-                    'url': src,
-                })
+        for format_id, f in params['video_data'].items():
+            if not f or not isinstance(f, 
list): + continue + for quality in ('sd', 'hd'): + for src_type in ('src', 'src_no_ratelimit'): + src = f[0].get('%s_%s' % (quality, src_type)) + if src: + formats.append({ + 'format_id': '%s_%s_%s' % (format_id, quality, src_type), + 'url': src, + 'preference': -10 if format_id == 'progressive' else 0, + }) if not formats: raise ExtractorError('Cannot find video formats') @@ -161,7 +176,7 @@ class FacebookIE(InfoExtractor): if not video_title: video_title = self._html_search_regex( r'(?s)<span class="fbPhotosPhotoCaption".*?id="fbPhotoPageCaption"><span class="hasCaption">(.*?)</span>', - webpage, 'alternative title', fatal=False) + webpage, 'alternative title', default=None) video_title = limit_length(video_title, 80) if not video_title: video_title = 'Facebook video #%s' % video_id @@ -171,7 +186,5 @@ class FacebookIE(InfoExtractor): 'id': video_id, 'title': video_title, 'formats': formats, - 'duration': int_or_none(video_data.get('video_duration')), - 'thumbnail': video_data.get('thumbnail_src'), 'uploader': uploader, } diff --git a/youtube_dl/extractor/faz.py b/youtube_dl/extractor/faz.py index cebdd0193..6f9b003c2 100644 --- a/youtube_dl/extractor/faz.py +++ b/youtube_dl/extractor/faz.py @@ -2,6 +2,11 @@ from __future__ import unicode_literals from .common import InfoExtractor +from ..utils import ( + xpath_element, + xpath_text, + int_or_none, +) class FazIE(InfoExtractor): @@ -37,31 +42,32 @@ class FazIE(InfoExtractor): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) + description = self._og_search_description(webpage) config_xml_url = self._search_regex( - r'writeFLV\(\'(.+?)\',', webpage, 'config xml url') + r'videoXMLURL\s*=\s*"([^"]+)', webpage, 'config xml url') config = self._download_xml( config_xml_url, video_id, 'Downloading config xml') - encodings = config.find('ENCODINGS') + encodings = xpath_element(config, 'ENCODINGS', 'encodings', True) formats = [] for pref, code in enumerate(['LOW', 'HIGH', 'HQ']): - encoding = encodings.find(code) - if encoding is None: - continue - encoding_url = encoding.find('FILENAME').text - formats.append({ - 'url': encoding_url, - 'format_id': code.lower(), - 'quality': pref, - }) + encoding = xpath_element(encodings, code) + if encoding: + encoding_url = xpath_text(encoding, 'FILENAME') + if encoding_url: + formats.append({ + 'url': encoding_url, + 'format_id': code.lower(), + 'quality': pref, + 'tbr': int_or_none(xpath_text(encoding, 'AVERAGEBITRATE')), + }) self._sort_formats(formats) - descr = self._html_search_regex( - r'<p class="Content Copy">(.*?)</p>', webpage, 'description', fatal=False) return { 'id': video_id, 'title': self._og_search_title(webpage), 'formats': formats, - 'description': descr, - 'thumbnail': config.find('STILL/STILL_BIG').text, + 'description': description.strip() if description else None, + 'thumbnail': xpath_text(config, 'STILL/STILL_BIG'), + 'duration': int_or_none(xpath_text(config, 'DURATION')), } diff --git a/youtube_dl/extractor/fc2.py b/youtube_dl/extractor/fc2.py index a406945e8..4c81271d3 100644 --- a/youtube_dl/extractor/fc2.py +++ b/youtube_dl/extractor/fc2.py @@ -12,6 +12,7 @@ from ..compat import ( from ..utils import ( encode_dict, ExtractorError, + sanitized_Request, ) @@ -36,8 +37,8 @@ class FC2IE(InfoExtractor): 'params': { 'username': 'ytdl@yt-dl.org', 'password': '(snip)', - 'skip': 'requires actual password' - } + }, + 'skip': 'requires actual password', }, { 'url': 'http://video.fc2.com/en/a/content/20130926eZpARwsF', 'only_matching': True, @@ -57,7 +58,7 @@ 
class FC2IE(InfoExtractor): } login_data = compat_urllib_parse.urlencode(encode_dict(login_form_strs)).encode('utf-8') - request = compat_urllib_request.Request( + request = sanitized_Request( 'https://secure.id.fc2.com/index.php?mode=login&switch_language=en', login_data) login_results = self._download_webpage(request, None, note='Logging in', errnote='Unable to log in') @@ -66,7 +67,7 @@ class FC2IE(InfoExtractor): return False # this is also needed - login_redir = compat_urllib_request.Request('http://id.fc2.com/?mode=redirect&login=done') + login_redir = sanitized_Request('http://id.fc2.com/?mode=redirect&login=done') self._download_webpage( login_redir, None, note='Login redirect', errnote='Login redirect failed') diff --git a/youtube_dl/extractor/fczenit.py b/youtube_dl/extractor/fczenit.py new file mode 100644 index 000000000..f1f150ef2 --- /dev/null +++ b/youtube_dl/extractor/fczenit.py @@ -0,0 +1,41 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor + + +class FczenitIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?fc-zenit\.ru/video/gl(?P<id>[0-9]+)' + _TEST = { + 'url': 'http://fc-zenit.ru/video/gl6785/', + 'md5': '458bacc24549173fe5a5aa29174a5606', + 'info_dict': { + 'id': '6785', + 'ext': 'mp4', + 'title': '«Зенит-ТВ»: как Олег Шатов играл против «Урала»', + }, + } + + def _real_extract(self, url): + video_id = self._match_id(url) + webpage = self._download_webpage(url, video_id) + + video_title = self._html_search_regex(r'<div class=\"photoalbum__title\">([^<]+)', webpage, 'title') + + bitrates_raw = self._html_search_regex(r'bitrates:.*\n(.*)\]', webpage, 'video URL') + bitrates = re.findall(r'url:.?\'(.+?)\'.*?bitrate:.?([0-9]{3}?)', bitrates_raw) + + formats = [{ + 'url': furl, + 'tbr': tbr, + } for furl, tbr in bitrates] + + self._sort_formats(formats) + + return { + 'id': video_id, + 'title': video_title, + 'formats': formats, + } diff --git a/youtube_dl/extractor/fivemin.py b/youtube_dl/extractor/fivemin.py index 157094e8c..2955965d9 100644 --- a/youtube_dl/extractor/fivemin.py +++ b/youtube_dl/extractor/fivemin.py @@ -2,11 +2,15 @@ from __future__ import unicode_literals from .common import InfoExtractor from ..compat import ( - compat_str, compat_urllib_parse, + compat_parse_qs, + compat_urllib_parse_urlparse, + compat_urlparse, ) from ..utils import ( ExtractorError, + parse_duration, + replace_extension, ) @@ -28,6 +32,7 @@ class FiveMinIE(InfoExtractor): 'id': '518013791', 'ext': 'mp4', 'title': 'iPad Mini with Retina Display Review', + 'duration': 177, }, }, { @@ -38,9 +43,52 @@ class FiveMinIE(InfoExtractor): 'id': '518086247', 'ext': 'mp4', 'title': 'How to Make a Next-Level Fruit Salad', + 'duration': 184, }, }, ] + _ERRORS = { + 'ErrorVideoNotExist': 'We\'re sorry, but the video you are trying to watch does not exist.', + 'ErrorVideoNoLongerAvailable': 'We\'re sorry, but the video you are trying to watch is no longer available.', + 'ErrorVideoRejected': 'We\'re sorry, but the video you are trying to watch has been removed.', + 'ErrorVideoUserNotGeo': 'We\'re sorry, but the video you are trying to watch cannot be viewed from your current location.', + 'ErrorVideoLibraryRestriction': 'We\'re sorry, but the video you are trying to watch is currently unavailable for viewing at this domain.', + 'ErrorExposurePermission': 'We\'re sorry, but the video you are trying to watch is currently unavailable for viewing at this domain.', + } + _QUALITIES = { + 1: { + 'width': 640, + 'height': 360, + }, + 2: { + 
'width': 854, + 'height': 480, + }, + 4: { + 'width': 1280, + 'height': 720, + }, + 8: { + 'width': 1920, + 'height': 1080, + }, + 16: { + 'width': 640, + 'height': 360, + }, + 32: { + 'width': 854, + 'height': 480, + }, + 64: { + 'width': 1280, + 'height': 720, + }, + 128: { + 'width': 640, + 'height': 360, + }, + } def _real_extract(self, url): video_id = self._match_id(url) @@ -59,26 +107,36 @@ class FiveMinIE(InfoExtractor): 'https://syn.5min.com/handlers/SenseHandler.ashx?' + query, video_id) if not response['success']: - err_msg = response['errorMessage'] - if err_msg == 'ErrorVideoUserNotGeo': - msg = 'Video not available from your location' - else: - msg = 'Aol said: %s' % err_msg - raise ExtractorError(msg, expected=True, video_id=video_id) + raise ExtractorError( + '%s said: %s' % ( + self.IE_NAME, + self._ERRORS.get(response['errorMessage'], response['errorMessage'])), + expected=True) info = response['binding'][0] - second_id = compat_str(int(video_id[:-2]) + 1) formats = [] - for quality, height in [(1, 320), (2, 480), (4, 720), (8, 1080)]: - if any(r['ID'] == quality for r in info['Renditions']): + parsed_video_url = compat_urllib_parse_urlparse(compat_parse_qs( + compat_urllib_parse_urlparse(info['EmbededURL']).query)['videoUrl'][0]) + for rendition in info['Renditions']: + if rendition['RenditionType'] == 'm3u8': + formats.extend(self._extract_m3u8_formats(rendition['Url'], video_id, m3u8_id='hls')) + elif rendition['RenditionType'] == 'aac': + continue + else: + rendition_url = compat_urlparse.urlunparse(parsed_video_url._replace(path=replace_extension(parsed_video_url.path.replace('//', '/%s/' % rendition['ID']), rendition['RenditionType']))) + quality = self._QUALITIES.get(rendition['ID'], {}) formats.append({ - 'format_id': compat_str(quality), - 'url': 'http://avideos.5min.com/%s/%s/%s_%s.mp4' % (second_id[-3:], second_id, video_id, quality), - 'height': height, + 'format_id': '%s-%d' % (rendition['RenditionType'], rendition['ID']), + 'url': rendition_url, + 'width': quality.get('width'), + 'height': quality.get('height'), }) + self._sort_formats(formats) return { 'id': video_id, 'title': info['Title'], + 'thumbnail': info.get('ThumbURL'), + 'duration': parse_duration(info.get('Duration')), 'formats': formats, } diff --git a/youtube_dl/extractor/fktv.py b/youtube_dl/extractor/fktv.py index 190d9f9ad..5f6e65dae 100644 --- a/youtube_dl/extractor/fktv.py +++ b/youtube_dl/extractor/fktv.py @@ -1,13 +1,10 @@ from __future__ import unicode_literals -import re -import random -import json - from .common import InfoExtractor from ..utils import ( - get_element_by_id, clean_html, + determine_ext, + js_to_json, ) @@ -17,66 +14,38 @@ class FKTVIE(InfoExtractor): _TEST = { 'url': 'http://fernsehkritik.tv/folge-1', + 'md5': '21f0b0c99bce7d5b524eb1b17b1c6d79', 'info_dict': { - 'id': '00011', - 'ext': 'flv', + 'id': '1', + 'ext': 'mp4', 'title': 'Folge 1 vom 10. 
April 2007', - 'description': 'md5:fb4818139c7cfe6907d4b83412a6864f', + 'thumbnail': 're:^https?://.*\.jpg$', }, } def _real_extract(self, url): - episode = int(self._match_id(url)) - - video_thumbnail = 'http://fernsehkritik.tv/images/magazin/folge%s.jpg' % episode - start_webpage = self._download_webpage('http://fernsehkritik.tv/folge-%s/Start' % episode, - episode) - playlist = self._search_regex(r'playlist = (\[.*?\]);', start_webpage, - 'playlist', flags=re.DOTALL) - files = json.loads(re.sub('{[^{}]*?}', '{}', playlist)) - - videos = [] - for i, _ in enumerate(files, 1): - video_id = '%04d%d' % (episode, i) - video_url = 'http://fernsehkritik.tv/js/directme.php?file=%s%s.flv' % (episode, '' if i == 1 else '-%d' % i) - videos.append({ - 'ext': 'flv', - 'id': video_id, - 'url': video_url, - 'title': clean_html(get_element_by_id('eptitle', start_webpage)), - 'description': clean_html(get_element_by_id('contentlist', start_webpage)), - 'thumbnail': video_thumbnail - }) - return { - '_type': 'multi_video', - 'entries': videos, - 'id': 'folge-%s' % episode, - } - - -class FKTVPosteckeIE(InfoExtractor): - IE_NAME = 'fernsehkritik.tv:postecke' - _VALID_URL = r'http://(?:www\.)?fernsehkritik\.tv/inline-video/postecke\.php\?(.*&)?ep=(?P<ep>[0-9]+)(&|$)' - _TEST = { - 'url': 'http://fernsehkritik.tv/inline-video/postecke.php?iframe=true&width=625&height=440&ep=120', - 'md5': '262f0adbac80317412f7e57b4808e5c4', - 'info_dict': { - 'id': '0120', - 'ext': 'flv', - 'title': 'Postecke 120', - } - } - - def _real_extract(self, url): - mobj = re.match(self._VALID_URL, url) - episode = int(mobj.group('ep')) + episode = self._match_id(url) + + webpage = self._download_webpage( + 'http://fernsehkritik.tv/folge-%s/play' % episode, episode) + title = clean_html(self._html_search_regex( + '<h3>([^<]+)</h3>', webpage, 'title')) + thumbnail = self._search_regex(r'POSTER\s*=\s*"([^"]+)', webpage, 'thumbnail', fatal=False) + sources = self._parse_json(self._search_regex(r'(?s)MEDIA\s*=\s*(\[.+?\]);', webpage, 'media'), episode, js_to_json) + + formats = [] + for source in sources: + furl = source.get('src') + if furl: + formats.append({ + 'url': furl, + 'format_id': determine_ext(furl), + }) + self._sort_formats(formats) - server = random.randint(2, 4) - video_id = '%04d' % episode - video_url = 'http://dl%d.fernsehkritik.tv/postecke/postecke%d.flv' % (server, episode) - video_title = 'Postecke %d' % episode return { - 'id': video_id, - 'url': video_url, - 'title': video_title, + 'id': episode, + 'title': title, + 'formats': formats, + 'thumbnail': thumbnail, } diff --git a/youtube_dl/extractor/flickr.py b/youtube_dl/extractor/flickr.py index 2fe76d661..18f439df9 100644 --- a/youtube_dl/extractor/flickr.py +++ b/youtube_dl/extractor/flickr.py @@ -1,67 +1,93 @@ from __future__ import unicode_literals -import re - from .common import InfoExtractor -from ..compat import compat_urllib_request +from ..compat import compat_urllib_parse from ..utils import ( ExtractorError, - find_xpath_attr, + int_or_none, + qualities, ) class FlickrIE(InfoExtractor): - _VALID_URL = r'https?://(?:www\.|secure\.)?flickr\.com/photos/(?P<uploader_id>[\w\-_@]+)/(?P<id>\d+).*' + _VALID_URL = r'https?://(?:www\.|secure\.)?flickr\.com/photos/[\w\-_@]+/(?P<id>\d+)' _TEST = { 'url': 'http://www.flickr.com/photos/forestwander-nature-pictures/5645318632/in/photostream/', - 'md5': '6fdc01adbc89d72fc9c4f15b4a4ba87b', + 'md5': '164fe3fa6c22e18d448d4d5af2330f31', 'info_dict': { 'id': '5645318632', - 'ext': 'mp4', - "description": "Waterfalls in the 
Springtime at Dark Hollow Waterfalls. These are located just off of Skyline Drive in Virginia. They are only about 6/10 of a mile hike but it is a pretty steep hill and a good climb back up.", - "uploader_id": "forestwander-nature-pictures", - "title": "Dark Hollow Waterfalls" + 'ext': 'mpg', + 'description': 'Waterfalls in the Springtime at Dark Hollow Waterfalls. These are located just off of Skyline Drive in Virginia. They are only about 6/10 of a mile hike but it is a pretty steep hill and a good climb back up.', + 'title': 'Dark Hollow Waterfalls', + 'duration': 19, + 'timestamp': 1303528740, + 'upload_date': '20110423', + 'uploader_id': '10922353@N03', + 'uploader': 'Forest Wander', + 'comment_count': int, + 'view_count': int, + 'tags': list, } } - def _real_extract(self, url): - mobj = re.match(self._VALID_URL, url) + _API_BASE_URL = 'https://api.flickr.com/services/rest?' - video_id = mobj.group('id') - video_uploader_id = mobj.group('uploader_id') - webpage_url = 'http://www.flickr.com/photos/' + video_uploader_id + '/' + video_id - req = compat_urllib_request.Request(webpage_url) - req.add_header( - 'User-Agent', - # it needs a more recent version - 'Mozilla/5.0 (X11; Linux x86_64; rv:38.0) Gecko/20150101 Firefox/38.0 (Chrome)') - webpage = self._download_webpage(req, video_id) + def _call_api(self, method, video_id, api_key, note, secret=None): + query = { + 'photo_id': video_id, + 'method': 'flickr.%s' % method, + 'api_key': api_key, + 'format': 'json', + 'nojsoncallback': 1, + } + if secret: + query['secret'] = secret + data = self._download_json(self._API_BASE_URL + compat_urllib_parse.urlencode(query), video_id, note) + if data['stat'] != 'ok': + raise ExtractorError(data['message']) + return data - secret = self._search_regex(r'secret"\s*:\s*"(\w+)"', webpage, 'secret') + def _real_extract(self, url): + video_id = self._match_id(url) - first_url = 'https://secure.flickr.com/apps/video/video_mtl_xml.gne?v=x&photo_id=' + video_id + '&secret=' + secret + '&bitrate=700&target=_self' - first_xml = self._download_xml(first_url, video_id, 'Downloading first data webpage') + api_key = self._download_json( + 'https://www.flickr.com/hermes_error_beacon.gne', video_id, + 'Downloading api key')['site_key'] - node_id = find_xpath_attr( - first_xml, './/{http://video.yahoo.com/YEP/1.0/}Item', 'id', - 'id').text + video_info = self._call_api( + 'photos.getInfo', video_id, api_key, 'Downloading video info')['photo'] + if video_info['media'] == 'video': + streams = self._call_api( + 'video.getStreamInfo', video_id, api_key, + 'Downloading streams info', video_info['secret'])['streams'] - second_url = 'https://secure.flickr.com/video_playlist.gne?node_id=' + node_id + '&tech=flash&mode=playlist&bitrate=700&secret=' + secret + '&rd=video.yahoo.com&noad=1' - second_xml = self._download_xml(second_url, video_id, 'Downloading second data webpage') + preference = qualities( + ['288p', 'iphone_wifi', '100', '300', '700', '360p', 'appletv', '720p', '1080p', 'orig']) - self.report_extraction(video_id) + formats = [] + for stream in streams['stream']: + stream_type = str(stream.get('type')) + formats.append({ + 'format_id': stream_type, + 'url': stream['_content'], + 'preference': preference(stream_type), + }) + self._sort_formats(formats) - stream = second_xml.find('.//STREAM') - if stream is None: - raise ExtractorError('Unable to extract video url') - video_url = stream.attrib['APP'] + stream.attrib['FULLPATH'] + owner = video_info.get('owner', {}) - return { - 'id': video_id, - 'url': video_url, 
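A note on the _call_api() helper introduced above: it queries Flickr's REST endpoint with format=json and nojsoncallback=1 so the response comes back as plain JSON rather than JSONP, and API-level failures are signalled in the body via stat != 'ok' instead of an HTTP error. A rough stdlib-only sketch of the same call shape (parameter names as in the diff; a simplified illustration, not the extractor's actual code):

    import json
    try:
        from urllib.parse import urlencode        # Python 3
        from urllib.request import urlopen
    except ImportError:                           # Python 2
        from urllib import urlencode
        from urllib2 import urlopen

    API_BASE_URL = 'https://api.flickr.com/services/rest?'

    def call_api(method, api_key, **params):
        query = dict(params, method='flickr.%s' % method, api_key=api_key,
                     format='json', nojsoncallback=1)
        data = json.loads(urlopen(API_BASE_URL + urlencode(query)).read().decode('utf-8'))
        if data.get('stat') != 'ok':              # API-level error, not HTTP
            raise ValueError(data.get('message'))
        return data

    # e.g. call_api('photos.getInfo', api_key, photo_id='5645318632')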
- 'ext': 'mp4', - 'title': self._og_search_title(webpage), - 'description': self._og_search_description(webpage), - 'thumbnail': self._og_search_thumbnail(webpage), - 'uploader_id': video_uploader_id, - } + return { + 'id': video_id, + 'title': video_info['title']['_content'], + 'description': video_info.get('description', {}).get('_content'), + 'formats': formats, + 'timestamp': int_or_none(video_info.get('dateuploaded')), + 'duration': int_or_none(video_info.get('video', {}).get('duration')), + 'uploader_id': owner.get('nsid'), + 'uploader': owner.get('realname'), + 'comment_count': int_or_none(video_info.get('comments', {}).get('_content')), + 'view_count': int_or_none(video_info.get('views')), + 'tags': [tag.get('_content') for tag in video_info.get('tags', {}).get('tag', [])] + } + else: + raise ExtractorError('not a video', expected=True) diff --git a/youtube_dl/extractor/footyroom.py b/youtube_dl/extractor/footyroom.py index 4c7dbca40..370fd006f 100644 --- a/youtube_dl/extractor/footyroom.py +++ b/youtube_dl/extractor/footyroom.py @@ -13,6 +13,7 @@ class FootyRoomIE(InfoExtractor): 'title': 'Schalke 04 0 – 2 Real Madrid', }, 'playlist_count': 3, + 'skip': 'Video for this match is not available', }, { 'url': 'http://footyroom.com/georgia-0-2-germany-2015-03/', 'info_dict': { diff --git a/youtube_dl/extractor/fourtube.py b/youtube_dl/extractor/fourtube.py index 3bb4f6239..fc4a5a0fb 100644 --- a/youtube_dl/extractor/fourtube.py +++ b/youtube_dl/extractor/fourtube.py @@ -3,12 +3,10 @@ from __future__ import unicode_literals import re from .common import InfoExtractor -from ..compat import ( - compat_urllib_request, -) from ..utils import ( parse_duration, parse_iso8601, + sanitized_Request, str_to_int, ) @@ -46,10 +44,10 @@ class FourTubeIE(InfoExtractor): thumbnail = self._html_search_meta('thumbnailUrl', webpage) uploader_id = self._html_search_regex( r'<a class="img-avatar" href="[^"]+/channels/([^/"]+)" title="Go to [^"]+ page">', - webpage, 'uploader id') + webpage, 'uploader id', fatal=False) uploader = self._html_search_regex( r'<a class="img-avatar" href="[^"]+/channels/[^/"]+" title="Go to ([^"]+) page">', - webpage, 'uploader') + webpage, 'uploader', fatal=False) categories_html = self._search_regex( r'(?s)><i class="icon icon-tag"></i>\s*Categories / Tags\s*.*?<ul class="list">(.*?)</ul>', @@ -68,13 +66,24 @@ class FourTubeIE(InfoExtractor): webpage, 'like count', fatal=False)) duration = parse_duration(self._html_search_meta('duration', webpage)) - params_js = self._search_regex( - r'\$\.ajax\(url,\ opts\);\s*\}\s*\}\)\(([0-9,\[\] ]+)\)', - webpage, 'initialization parameters' - ) - params = self._parse_json('[%s]' % params_js, video_id) - media_id = params[0] - sources = ['%s' % p for p in params[2]] + media_id = self._search_regex( + r'<button[^>]+data-id=(["\'])(?P<id>\d+)\1[^>]+data-quality=', webpage, + 'media id', default=None, group='id') + sources = [ + quality + for _, quality in re.findall(r'<button[^>]+data-quality=(["\'])(.+?)\1', webpage)] + if not (media_id and sources): + player_js = self._download_webpage( + self._search_regex( + r'<script[^>]id=(["\'])playerembed\1[^>]+src=(["\'])(?P<url>.+?)\2', + webpage, 'player JS', group='url'), + video_id, 'Downloading player JS') + params_js = self._search_regex( + r'\$\.ajax\(url,\ opts\);\s*\}\s*\}\)\(([0-9,\[\] ]+)\)', + player_js, 'initialization parameters') + params = self._parse_json('[%s]' % params_js, video_id) + media_id = params[0] + sources = ['%s' % p for p in params[2]] token_url = 
'http://tkn.4tube.com/{0}/desktop/{1}'.format( media_id, '+'.join(sources)) @@ -82,7 +91,7 @@ class FourTubeIE(InfoExtractor): b'Content-Type': b'application/x-www-form-urlencoded', b'Origin': b'http://www.4tube.com', } - token_req = compat_urllib_request.Request(token_url, b'{}', headers) + token_req = sanitized_Request(token_url, b'{}', headers) tokens = self._download_json(token_req, video_id) formats = [{ 'url': tokens[format]['token'], diff --git a/youtube_dl/extractor/foxnews.py b/youtube_dl/extractor/foxnews.py index 3a4a59135..318ac013d 100644 --- a/youtube_dl/extractor/foxnews.py +++ b/youtube_dl/extractor/foxnews.py @@ -2,14 +2,10 @@ from __future__ import unicode_literals import re -from .common import InfoExtractor -from ..utils import ( - parse_iso8601, - int_or_none, -) +from .amp import AMPIE -class FoxNewsIE(InfoExtractor): +class FoxNewsIE(AMPIE): IE_DESC = 'Fox News and Fox Business Video' _VALID_URL = r'https?://(?P<host>video\.fox(?:news|business)\.com)/v/(?:video-embed\.html\?video_id=)?(?P<id>\d+)' _TESTS = [ @@ -20,10 +16,10 @@ class FoxNewsIE(InfoExtractor): 'id': '3937480', 'ext': 'flv', 'title': 'Frozen in Time', - 'description': 'Doctors baffled by 16-year-old girl that is the size of a toddler', + 'description': '16-year-old girl is size of toddler', 'duration': 265, - 'timestamp': 1304411491, - 'upload_date': '20110503', + # 'timestamp': 1304411491, + # 'upload_date': '20110503', 'thumbnail': 're:^https?://.*\.jpg$', }, }, @@ -34,10 +30,10 @@ class FoxNewsIE(InfoExtractor): 'id': '3922535568001', 'ext': 'mp4', 'title': "Rep. Luis Gutierrez on if Obama's immigration plan is legal", - 'description': "Congressman discusses the president's executive action", + 'description': "Congressman discusses president's plan", 'duration': 292, - 'timestamp': 1417662047, - 'upload_date': '20141204', + # 'timestamp': 1417662047, + # 'upload_date': '20141204', 'thumbnail': 're:^https?://.*\.jpg$', }, }, @@ -52,52 +48,9 @@ class FoxNewsIE(InfoExtractor): ] def _real_extract(self, url): - mobj = re.match(self._VALID_URL, url) - video_id = mobj.group('id') - host = mobj.group('host') + host, video_id = re.match(self._VALID_URL, url).groups() - video = self._download_json( - 'http://%s/v/feed/video/%s.js?template=fox' % (host, video_id), video_id) - - item = video['channel']['item'] - title = item['title'] - description = item['description'] - timestamp = parse_iso8601(item['dc-date']) - - media_group = item['media-group'] - duration = None - formats = [] - for media in media_group['media-content']: - attributes = media['@attributes'] - video_url = attributes['url'] - if video_url.endswith('.f4m'): - formats.extend(self._extract_f4m_formats(video_url + '?hdcore=3.4.0&plugin=aasp-3.4.0.132.124', video_id)) - elif video_url.endswith('.m3u8'): - formats.extend(self._extract_m3u8_formats(video_url, video_id, 'flv')) - elif not video_url.endswith('.smil'): - duration = int_or_none(attributes.get('duration')) - formats.append({ - 'url': video_url, - 'format_id': media['media-category']['@attributes']['label'], - 'preference': 1, - 'vbr': int_or_none(attributes.get('bitrate')), - 'filesize': int_or_none(attributes.get('fileSize')) - }) - self._sort_formats(formats) - - media_thumbnail = media_group['media-thumbnail']['@attributes'] - thumbnails = [{ - 'url': media_thumbnail['url'], - 'width': int_or_none(media_thumbnail.get('width')), - 'height': int_or_none(media_thumbnail.get('height')), - }] if media_thumbnail else [] - - return { - 'id': video_id, - 'title': title, - 'description': 
description, - 'duration': duration, - 'timestamp': timestamp, - 'formats': formats, - 'thumbnails': thumbnails, - } + info = self._extract_feed_info( + 'http://%s/v/feed/video/%s.js?template=fox' % (host, video_id)) + info['id'] = video_id + return info diff --git a/youtube_dl/extractor/franceculture.py b/youtube_dl/extractor/franceculture.py index 1e83a4e7e..e2ca96283 100644 --- a/youtube_dl/extractor/franceculture.py +++ b/youtube_dl/extractor/franceculture.py @@ -8,6 +8,7 @@ from ..compat import ( from ..utils import ( determine_ext, int_or_none, + ExtractorError, ) @@ -22,14 +23,13 @@ class FranceCultureIE(InfoExtractor): 'alt_title': 'Carnet nomade | 13-14', 'vcodec': 'none', 'upload_date': '20140301', - 'thumbnail': r're:^http://www\.franceculture\.fr/.*/images/player/Carnet-nomade\.jpg$', - 'description': 'startswith:Avec :Jean-Baptiste Péretié pour son documentaire sur Arte "La revanche des « geeks », une enquête menée aux Etats', + 'thumbnail': r're:^http://static\.franceculture\.fr/.*/images/player/Carnet-nomade\.jpg$', + 'description': 'startswith:Avec :Jean-Baptiste Péretié pour son documentaire sur Arte "La revanche', 'timestamp': 1393700400, } } - def _real_extract(self, url): - video_id = self._match_id(url) + def _extract_from_player(self, url, video_id): webpage = self._download_webpage(url, video_id) video_path = self._search_regex( @@ -42,6 +42,9 @@ class FranceCultureIE(InfoExtractor): r'<a id="player".*?>\s+<img src="([^"]+)"', webpage, 'thumbnail', fatal=False) + display_id = self._search_regex( + r'<span class="path-diffusion">emission-(.*?)</span>', webpage, 'display_id') + title = self._html_search_regex( r'<span class="title-diffusion">(.*?)</span>', webpage, 'title') alt_title = self._html_search_regex( @@ -66,4 +69,37 @@ class FranceCultureIE(InfoExtractor): 'alt_title': alt_title, 'thumbnail': thumbnail, 'description': description, + 'display_id': display_id, } + + def _real_extract(self, url): + video_id = self._match_id(url) + return self._extract_from_player(url, video_id) + + +class FranceCultureEmissionIE(FranceCultureIE): + _VALID_URL = r'https?://(?:www\.)?franceculture\.fr/emission-(?P<id>[^?#]+)' + _TEST = { + 'url': 'http://www.franceculture.fr/emission-les-carnets-de-la-creation-jean-gabriel-periot-cineaste-2015-10-13', + 'info_dict': { + 'title': 'Jean-Gabriel Périot, cinéaste', + 'alt_title': 'Les Carnets de la création', + 'id': '5093239', + 'display_id': 'les-carnets-de-la-creation-jean-gabriel-periot-cineaste-2015-10-13', + 'ext': 'mp3', + 'timestamp': 1444762500, + 'upload_date': '20151013', + 'description': 'startswith:Aujourd\'hui dans "Les carnets de la création", le cinéaste', + }, + } + + def _real_extract(self, url): + video_id = self._match_id(url) + webpage = self._download_webpage(url, video_id) + video_path = self._html_search_regex( + r'<a class="rf-player-open".*?href="([^"]+)"', webpage, 'video path', 'no_path_player') + if video_path == 'no_path_player': + raise ExtractorError('no player : no sound in this page.', expected=True) + new_id = self._search_regex('play=(?P<id>[0-9]+)', video_path, 'new_id', group='id') + video_url = compat_urlparse.urljoin(url, video_path) + return self._extract_from_player(video_url, new_id) diff --git a/youtube_dl/extractor/franceinter.py b/youtube_dl/extractor/franceinter.py index 6613ee17a..fdc51f44f 100644 --- a/youtube_dl/extractor/franceinter.py +++ b/youtube_dl/extractor/franceinter.py @@ -1,8 +1,6 @@ # coding: utf-8 from __future__ import unicode_literals -import re - from .common import 
InfoExtractor from ..utils import int_or_none @@ -23,8 +21,7 @@ class FranceInterIE(InfoExtractor): } def _real_extract(self, url): - mobj = re.match(self._VALID_URL, url) - video_id = mobj.group('id') + video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) @@ -33,7 +30,7 @@ class FranceInterIE(InfoExtractor): video_url = 'http://www.franceinter.fr/' + path title = self._html_search_regex( - r'<span class="title">(.+?)</span>', webpage, 'title') + r'<span class="title-diffusion">(.+?)</span>', webpage, 'title') description = self._html_search_regex( r'<span class="description">(.*?)</span>', webpage, 'description', fatal=False) diff --git a/youtube_dl/extractor/francetv.py b/youtube_dl/extractor/francetv.py index 129984a5f..8e60cf60f 100644 --- a/youtube_dl/extractor/francetv.py +++ b/youtube_dl/extractor/francetv.py @@ -83,6 +83,14 @@ class FranceTVBaseInfoExtractor(InfoExtractor): if subtitle: title += ' - %s' % subtitle + subtitles = {} + subtitles_list = [{ + 'url': subformat['url'], + 'ext': subformat.get('format'), + } for subformat in info.get('subtitles', []) if subformat.get('url')] + if subtitles_list: + subtitles['fr'] = subtitles_list + return { 'id': video_id, 'title': title, @@ -91,20 +99,27 @@ class FranceTVBaseInfoExtractor(InfoExtractor): 'duration': int_or_none(info.get('real_duration')) or parse_duration(info['duree']), 'timestamp': int_or_none(info['diffusion']['timestamp']), 'formats': formats, + 'subtitles': subtitles, } class PluzzIE(FranceTVBaseInfoExtractor): IE_NAME = 'pluzz.francetv.fr' - _VALID_URL = r'https?://pluzz\.francetv\.fr/videos/(.*?)\.html' + _VALID_URL = r'https?://(?:m\.)?pluzz\.francetv\.fr/videos/(?P<id>.+?)\.html' # Can't use tests, videos expire in 7 days def _real_extract(self, url): - title = re.match(self._VALID_URL, url).group(1) - webpage = self._download_webpage(url, title) - video_id = self._search_regex( - r'data-diffusion="(\d+)"', webpage, 'ID') + display_id = self._match_id(url) + + webpage = self._download_webpage(url, display_id) + + video_id = self._html_search_meta( + 'id_video', webpage, 'video id', default=None) + if not video_id: + video_id = self._search_regex( + r'data-diffusion=["\'](\d+)', webpage, 'video id') + return self._extract_video(video_id, 'Pluzz') @@ -120,6 +135,9 @@ class FranceTvInfoIE(FranceTVBaseInfoExtractor): 'title': 'Soir 3', 'upload_date': '20130826', 'timestamp': 1377548400, + 'subtitles': { + 'fr': 'mincount:2', + }, }, }, { 'url': 'http://www.francetvinfo.fr/elections/europeennes/direct-europeennes-regardez-le-debat-entre-les-candidats-a-la-presidence-de-la-commission_600639.html', diff --git a/youtube_dl/extractor/funimation.py b/youtube_dl/extractor/funimation.py new file mode 100644 index 000000000..0f37ed786 --- /dev/null +++ b/youtube_dl/extractor/funimation.py @@ -0,0 +1,191 @@ +# coding: utf-8 +from __future__ import unicode_literals + +from .common import InfoExtractor +from ..utils import ( + clean_html, + determine_ext, + encode_dict, + int_or_none, + sanitized_Request, + ExtractorError, + urlencode_postdata +) + + +class FunimationIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?funimation\.com/shows/[^/]+/videos/(?:official|promotional)/(?P<id>[^/?#&]+)' + + _NETRC_MACHINE = 'funimation' + + _TESTS = [{ + 'url': 'http://www.funimation.com/shows/air/videos/official/breeze', + 'info_dict': { + 'id': '658', + 'display_id': 'breeze', + 'ext': 'mp4', + 'title': 'Air - 1 - Breeze', + 'description': 'md5:1769f43cd5fc130ace8fd87232207892', + 'thumbnail': 
're:https?://.*\.jpg', + }, + }, { + 'url': 'http://www.funimation.com/shows/hacksign/videos/official/role-play', + 'info_dict': { + 'id': '31128', + 'display_id': 'role-play', + 'ext': 'mp4', + 'title': '.hack//SIGN - 1 - Role Play', + 'description': 'md5:b602bdc15eef4c9bbb201bb6e6a4a2dd', + 'thumbnail': 're:https?://.*\.jpg', + }, + }, { + 'url': 'http://www.funimation.com/shows/attack-on-titan-junior-high/videos/promotional/broadcast-dub-preview', + 'info_dict': { + 'id': '9635', + 'display_id': 'broadcast-dub-preview', + 'ext': 'mp4', + 'title': 'Attack on Titan: Junior High - Broadcast Dub Preview', + 'description': 'md5:f8ec49c0aff702a7832cd81b8a44f803', + 'thumbnail': 're:https?://.*\.(?:jpg|png)', + }, + }] + + def _login(self): + (username, password) = self._get_login_info() + if username is None: + return + data = urlencode_postdata(encode_dict({ + 'email_field': username, + 'password_field': password, + })) + login_request = sanitized_Request('http://www.funimation.com/login', data, headers={ + 'User-Agent': 'Mozilla/5.0 (Windows NT 5.2; WOW64; rv:42.0) Gecko/20100101 Firefox/42.0', + 'Content-Type': 'application/x-www-form-urlencoded' + }) + login_page = self._download_webpage( + login_request, None, 'Logging in as %s' % username) + if any(p in login_page for p in ('funimation.com/logout', '>Log Out<')): + return + error = self._html_search_regex( + r'(?s)<div[^>]+id=["\']errorMessages["\'][^>]*>(.+?)</div>', + login_page, 'error messages', default=None) + if error: + raise ExtractorError('Unable to login: %s' % error, expected=True) + raise ExtractorError('Unable to log in') + + def _real_initialize(self): + self._login() + + def _real_extract(self, url): + display_id = self._match_id(url) + + errors = [] + formats = [] + + ERRORS_MAP = { + 'ERROR_MATURE_CONTENT_LOGGED_IN': 'matureContentLoggedIn', + 'ERROR_MATURE_CONTENT_LOGGED_OUT': 'matureContentLoggedOut', + 'ERROR_SUBSCRIPTION_LOGGED_OUT': 'subscriptionLoggedOut', + 'ERROR_VIDEO_EXPIRED': 'videoExpired', + 'ERROR_TERRITORY_UNAVAILABLE': 'territoryUnavailable', + 'SVODBASIC_SUBSCRIPTION_IN_PLAYER': 'basicSubscription', + 'SVODNON_SUBSCRIPTION_IN_PLAYER': 'nonSubscription', + 'ERROR_PLAYER_NOT_RESPONDING': 'playerNotResponding', + 'ERROR_UNABLE_TO_CONNECT_TO_CDN': 'unableToConnectToCDN', + 'ERROR_STREAM_NOT_FOUND': 'streamNotFound', + } + + USER_AGENTS = ( + # PC UA is served with m3u8 that provides some bonus lower quality formats + ('pc', 'Mozilla/5.0 (Windows NT 5.2; WOW64; rv:42.0) Gecko/20100101 Firefox/42.0'), + # Mobile UA allows to extract direct links and also does not fail when + # PC UA fails with hulu error (e.g. 
+ # http://www.funimation.com/shows/hacksign/videos/official/role-play) + ('mobile', 'Mozilla/5.0 (Linux; Android 4.4.2; Nexus 4 Build/KOT49H) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.114 Mobile Safari/537.36'), + ) + + for kind, user_agent in USER_AGENTS: + request = sanitized_Request(url) + request.add_header('User-Agent', user_agent) + webpage = self._download_webpage( + request, display_id, 'Downloading %s webpage' % kind) + + playlist = self._parse_json( + self._search_regex( + r'var\s+playersData\s*=\s*(\[.+?\]);\n', + webpage, 'players data'), + display_id)[0]['playlist'] + + items = next(item['items'] for item in playlist if item.get('items')) + item = next(item for item in items if item.get('itemAK') == display_id) + + error_messages = {} + video_error_messages = self._search_regex( + r'var\s+videoErrorMessages\s*=\s*({.+?});\n', + webpage, 'error messages', default=None) + if video_error_messages: + error_messages_json = self._parse_json(video_error_messages, display_id, fatal=False) + if error_messages_json: + for _, error in error_messages_json.items(): + type_ = error.get('type') + description = error.get('description') + content = error.get('content') + if type_ == 'text' and description and content: + error_message = ERRORS_MAP.get(description) + if error_message: + error_messages[error_message] = content + + for video in item.get('videoSet', []): + auth_token = video.get('authToken') + if not auth_token: + continue + funimation_id = video.get('FUNImationID') or video.get('videoId') + preference = 1 if video.get('languageMode') == 'dub' else 0 + if not auth_token.startswith('?'): + auth_token = '?%s' % auth_token + for quality, height in (('sd', 480), ('hd', 720), ('hd1080', 1080)): + format_url = video.get('%sUrl' % quality) + if not format_url: + continue + if not format_url.startswith(('http', '//')): + errors.append(format_url) + continue + if determine_ext(format_url) == 'm3u8': + formats.extend(self._extract_m3u8_formats( + format_url + auth_token, display_id, 'mp4', entry_protocol='m3u8_native', + preference=preference, m3u8_id='%s-hls' % funimation_id, fatal=False)) + else: + tbr = int_or_none(self._search_regex( + r'-(\d+)[Kk]', format_url, 'tbr', default=None)) + formats.append({ + 'url': format_url + auth_token, + 'format_id': '%s-http-%dp' % (funimation_id, height), + 'height': height, + 'tbr': tbr, + 'preference': preference, + }) + + if not formats and errors: + raise ExtractorError( + '%s returned error: %s' + % (self.IE_NAME, clean_html(error_messages.get(errors[0], errors[0]))), + expected=True) + + self._sort_formats(formats) + + title = item['title'] + artist = item.get('artist') + if artist: + title = '%s - %s' % (artist, title) + description = self._og_search_description(webpage) or item.get('description') + thumbnail = self._og_search_thumbnail(webpage) or item.get('posterUrl') + video_id = item.get('itemId') or display_id + + return { + 'id': video_id, + 'display_id': display_id, + 'title': title, + 'description': description, + 'thumbnail': thumbnail, + 'formats': formats, + } diff --git a/youtube_dl/extractor/funnyordie.py b/youtube_dl/extractor/funnyordie.py index f5f13689c..4c4a87e2a 100644 --- a/youtube_dl/extractor/funnyordie.py +++ b/youtube_dl/extractor/funnyordie.py @@ -45,11 +45,18 @@ class FunnyOrDieIE(InfoExtractor): links.sort(key=lambda link: 1 if link[1] == 'mp4' else 0) - bitrates = self._html_search_regex(r'<source src="[^"]+/v,((?:\d+,)+)\.mp4\.csmil', webpage, 'video bitrates') - bitrates = [int(b) for b in 
bitrates.rstrip(',').split(',')] - bitrates.sort() + m3u8_url = self._search_regex( + r'<source[^>]+src=(["\'])(?P<url>.+?/master\.m3u8)\1', + webpage, 'm3u8 url', default=None, group='url') formats = [] + + formats.extend(self._extract_m3u8_formats( + m3u8_url, video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)) + + bitrates = [int(bitrate) for bitrate in re.findall(r'[,/]v(\d+)[,/]', m3u8_url)] + bitrates.sort() + for bitrate in bitrates: for link in links: formats.append({ diff --git a/youtube_dl/extractor/gameinformer.py b/youtube_dl/extractor/gameinformer.py new file mode 100644 index 000000000..25870c131 --- /dev/null +++ b/youtube_dl/extractor/gameinformer.py @@ -0,0 +1,43 @@ +# coding: utf-8 +from __future__ import unicode_literals + +from .common import InfoExtractor +from ..compat import compat_str +from ..utils import int_or_none + + +class GameInformerIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?gameinformer\.com/(?:[^/]+/)*(?P<id>.+)\.aspx' + _TEST = { + 'url': 'http://www.gameinformer.com/b/features/archive/2015/09/26/replay-animal-crossing.aspx', + 'info_dict': { + 'id': '4515472681001', + 'ext': 'm3u8', + 'title': 'Replay - Animal Crossing', + 'description': 'md5:2e211891b215c85d061adc7a4dd2d930', + 'timestamp': 1443457610706, + }, + 'params': { + # m3u8 download + 'skip_download': True, + }, + } + + def _real_extract(self, url): + display_id = self._match_id(url) + webpage = self._download_webpage(url, display_id) + + bc_api_url = self._search_regex(r"getVideo\('([^']+)'", webpage, 'brightcove api url') + json_data = self._download_json( + bc_api_url + '&video_fields=id,name,shortDescription,publishedDate,videoStillURL,length,IOSRenditions', + display_id) + + return { + 'id': compat_str(json_data['id']), + 'display_id': display_id, + 'url': json_data['IOSRenditions'][0]['url'], + 'title': json_data['name'], + 'description': json_data.get('shortDescription'), + 'timestamp': int_or_none(json_data.get('publishedDate')), + 'duration': int_or_none(json_data.get('length')), + } diff --git a/youtube_dl/extractor/gametrailers.py b/youtube_dl/extractor/gametrailers.py index a6ab795ae..c3f031d9c 100644 --- a/youtube_dl/extractor/gametrailers.py +++ b/youtube_dl/extractor/gametrailers.py @@ -1,19 +1,62 @@ from __future__ import unicode_literals -from .mtv import MTVServicesInfoExtractor +from .common import InfoExtractor +from ..utils import ( + int_or_none, + parse_age_limit, + url_basename, +) -class GametrailersIE(MTVServicesInfoExtractor): - _VALID_URL = r'http://www\.gametrailers\.com/(?P<type>videos|reviews|full-episodes)/(?P<id>.*?)/(?P<title>.*)' +class GametrailersIE(InfoExtractor): + _VALID_URL = r'http://www\.gametrailers\.com/videos/view/[^/]+/(?P<id>.+)' + _TEST = { - 'url': 'http://www.gametrailers.com/videos/zbvr8i/mirror-s-edge-2-e3-2013--debut-trailer', - 'md5': '4c8e67681a0ea7ec241e8c09b3ea8cf7', + 'url': 'http://www.gametrailers.com/videos/view/gametrailers-com/116437-Just-Cause-3-Review', + 'md5': 'f28c4efa0bdfaf9b760f6507955b6a6a', 'info_dict': { - 'id': '70e9a5d7-cf25-4a10-9104-6f3e7342ae0d', + 'id': '2983958', 'ext': 'mp4', - 'title': 'E3 2013: Debut Trailer', - 'description': 'Faith is back! 
Check out the World Premiere trailer for Mirror\'s Edge 2 straight from the EA Press Conference at E3 2013!',
+            'display_id': '116437-Just-Cause-3-Review',
+            'title': 'Just Cause 3 - Review',
+            'description': 'It\'s a lot of fun to shoot at things and then watch them explode in Just Cause 3, but should there be more to the experience than that?',
         },
     }
 
-    _FEED_URL = 'http://www.gametrailers.com/feeds/mrss'
+    def _real_extract(self, url):
+        display_id = self._match_id(url)
+        webpage = self._download_webpage(url, display_id)
+        title = self._html_search_regex(
+            r'<title>(.+?)\|', webpage, 'title').strip()
+        embed_url = self._proto_relative_url(
+            self._search_regex(
+                r'src=\'(//embed.gametrailers.com/embed/[^\']+)\'', webpage,
+                'embed url'),
+            scheme='http:')
+        video_id = url_basename(embed_url)
+        embed_page = self._download_webpage(embed_url, video_id)
+        embed_vars_json = self._search_regex(
+            r'(?s)var embedVars = (\{.*?\})\s*</script>', embed_page,
+            'embed vars')
+        info = self._parse_json(embed_vars_json, video_id)
+
+        formats = []
+        for media in info['media']:
+            if media['mediaPurpose'] == 'play':
+                formats.append({
+                    'url': media['uri'],
+                    'height': media['height'],
+                    'width': media['width'],
+                })
+        self._sort_formats(formats)
+
+        return {
+            'id': video_id,
+            'display_id': display_id,
+            'title': title,
+            'formats': formats,
+            'thumbnail': info.get('thumbUri'),
+            'description': self._og_search_description(webpage),
+            'duration': int_or_none(info.get('videoLengthInSeconds')),
+            'age_limit': parse_age_limit(info.get('audienceRating')),
+        }
diff --git a/youtube_dl/extractor/gdcvault.py b/youtube_dl/extractor/gdcvault.py
index a6834db43..3befd3e7b 100644
--- a/youtube_dl/extractor/gdcvault.py
+++ b/youtube_dl/extractor/gdcvault.py
@@ -3,13 +3,11 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_urllib_parse,
-    compat_urllib_request,
-)
+from ..compat import compat_urllib_parse
 from ..utils import (
     remove_end,
     HEADRequest,
+    sanitized_Request,
 )
 
 
@@ -125,7 +123,7 @@ class GDCVaultIE(InfoExtractor):
             'password': password,
         }
 
-        request = compat_urllib_request.Request(login_url, compat_urllib_parse.urlencode(login_form))
+        request = sanitized_Request(login_url, compat_urllib_parse.urlencode(login_form))
         request.add_header('Content-Type', 'application/x-www-form-urlencoded')
         self._download_webpage(request, display_id, 'Logging in')
         start_page = self._download_webpage(webpage_url, display_id, 'Getting authenticated video page')
diff --git a/youtube_dl/extractor/generic.py b/youtube_dl/extractor/generic.py
index 4d1f75e63..70a8d8eb9 100644
--- a/youtube_dl/extractor/generic.py
+++ b/youtube_dl/extractor/generic.py
@@ -9,8 +9,8 @@ import sys
 from .common import InfoExtractor
 from .youtube import YoutubeIE
 from ..compat import (
+    compat_etree_fromstring,
     compat_urllib_parse_unquote,
-    compat_urllib_request,
     compat_urlparse,
     compat_xml_parse_error,
 )
@@ -21,7 +21,7 @@ from ..utils import (
     HEADRequest,
     is_html,
     orderedSet,
-    parse_xml,
+    sanitized_Request,
     smuggle_url,
     unescapeHTML,
     unified_strdate,
@@ -30,7 +30,10 @@ from ..utils import (
     url_basename,
     xpath_text,
 )
-from .brightcove import BrightcoveIE
+from .brightcove import (
+    BrightcoveLegacyIE,
+    BrightcoveNewIE,
+)
 from .nbc import NBCSportsVPlayerIE
 from .ooyala import OoyalaIE
 from .rutv import RUTVIE
@@ -41,7 +44,6 @@ from .myvi import MyviIE
 from .condenast import CondeNastIE
 from .udn import UDNEmbedIE
 from .senateisvp import SenateISVPIE
-from .bliptv import 
BlipTVIE from .svt import SVTIE from .pornhub import PornHubIE from .xhamster import XHamsterEmbedIE @@ -50,6 +52,10 @@ from .dailymotion import DailymotionCloudIE from .onionstudios import OnionStudiosIE from .snagfilms import SnagFilmsEmbedIE from .screenwavemedia import ScreenwaveMediaIE +from .mtv import MTVServicesEmbeddedIE +from .pladform import PladformIE +from .googledrive import GoogleDriveIE +from .jwplatform import JWPlatformIE from .ultimedia import UltimediaIE @@ -141,6 +147,7 @@ class GenericIE(InfoExtractor): 'ext': 'mp4', 'title': 'Automatics, robotics and biocybernetics', 'description': 'md5:815fc1deb6b3a2bff99de2d5325be482', + 'upload_date': '20130627', 'formats': 'mincount:16', 'subtitles': 'mincount:1', }, @@ -274,7 +281,7 @@ class GenericIE(InfoExtractor): # it also tests brightcove videos that need to set the 'Referer' in the # http requests { - 'add_ie': ['Brightcove'], + 'add_ie': ['BrightcoveLegacy'], 'url': 'http://www.bfmtv.com/video/bfmbusiness/cours-bourse/cours-bourse-l-analyse-technique-154522/', 'info_dict': { 'id': '2765128793001', @@ -298,7 +305,7 @@ class GenericIE(InfoExtractor): 'uploader': 'thestar.com', 'description': 'Mississauga resident David Farmer is still out of power as a result of the ice storm a month ago. To keep the house warm, Farmer cuts wood from his property for a wood burning stove downstairs.', }, - 'add_ie': ['Brightcove'], + 'add_ie': ['BrightcoveLegacy'], }, { 'url': 'http://www.championat.com/video/football/v/87/87499.html', @@ -313,7 +320,7 @@ class GenericIE(InfoExtractor): }, { # https://github.com/rg3/youtube-dl/issues/3541 - 'add_ie': ['Brightcove'], + 'add_ie': ['BrightcoveLegacy'], 'url': 'http://www.kijk.nl/sbs6/leermijvrouwenkennen/videos/jqMiXKAYan2S/aflevering-1', 'info_dict': { 'id': '3866516442001', @@ -335,6 +342,7 @@ class GenericIE(InfoExtractor): 'id': 'BwY2RxaTrTkslxOfcan0UCf0YqyvWysJ', 'ext': 'mp4', 'title': '2cc213299525360.mov', # that's what we get + 'duration': 238.231, }, 'add_ie': ['Ooyala'], }, @@ -346,6 +354,7 @@ class GenericIE(InfoExtractor): 'ext': 'mp4', 'title': '"Steve Jobs: Man in the Machine" trailer', 'description': 'The first trailer for the Alex Gibney documentary "Steve Jobs: Man in the Machine."', + 'duration': 135.427, }, 'params': { 'skip_download': True, @@ -819,6 +828,19 @@ class GenericIE(InfoExtractor): 'title': 'Os Guinness // Is It Fools Talk? // Unbelievable? 
Conference 2014', }, }, + # Kaltura embed protected with referrer + { + 'url': 'http://www.disney.nl/disney-channel/filmpjes/achter-de-schermen#/videoId/violetta-achter-de-schermen-ruggero', + 'info_dict': { + 'id': '1_g4fbemnq', + 'ext': 'mp4', + 'title': 'Violetta - Achter De Schermen - Ruggero', + 'description': 'Achter de schermen met Ruggero', + 'timestamp': 1435133761, + 'upload_date': '20150624', + 'uploader_id': 'echojecka', + }, + }, # Eagle.Platform embed (generic URL) { 'url': 'http://lenta.ru/news/2015/03/06/navalny/', @@ -943,8 +965,9 @@ class GenericIE(InfoExtractor): 'info_dict': { 'id': '50YnY4czr4ms1vJ7yz3xzq0excz_pUMs', 'ext': 'mp4', - 'description': 'VIDEO: Index/Match versus VLOOKUP.', + 'description': 'VIDEO: INDEX/MATCH versus VLOOKUP.', 'title': 'This is what separates the Excel masters from the wannabes', + 'duration': 191.933, }, 'params': { # m3u8 downloads @@ -1031,20 +1054,30 @@ class GenericIE(InfoExtractor): 'title': 'cinemasnob', }, }, - # Ultimedia embed + # BrightcoveInPageEmbed embed + { + 'url': 'http://www.geekandsundry.com/tabletop-bonus-wils-final-thoughts-on-dread/', + 'info_dict': { + 'id': '4238694884001', + 'ext': 'flv', + 'title': 'Tabletop: Dread, Last Thoughts', + 'description': 'Tabletop: Dread, Last Thoughts', + 'duration': 51690, + }, + }, + # JWPlayer with M3U8 { - 'url': 'http://www.jukebox.es/kosheen/videoclip,pride,r303r.html', - 'md5': '25551df6e7c7ab8096ceeeae048c5f64', + 'url': 'http://ren.tv/novosti/2015-09-25/sluchaynyy-prohozhiy-poymal-avtougonshchika-v-murmanske-video', 'info_dict': { - 'id': 'r303r', + 'id': 'playlist', 'ext': 'mp4', - 'title': 'Kosheen - Pride (live)', - 'thumbnail': 're:^https?://.*\.jpg', - 'duration': 293, - 'upload_date': '20081103', - 'timestamp': 1225733392, - 'uploader_id': '33m03', + 'title': 'Случайный прохожий поймал автоугонщика в Мурманске. ВИДЕО | РЕН ТВ', + 'uploader': 'ren.tv', }, + 'params': { + # m3u8 downloads + 'skip_download': True, + } } ] @@ -1188,7 +1221,7 @@ class GenericIE(InfoExtractor): full_response = None if head_response is False: - request = compat_urllib_request.Request(url) + request = sanitized_Request(url) request.add_header('Accept-Encoding', '*') full_response = self._request_webpage(request, video_id) head_response = full_response @@ -1217,7 +1250,7 @@ class GenericIE(InfoExtractor): '%s on generic information extractor.' % ('Forcing' if force else 'Falling back')) if not full_response: - request = compat_urllib_request.Request(url) + request = sanitized_Request(url) # Some webservers may serve compressed content of rather big size (e.g. gzipped flac) # making it impossible to download only chunk of the file (yet we need only 512kB to # test whether it's HTML or not). According to youtube-dl default Accept-Encoding @@ -1252,7 +1285,7 @@ class GenericIE(InfoExtractor): # Is it an RSS feed, a SMIL file or a XSPF playlist? 
try: - doc = parse_xml(webpage) + doc = compat_etree_fromstring(webpage.encode('utf-8')) if doc.tag == 'rss': return self._extract_rss(url, video_id, doc) elif re.match(r'^(?:{[^}]+})?smil$', doc.tag): @@ -1304,14 +1337,14 @@ class GenericIE(InfoExtractor): return self.playlist_result( urlrs, playlist_id=video_id, playlist_title=video_title) - # Look for BrightCove: - bc_urls = BrightcoveIE._extract_brightcove_urls(webpage) + # Look for Brightcove Legacy Studio embeds + bc_urls = BrightcoveLegacyIE._extract_brightcove_urls(webpage) if bc_urls: self.to_screen('Brightcove video detected.') entries = [{ '_type': 'url', 'url': smuggle_url(bc_url, {'Referer': url}), - 'ie_key': 'Brightcove' + 'ie_key': 'BrightcoveLegacy' } for bc_url in bc_urls] return { @@ -1321,6 +1354,11 @@ class GenericIE(InfoExtractor): 'entries': entries, } + # Look for Brightcove New Studio embeds + bc_urls = BrightcoveNewIE._extract_urls(webpage) + if bc_urls: + return _playlist_from_matches(bc_urls, ie='BrightcoveNew') + # Look for embedded rtl.nl player matches = re.findall( r'<iframe[^>]+?src="((?:https?:)?//(?:www\.)?rtl\.nl/system/videoplayer/[^"]+(?:video_)?embed[^"]+)"', @@ -1404,11 +1442,6 @@ class GenericIE(InfoExtractor): 'id': match.group('id') } - # Look for embedded blip.tv player - bliptv_url = BlipTVIE._extract_url(webpage) - if bliptv_url: - return self.url_result(bliptv_url, 'BlipTV') - # Look for SVT player svt_url = SVTIE._extract_url(webpage) if svt_url: @@ -1469,7 +1502,7 @@ class GenericIE(InfoExtractor): re.search(r'SBN\.VideoLinkset\.ooyala\([\'"](?P<ec>.{32})[\'"]\)', webpage) or re.search(r'data-ooyala-video-id\s*=\s*[\'"](?P<ec>.{32})[\'"]', webpage)) if mobj is not None: - return OoyalaIE._build_url_result(mobj.group('ec')) + return OoyalaIE._build_url_result(smuggle_url(mobj.group('ec'), {'domain': url})) # Look for multiple Ooyala embeds on SBN network websites mobj = re.search(r'SBN\.VideoLinkset\.entryGroup\((\[.*?\])', webpage) @@ -1477,7 +1510,7 @@ class GenericIE(InfoExtractor): embeds = self._parse_json(mobj.group(1), video_id, fatal=False) if embeds: return _playlist_from_matches( - embeds, getter=lambda v: OoyalaIE._url_for_embed_code(v['provider_video_id']), ie='Ooyala') + embeds, getter=lambda v: OoyalaIE._url_for_embed_code(smuggle_url(v['provider_video_id'], {'domain': url})), ie='Ooyala') # Look for Aparat videos mobj = re.search(r'<iframe .*?src="(http://www\.aparat\.com/video/[^"]+)"', webpage) @@ -1627,12 +1660,9 @@ class GenericIE(InfoExtractor): return self.url_result(url, ie='Vulture') # Look for embedded mtvservices player - mobj = re.search( - r'<iframe src="(?P<url>https?://media\.mtvnservices\.com/embed/[^"]+)"', - webpage) - if mobj is not None: - url = unescapeHTML(mobj.group('url')) - return self.url_result(url, ie='MTVServicesEmbedded') + mtvservices_url = MTVServicesEmbeddedIE._extract_url(webpage) + if mtvservices_url: + return self.url_result(mtvservices_url, ie='MTVServicesEmbedded') # Look for embedded yahoo player mobj = re.search( @@ -1671,7 +1701,7 @@ class GenericIE(InfoExtractor): return self.url_result(mobj.group('url'), 'MLB') mobj = re.search( - r'<iframe[^>]+?src=(["\'])(?P<url>%s)\1' % CondeNastIE.EMBED_URL, + r'<(?:iframe|script)[^>]+?src=(["\'])(?P<url>%s)\1' % CondeNastIE.EMBED_URL, webpage) if mobj is not None: return self.url_result(self._proto_relative_url(mobj.group('url'), scheme='http:'), 'CondeNast') @@ -1689,10 +1719,12 @@ class GenericIE(InfoExtractor): return self.url_result(mobj.group('url'), 'Zapiks') # Look for Kaltura embeds - mobj 
= (re.search(r"(?s)kWidget\.(?:thumb)?[Ee]mbed\(\{.*?'wid'\s*:\s*'_?(?P<partner_id>[^']+)',.*?'entry_id'\s*:\s*'(?P<id>[^']+)',", webpage) or - re.search(r'(?s)(["\'])(?:https?:)?//cdnapisec\.kaltura\.com/.*?(?:p|partner_id)/(?P<partner_id>\d+).*?\1.*?entry_id\s*:\s*(["\'])(?P<id>[^\2]+?)\2', webpage)) + mobj = (re.search(r"(?s)kWidget\.(?:thumb)?[Ee]mbed\(\{.*?'wid'\s*:\s*'_?(?P<partner_id>[^']+)',.*?'entry_?[Ii]d'\s*:\s*'(?P<id>[^']+)',", webpage) or + re.search(r'(?s)(?P<q1>["\'])(?:https?:)?//cdnapi(?:sec)?\.kaltura\.com/.*?(?:p|partner_id)/(?P<partner_id>\d+).*?(?P=q1).*?entry_?[Ii]d\s*:\s*(?P<q2>["\'])(?P<id>.+?)(?P=q2)', webpage)) if mobj is not None: - return self.url_result('kaltura:%(partner_id)s:%(id)s' % mobj.groupdict(), 'Kaltura') + return self.url_result(smuggle_url( + 'kaltura:%(partner_id)s:%(id)s' % mobj.groupdict(), + {'source_url': url}), 'Kaltura') # Look for Eagle.Platform embeds mobj = re.search( @@ -1707,10 +1739,9 @@ class GenericIE(InfoExtractor): return self.url_result('eagleplatform:%(host)s:%(id)s' % mobj.groupdict(), 'EaglePlatform') # Look for Pladform embeds - mobj = re.search( - r'<iframe[^>]+src="(?P<url>https?://out\.pladform\.ru/player\?.+?)"', webpage) - if mobj is not None: - return self.url_result(mobj.group('url'), 'Pladform') + pladform_url = PladformIE._extract_url(webpage) + if pladform_url: + return self.url_result(pladform_url) # Look for Playwire embeds mobj = re.search( @@ -1735,9 +1766,14 @@ class GenericIE(InfoExtractor): if nbc_sports_url: return self.url_result(nbc_sports_url, 'NBCSportsVPlayer') + # Look for Google Drive embeds + google_drive_url = GoogleDriveIE._extract_url(webpage) + if google_drive_url: + return self.url_result(google_drive_url, 'GoogleDrive') + # Look for UDN embeds mobj = re.search( - r'<iframe[^>]+src="(?P<url>%s)"' % UDNEmbedIE._VALID_URL, webpage) + r'<iframe[^>]+src="(?P<url>%s)"' % UDNEmbedIE._PROTOCOL_RELATIVE_VALID_URL, webpage) if mobj is not None: return self.url_result( compat_urlparse.urljoin(url, mobj.group('url')), 'UDNEmbed') @@ -1762,6 +1798,11 @@ class GenericIE(InfoExtractor): if snagfilms_url: return self.url_result(snagfilms_url) + # Look for JWPlatform embeds + jwplatform_url = JWPlatformIE._extract_url(webpage) + if jwplatform_url: + return self.url_result(jwplatform_url, 'JWPlatform') + # Look for ScreenwaveMedia embeds mobj = re.search(ScreenwaveMediaIE.EMBED_PATTERN, webpage) if mobj is not None: @@ -1862,6 +1903,7 @@ class GenericIE(InfoExtractor): entries = [] for video_url in found: + video_url = video_url.replace('\\/', '/') video_url = compat_urlparse.urljoin(url, video_url) video_id = compat_urllib_parse_unquote(os.path.basename(video_url)) @@ -1873,25 +1915,24 @@ class GenericIE(InfoExtractor): # here's a fun little line of code for you: video_id = os.path.splitext(video_id)[0] + entry_info_dict = { + 'id': video_id, + 'uploader': video_uploader, + 'title': video_title, + 'age_limit': age_limit, + } + ext = determine_ext(video_url) if ext == 'smil': - entries.append({ - 'id': video_id, - 'formats': self._extract_smil_formats(video_url, video_id), - 'uploader': video_uploader, - 'title': video_title, - 'age_limit': age_limit, - }) + entry_info_dict['formats'] = self._extract_smil_formats(video_url, video_id) elif ext == 'xspf': return self.playlist_result(self._extract_xspf_playlist(video_url, video_id), video_id) + elif ext == 'm3u8': + entry_info_dict['formats'] = self._extract_m3u8_formats(video_url, video_id, ext='mp4') else: - entries.append({ - 'id': video_id, - 'url': video_url, - 
'uploader': video_uploader, - 'title': video_title, - 'age_limit': age_limit, - }) + entry_info_dict['url'] = video_url + + entries.append(entry_info_dict) if len(entries) == 1: return entries[0] diff --git a/youtube_dl/extractor/globo.py b/youtube_dl/extractor/globo.py index 33d6432a6..b241c4868 100644 --- a/youtube_dl/extractor/globo.py +++ b/youtube_dl/extractor/globo.py @@ -14,79 +14,58 @@ from ..utils import ( ExtractorError, float_or_none, int_or_none, + str_or_none, ) class GloboIE(InfoExtractor): - _VALID_URL = 'https?://.+?\.globo\.com/(?P<id>.+)' + _VALID_URL = '(?:globo:|https?://.+?\.globo\.com/(?:[^/]+/)*(?:v/(?:[^/]+/)?|videos/))(?P<id>\d{7,})' _API_URL_TEMPLATE = 'http://api.globovideos.com/videos/%s/playlist' _SECURITY_URL_TEMPLATE = 'http://security.video.globo.com/videos/%s/hash?player=flash&version=17.0.0.132&resource_id=%s' - _VIDEOID_REGEXES = [ - r'\bdata-video-id="(\d+)"', - r'\bdata-player-videosids="(\d+)"', - r'<div[^>]+\bid="(\d+)"', - ] - _RESIGN_EXPIRATION = 86400 - _TESTS = [ - { - 'url': 'http://globotv.globo.com/sportv/futebol-nacional/v/os-gols-de-atletico-mg-3-x-2-santos-pela-24a-rodada-do-brasileirao/3654973/', - 'md5': '03ebf41cb7ade43581608b7d9b71fab0', - 'info_dict': { - 'id': '3654973', - 'ext': 'mp4', - 'title': 'Os gols de Atlético-MG 3 x 2 Santos pela 24ª rodada do Brasileirão', - 'duration': 251.585, - 'uploader': 'SporTV', - 'uploader_id': 698, - 'like_count': int, - } - }, - { - 'url': 'http://g1.globo.com/carros/autoesporte/videos/t/exclusivos-do-g1/v/mercedes-benz-gla-passa-por-teste-de-colisao-na-europa/3607726/', - 'md5': 'b3ccc801f75cd04a914d51dadb83a78d', - 'info_dict': { - 'id': '3607726', - 'ext': 'mp4', - 'title': 'Mercedes-Benz GLA passa por teste de colisão na Europa', - 'duration': 103.204, - 'uploader': 'Globo.com', - 'uploader_id': 265, - 'like_count': int, - } - }, - { - 'url': 'http://g1.globo.com/jornal-nacional/noticia/2014/09/novidade-na-fiscalizacao-de-bagagem-pela-receita-provoca-discussoes.html', - 'md5': '307fdeae4390ccfe6ba1aa198cf6e72b', - 'info_dict': { - 'id': '3652183', - 'ext': 'mp4', - 'title': 'Receita Federal explica como vai fiscalizar bagagens de quem retorna ao Brasil de avião', - 'duration': 110.711, - 'uploader': 'Rede Globo', - 'uploader_id': 196, - 'like_count': int, - } + _TESTS = [{ + 'url': 'http://g1.globo.com/carros/autoesporte/videos/t/exclusivos-do-g1/v/mercedes-benz-gla-passa-por-teste-de-colisao-na-europa/3607726/', + 'md5': 'b3ccc801f75cd04a914d51dadb83a78d', + 'info_dict': { + 'id': '3607726', + 'ext': 'mp4', + 'title': 'Mercedes-Benz GLA passa por teste de colisão na Europa', + 'duration': 103.204, + 'uploader': 'Globo.com', + 'uploader_id': '265', }, - { - 'url': 'http://globotv.globo.com/canal-brasil/sangue-latino/t/todos-os-videos/v/ator-e-diretor-argentino-ricado-darin-fala-sobre-utopias-e-suas-perdas/3928201/', - 'md5': 'c1defca721ce25b2354e927d3e4b3dec', - 'info_dict': { - 'id': '3928201', - 'ext': 'mp4', - 'title': 'Ator e diretor argentino, Ricado Darín fala sobre utopias e suas perdas', - 'duration': 1472.906, - 'uploader': 'Canal Brasil', - 'uploader_id': 705, - 'like_count': int, - } + }, { + 'url': 'http://globoplay.globo.com/v/4581987/', + 'md5': 'f36a1ecd6a50da1577eee6dd17f67eff', + 'info_dict': { + 'id': '4581987', + 'ext': 'mp4', + 'title': 'Acidentes de trânsito estão entre as maiores causas de queda de energia em SP', + 'duration': 137.973, + 'uploader': 'Rede Globo', + 'uploader_id': '196', }, - ] - - class MD5(): + }, { + 'url': 
'http://canalbrasil.globo.com/programas/sangue-latino/videos/3928201.html', + 'only_matching': True, + }, { + 'url': 'http://globosatplay.globo.com/globonews/v/4472924/', + 'only_matching': True, + }, { + 'url': 'http://globotv.globo.com/t/programa/v/clipe-sexo-e-as-negas-adeus/3836166/', + 'only_matching': True, + }, { + 'url': 'http://globotv.globo.com/canal-brasil/sangue-latino/t/todos-os-videos/v/ator-e-diretor-argentino-ricado-darin-fala-sobre-utopias-e-suas-perdas/3928201/', + 'only_matching': True, + }, { + 'url': 'http://canaloff.globo.com/programas/desejar-profundo/videos/4518560.html', + 'only_matching': True, + }] + + class MD5: HEX_FORMAT_LOWERCASE = 0 HEX_FORMAT_UPPERCASE = 1 BASE64_PAD_CHARACTER_DEFAULT_COMPLIANCE = '' @@ -353,9 +332,6 @@ class GloboIE(InfoExtractor): def _real_extract(self, url): video_id = self._match_id(url) - webpage = self._download_webpage(url, video_id) - video_id = self._search_regex(self._VIDEOID_REGEXES, webpage, 'video id') - video = self._download_json( self._API_URL_TEMPLATE % video_id, video_id)['videos'][0] @@ -364,7 +340,7 @@ class GloboIE(InfoExtractor): formats = [] for resource in video['resources']: resource_id = resource.get('_id') - if not resource_id: + if not resource_id or resource_id.endswith('manifest'): continue security = self._download_json( @@ -393,20 +369,21 @@ class GloboIE(InfoExtractor): resource_url = resource['url'] signed_url = '%s?h=%s&k=%s' % (resource_url, signed_hash, 'flash') if resource_id.endswith('m3u8') or resource_url.endswith('.m3u8'): - formats.extend(self._extract_m3u8_formats(signed_url, resource_id, 'mp4')) + formats.extend(self._extract_m3u8_formats( + signed_url, resource_id, 'mp4', entry_protocol='m3u8_native', + m3u8_id='hls', fatal=False)) else: formats.append({ 'url': signed_url, - 'format_id': resource_id, - 'height': resource.get('height'), + 'format_id': 'http-%s' % resource_id, + 'height': int_or_none(resource.get('height')), }) self._sort_formats(formats) duration = float_or_none(video.get('duration'), 1000) - like_count = int_or_none(video.get('likes')) uploader = video.get('channel') - uploader_id = video.get('channel_id') + uploader_id = str_or_none(video.get('channel_id')) return { 'id': video_id, @@ -414,6 +391,46 @@ class GloboIE(InfoExtractor): 'duration': duration, 'uploader': uploader, 'uploader_id': uploader_id, - 'like_count': like_count, 'formats': formats } + + +class GloboArticleIE(InfoExtractor): + _VALID_URL = 'https?://.+?\.globo\.com/(?:[^/]+/)*(?P<id>[^/]+)\.html' + + _VIDEOID_REGEXES = [ + r'\bdata-video-id=["\'](\d{7,})', + r'\bdata-player-videosids=["\'](\d{7,})', + r'\bvideosIDs\s*:\s*["\'](\d{7,})', + r'\bdata-id=["\'](\d{7,})', + r'<div[^>]+\bid=["\'](\d{7,})', + ] + + _TESTS = [{ + 'url': 'http://g1.globo.com/jornal-nacional/noticia/2014/09/novidade-na-fiscalizacao-de-bagagem-pela-receita-provoca-discussoes.html', + 'md5': '307fdeae4390ccfe6ba1aa198cf6e72b', + 'info_dict': { + 'id': '3652183', + 'ext': 'mp4', + 'title': 'Receita Federal explica como vai fiscalizar bagagens de quem retorna ao Brasil de avião', + 'duration': 110.711, + 'uploader': 'Rede Globo', + 'uploader_id': '196', + } + }, { + 'url': 'http://gq.globo.com/Prazeres/Poder/noticia/2015/10/all-o-desafio-assista-ao-segundo-capitulo-da-serie.html', + 'only_matching': True, + }, { + 'url': 'http://gshow.globo.com/programas/tv-xuxa/O-Programa/noticia/2014/01/xuxa-e-junno-namoram-muuuito-em-luau-de-zeze-di-camargo-e-luciano.html', + 'only_matching': True, + }] + + @classmethod + def suitable(cls, url): + return 
False if GloboIE.suitable(url) else super(GloboArticleIE, cls).suitable(url) + + def _real_extract(self, url): + display_id = self._match_id(url) + webpage = self._download_webpage(url, display_id) + video_id = self._search_regex(self._VIDEOID_REGEXES, webpage, 'video id') + return self.url_result('globo:%s' % video_id, 'Globo') diff --git a/youtube_dl/extractor/googledrive.py b/youtube_dl/extractor/googledrive.py new file mode 100644 index 000000000..f354c9c7a --- /dev/null +++ b/youtube_dl/extractor/googledrive.py @@ -0,0 +1,88 @@ +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..utils import ( + ExtractorError, + int_or_none, +) + + +class GoogleDriveIE(InfoExtractor): + _VALID_URL = r'https?://(?:(?:docs|drive)\.google\.com/(?:uc\?.*?id=|file/d/)|video\.google\.com/get_player\?.*?docid=)(?P<id>[a-zA-Z0-9_-]{28})' + _TEST = { + 'url': 'https://drive.google.com/file/d/0ByeS4oOUV-49Zzh4R1J6R09zazQ/edit?pli=1', + 'md5': '881f7700aec4f538571fa1e0eed4a7b6', + 'info_dict': { + 'id': '0ByeS4oOUV-49Zzh4R1J6R09zazQ', + 'ext': 'mp4', + 'title': 'Big Buck Bunny.mp4', + 'duration': 46, + } + } + _FORMATS_EXT = { + '5': 'flv', + '6': 'flv', + '13': '3gp', + '17': '3gp', + '18': 'mp4', + '22': 'mp4', + '34': 'flv', + '35': 'flv', + '36': '3gp', + '37': 'mp4', + '38': 'mp4', + '43': 'webm', + '44': 'webm', + '45': 'webm', + '46': 'webm', + '59': 'mp4', + } + + @staticmethod + def _extract_url(webpage): + mobj = re.search( + r'<iframe[^>]+src="https?://(?:video\.google\.com/get_player\?.*?docid=|(?:docs|drive)\.google\.com/file/d/)(?P<id>[a-zA-Z0-9_-]{28})', + webpage) + if mobj: + return 'https://drive.google.com/file/d/%s' % mobj.group('id') + + def _real_extract(self, url): + video_id = self._match_id(url) + webpage = self._download_webpage( + 'http://docs.google.com/file/d/%s' % video_id, video_id, encoding='unicode_escape') + + reason = self._search_regex(r'"reason"\s*,\s*"([^"]+)', webpage, 'reason', default=None) + if reason: + raise ExtractorError(reason) + + title = self._search_regex(r'"title"\s*,\s*"([^"]+)', webpage, 'title') + duration = int_or_none(self._search_regex( + r'"length_seconds"\s*,\s*"([^"]+)', webpage, 'length seconds', default=None)) + fmt_stream_map = self._search_regex( + r'"fmt_stream_map"\s*,\s*"([^"]+)', webpage, 'fmt stream map').split(',') + fmt_list = self._search_regex(r'"fmt_list"\s*,\s*"([^"]+)', webpage, 'fmt_list').split(',') + + formats = [] + for fmt, fmt_stream in zip(fmt_list, fmt_stream_map): + fmt_id, fmt_url = fmt_stream.split('|') + resolution = fmt.split('/')[1] + width, height = resolution.split('x') + formats.append({ + 'url': fmt_url, + 'format_id': fmt_id, + 'resolution': resolution, + 'width': int_or_none(width), + 'height': int_or_none(height), + 'ext': self._FORMATS_EXT[fmt_id], + }) + self._sort_formats(formats) + + return { + 'id': video_id, + 'title': title, + 'thumbnail': self._og_search_thumbnail(webpage), + 'duration': duration, + 'formats': formats, + } diff --git a/youtube_dl/extractor/googleplus.py b/youtube_dl/extractor/googleplus.py index fcefe54cd..731bacd67 100644 --- a/youtube_dl/extractor/googleplus.py +++ b/youtube_dl/extractor/googleplus.py @@ -61,7 +61,7 @@ class GooglePlusIE(InfoExtractor): 'width': int(width), 'height': int(height), } for width, height, video_url in re.findall( - r'\d+,(\d+),(\d+),"(https?://redirector\.googlevideo\.com.*?)"', webpage)] + r'\d+,(\d+),(\d+),"(https?://[^.]+\.googleusercontent.com.*?)"', webpage)] self._sort_formats(formats) return { diff --git 
a/youtube_dl/extractor/gputechconf.py b/youtube_dl/extractor/gputechconf.py new file mode 100644 index 000000000..145b55bf3 --- /dev/null +++ b/youtube_dl/extractor/gputechconf.py @@ -0,0 +1,55 @@ +# coding: utf-8 +from __future__ import unicode_literals + +from .common import InfoExtractor +from ..utils import ( + xpath_element, + xpath_text, + int_or_none, + parse_duration, +) + + +class GPUTechConfIE(InfoExtractor): + _VALID_URL = r'https?://on-demand\.gputechconf\.com/gtc/2015/video/S(?P<id>\d+)\.html' + _TEST = { + 'url': 'http://on-demand.gputechconf.com/gtc/2015/video/S5156.html', + 'md5': 'a8862a00a0fd65b8b43acc5b8e33f798', + 'info_dict': { + 'id': '5156', + 'ext': 'mp4', + 'title': 'Coordinating More Than 3 Million CUDA Threads for Social Network Analysis', + 'duration': 1219, + } + } + + def _real_extract(self, url): + video_id = self._match_id(url) + webpage = self._download_webpage(url, video_id) + + root_path = self._search_regex(r'var\s+rootPath\s*=\s*"([^"]+)', webpage, 'root path', 'http://evt.dispeak.com/nvidia/events/gtc15/') + xml_file_id = self._search_regex(r'var\s+xmlFileId\s*=\s*"([^"]+)', webpage, 'xml file id') + + doc = self._download_xml('%sxml/%s.xml' % (root_path, xml_file_id), video_id) + + metadata = xpath_element(doc, 'metadata') + http_host = xpath_text(metadata, 'httpHost', 'http host', True) + mbr_videos = xpath_element(metadata, 'MBRVideos') + + formats = [] + for mbr_video in mbr_videos.findall('MBRVideo'): + stream_name = xpath_text(mbr_video, 'streamName') + if stream_name: + formats.append({ + 'url': 'http://%s/%s' % (http_host, stream_name.replace('mp4:', '')), + 'tbr': int_or_none(xpath_text(mbr_video, 'bitrate')), + }) + self._sort_formats(formats) + + return { + 'id': video_id, + 'title': xpath_text(metadata, 'title'), + 'duration': parse_duration(xpath_text(metadata, 'endTime')), + 'creator': xpath_text(metadata, 'speaker'), + 'formats': formats, + } diff --git a/youtube_dl/extractor/groupon.py b/youtube_dl/extractor/groupon.py index 8b9e0e2f8..63c05b6a6 100644 --- a/youtube_dl/extractor/groupon.py +++ b/youtube_dl/extractor/groupon.py @@ -18,6 +18,8 @@ class GrouponIE(InfoExtractor): 'id': 'tubGNycTo_9Uxg82uESj4i61EYX8nyuf', 'ext': 'mp4', 'title': 'Bikram Yoga Huntington Beach | Orange County', + 'description': 'md5:d41d8cd98f00b204e9800998ecf8427e', + 'duration': 44.961, }, }], 'params': { diff --git a/youtube_dl/extractor/hearthisat.py b/youtube_dl/extractor/hearthisat.py index a19b31ac0..7d8698655 100644 --- a/youtube_dl/extractor/hearthisat.py +++ b/youtube_dl/extractor/hearthisat.py @@ -4,12 +4,10 @@ from __future__ import unicode_literals import re from .common import InfoExtractor -from ..compat import ( - compat_urllib_request, - compat_urlparse, -) +from ..compat import compat_urlparse from ..utils import ( HEADRequest, + sanitized_Request, str_to_int, urlencode_postdata, urlhandle_detect_ext, @@ -47,7 +45,7 @@ class HearThisAtIE(InfoExtractor): r'intTrackId\s*=\s*(\d+)', webpage, 'track ID') payload = urlencode_postdata({'tracks[]': track_id}) - req = compat_urllib_request.Request(self._PLAYLIST_URL, payload) + req = sanitized_Request(self._PLAYLIST_URL, payload) req.add_header('Content-type', 'application/x-www-form-urlencoded') track = self._download_json(req, track_id, 'Downloading playlist')[0] diff --git a/youtube_dl/extractor/hotnewhiphop.py b/youtube_dl/extractor/hotnewhiphop.py index 651784b73..31e219945 100644 --- a/youtube_dl/extractor/hotnewhiphop.py +++ b/youtube_dl/extractor/hotnewhiphop.py @@ -3,13 +3,11 @@ from 
__future__ import unicode_literals import base64 from .common import InfoExtractor -from ..compat import ( - compat_urllib_parse, - compat_urllib_request, -) +from ..compat import compat_urllib_parse from ..utils import ( ExtractorError, HEADRequest, + sanitized_Request, ) @@ -41,7 +39,7 @@ class HotNewHipHopIE(InfoExtractor): ('mediaType', 's'), ('mediaId', video_id), ]) - r = compat_urllib_request.Request( + r = sanitized_Request( 'http://www.hotnewhiphop.com/ajax/media/getActions/', data=reqdata) r.add_header('Content-Type', 'application/x-www-form-urlencoded') mkd = self._download_json( diff --git a/youtube_dl/extractor/hotstar.py b/youtube_dl/extractor/hotstar.py new file mode 100644 index 000000000..a7c3ce4ab --- /dev/null +++ b/youtube_dl/extractor/hotstar.py @@ -0,0 +1,77 @@ +# coding: utf-8 +from __future__ import unicode_literals + +from .common import InfoExtractor +from ..utils import ( + ExtractorError, + determine_ext, + int_or_none, +) + + +class HotStarIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?hotstar\.com/.*?[/-](?P<id>\d{10})' + _TEST = { + 'url': 'http://www.hotstar.com/on-air-with-aib--english-1000076273', + 'info_dict': { + 'id': '1000076273', + 'ext': 'mp4', + 'title': 'On Air With AIB - English', + 'description': 'md5:c957d8868e9bc793ccb813691cc4c434', + 'timestamp': 1447227000, + 'upload_date': '20151111', + 'duration': 381, + }, + 'params': { + # m3u8 download + 'skip_download': True, + } + } + + _GET_CONTENT_TEMPLATE = 'http://account.hotstar.com/AVS/besc?action=GetAggregatedContentDetails&channel=PCTV&contentId=%s' + _GET_CDN_TEMPLATE = 'http://getcdn.hotstar.com/AVS/besc?action=GetCDN&asJson=Y&channel=%s&id=%s&type=%s' + + def _download_json(self, url_or_request, video_id, note='Downloading JSON metadata', fatal=True): + json_data = super(HotStarIE, self)._download_json(url_or_request, video_id, note, fatal=fatal) + if json_data['resultCode'] != 'OK': + if fatal: + raise ExtractorError(json_data['errorDescription']) + return None + return json_data['resultObj'] + + def _real_extract(self, url): + video_id = self._match_id(url) + video_data = self._download_json( + self._GET_CONTENT_TEMPLATE % video_id, + video_id)['contentInfo'][0] + + formats = [] + # PCTV for extracting f4m manifest + for f in ('TABLET',): + format_data = self._download_json( + self._GET_CDN_TEMPLATE % (f, video_id, 'VOD'), + video_id, 'Downloading %s JSON metadata' % f, fatal=False) + if format_data: + format_url = format_data['src'] + ext = determine_ext(format_url) + if ext == 'm3u8': + formats.extend(self._extract_m3u8_formats(format_url, video_id, 'mp4', m3u8_id='hls', fatal=False)) + elif ext == 'f4m': + # produce broken files + continue + else: + formats.append({ + 'url': format_url, + 'width': int_or_none(format_data.get('width')), + 'height': int_or_none(format_data.get('height')), + }) + self._sort_formats(formats) + + return { + 'id': video_id, + 'title': video_data['episodeTitle'], + 'description': video_data.get('description'), + 'duration': int_or_none(video_data.get('duration')), + 'timestamp': int_or_none(video_data.get('broadcastDate')), + 'formats': formats, + } diff --git a/youtube_dl/extractor/howcast.py b/youtube_dl/extractor/howcast.py index 16677f179..e8f51e545 100644 --- a/youtube_dl/extractor/howcast.py +++ b/youtube_dl/extractor/howcast.py @@ -16,6 +16,7 @@ class HowcastIE(InfoExtractor): 'description': 'md5:dbe792e5f6f1489027027bf2eba188a3', 'timestamp': 1276081287, 'upload_date': '20100609', + 'duration': 56.823, }, 'params': { # m3u8 download diff 
--git a/youtube_dl/extractor/hypem.py b/youtube_dl/extractor/hypem.py index aa0724a02..b3706fe6d 100644 --- a/youtube_dl/extractor/hypem.py +++ b/youtube_dl/extractor/hypem.py @@ -4,12 +4,10 @@ import json import time from .common import InfoExtractor -from ..compat import ( - compat_urllib_parse, - compat_urllib_request, -) +from ..compat import compat_urllib_parse from ..utils import ( ExtractorError, + sanitized_Request, ) @@ -30,15 +28,12 @@ class HypemIE(InfoExtractor): track_id = self._match_id(url) data = {'ax': 1, 'ts': time.time()} - data_encoded = compat_urllib_parse.urlencode(data) - complete_url = url + "?" + data_encoded - request = compat_urllib_request.Request(complete_url) + request = sanitized_Request(url + '?' + compat_urllib_parse.urlencode(data)) response, urlh = self._download_webpage_handle( request, track_id, 'Downloading webpage with the url') - cookie = urlh.headers.get('Set-Cookie', '') html_tracks = self._html_search_regex( - r'(?ms)<script type="application/json" id="displayList-data">\s*(.*?)\s*</script>', + r'(?ms)<script type="application/json" id="displayList-data">(.+?)</script>', response, 'tracks') try: track_list = json.loads(html_tracks) @@ -48,15 +43,14 @@ class HypemIE(InfoExtractor): key = track['key'] track_id = track['id'] - artist = track['artist'] title = track['song'] - serve_url = "http://hypem.com/serve/source/%s/%s" % (track_id, key) - request = compat_urllib_request.Request( - serve_url, '', {'Content-Type': 'application/json'}) - request.add_header('cookie', cookie) + request = sanitized_Request( + 'http://hypem.com/serve/source/%s/%s' % (track_id, key), + '', {'Content-Type': 'application/json'}) song_data = self._download_json(request, track_id, 'Downloading metadata') - final_url = song_data["url"] + final_url = song_data['url'] + artist = track.get('artist') return { 'id': track_id, diff --git a/youtube_dl/extractor/ign.py b/youtube_dl/extractor/ign.py index bf2d2041b..c45c68c1d 100644 --- a/youtube_dl/extractor/ign.py +++ b/youtube_dl/extractor/ign.py @@ -3,6 +3,10 @@ from __future__ import unicode_literals import re from .common import InfoExtractor +from ..utils import ( + int_or_none, + parse_iso8601, +) class IGNIE(InfoExtractor): @@ -11,25 +15,24 @@ class IGNIE(InfoExtractor): Some videos of it.ign.com are also supported """ - _VALID_URL = r'https?://.+?\.ign\.com/(?P<type>videos|show_videos|articles|(?:[^/]*/feature))(/.+)?/(?P<name_or_id>.+)' + _VALID_URL = r'https?://.+?\.ign\.com/(?:[^/]+/)?(?P<type>videos|show_videos|articles|feature|(?:[^/]+/\d+/video))(/.+)?/(?P<name_or_id>.+)' IE_NAME = 'ign.com' - _CONFIG_URL_TEMPLATE = 'http://www.ign.com/videos/configs/id/%s.config' - _DESCRIPTION_RE = [ - r'<span class="page-object-description">(.+?)</span>', - r'id="my_show_video">.*?<p>(.*?)</p>', - r'<meta name="description" content="(.*?)"', - ] + _API_URL_TEMPLATE = 'http://apis.ign.com/video/v3/videos/%s' + _EMBED_RE = r'<iframe[^>]+?["\']((?:https?:)?//.+?\.ign\.com.+?/embed.+?)["\']' _TESTS = [ { 'url': 'http://www.ign.com/videos/2013/06/05/the-last-of-us-review', - 'md5': 'eac8bdc1890980122c3b66f14bdd02e9', + 'md5': 'febda82c4bafecd2d44b6e1a18a595f8', 'info_dict': { 'id': '8f862beef863986b2785559b9e1aa599', 'ext': 'mp4', 'title': 'The Last of Us Review', 'description': 'md5:c8946d4260a4d43a00d5ae8ed998870c', + 'timestamp': 1370440800, + 'upload_date': '20130605', + 'uploader_id': 'cberidon@ign.com', } }, { @@ -44,6 +47,9 @@ class IGNIE(InfoExtractor): 'ext': 'mp4', 'title': 'GTA 5 Video Review', 'description': 'Rockstar drops 
the mic on this generation of games. Watch our review of the masterly Grand Theft Auto V.', + 'timestamp': 1379339880, + 'upload_date': '20130916', + 'uploader_id': 'danieljkrupa@gmail.com', }, }, { @@ -52,6 +58,9 @@ class IGNIE(InfoExtractor): 'ext': 'mp4', 'title': '26 Twisted Moments from GTA 5 in Slow Motion', 'description': 'The twisted beauty of GTA 5 in stunning slow motion.', + 'timestamp': 1386878820, + 'upload_date': '20131212', + 'uploader_id': 'togilvie@ign.com', }, }, ], @@ -66,12 +75,20 @@ class IGNIE(InfoExtractor): 'id': '078fdd005f6d3c02f63d795faa1b984f', 'ext': 'mp4', 'title': 'Rewind Theater - Wild Trailer Gamescom 2014', - 'description': ( - 'Giant skeletons, bloody hunts, and captivating' - ' natural beauty take our breath away.' - ), + 'description': 'Brian and Jared explore Michel Ancel\'s captivating new preview.', + 'timestamp': 1408047180, + 'upload_date': '20140814', + 'uploader_id': 'jamesduggan1990@gmail.com', }, }, + { + 'url': 'http://me.ign.com/en/videos/112203/video/how-hitman-aims-to-be-different-than-every-other-s', + 'only_matching': True, + }, + { + 'url': 'http://me.ign.com/ar/angry-birds-2/106533/video/lrd-ldyy-lwl-lfylm-angry-birds', + 'only_matching': True, + }, ] def _find_video_id(self, webpage): @@ -82,7 +99,7 @@ class IGNIE(InfoExtractor): r'<object id="vid_(.+?)"', r'<meta name="og:image" content=".*/(.+?)-(.+?)/.+.jpg"', ] - return self._search_regex(res_id, webpage, 'video id') + return self._search_regex(res_id, webpage, 'video id', default=None) def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) @@ -91,7 +108,7 @@ class IGNIE(InfoExtractor): webpage = self._download_webpage(url, name_or_id) if page_type != 'video': multiple_urls = re.findall( - '<param name="flashvars"[^>]*value="[^"]*?url=(https?://www\.ign\.com/videos/.*?)["&]', + r'<param name="flashvars"[^>]*value="[^"]*?url=(https?://www\.ign\.com/videos/.*?)["&]', webpage) if multiple_urls: entries = [self.url_result(u, ie='IGN') for u in multiple_urls] @@ -102,22 +119,51 @@ class IGNIE(InfoExtractor): } video_id = self._find_video_id(webpage) - result = self._get_video_info(video_id) - description = self._html_search_regex(self._DESCRIPTION_RE, - webpage, 'video description', flags=re.DOTALL) - result['description'] = description - return result + if not video_id: + return self.url_result(self._search_regex( + self._EMBED_RE, webpage, 'embed url')) + return self._get_video_info(video_id) def _get_video_info(self, video_id): - config_url = self._CONFIG_URL_TEMPLATE % video_id - config = self._download_json(config_url, video_id) - media = config['playlist']['media'] + api_data = self._download_json( + self._API_URL_TEMPLATE % video_id, video_id) + + formats = [] + m3u8_url = api_data['refs'].get('m3uUrl') + if m3u8_url: + formats.extend(self._extract_m3u8_formats( + m3u8_url, video_id, 'mp4', 'm3u8_native', + m3u8_id='hls', fatal=False)) + f4m_url = api_data['refs'].get('f4mUrl') + if f4m_url: + formats.extend(self._extract_f4m_formats( + f4m_url, video_id, f4m_id='hds', fatal=False)) + for asset in api_data['assets']: + formats.append({ + 'url': asset['url'], + 'tbr': asset.get('actual_bitrate_kbps'), + 'fps': asset.get('frame_rate'), + 'height': int_or_none(asset.get('height')), + 'width': int_or_none(asset.get('width')), + }) + self._sort_formats(formats) + + thumbnails = [{ + 'url': thumbnail['url'] + } for thumbnail in api_data.get('thumbnails', [])] + + metadata = api_data['metadata'] return { - 'id': media['metadata']['videoId'], - 'url': media['url'], - 
'title': media['metadata']['title'], - 'thumbnail': media['poster'][0]['url'].replace('{size}', 'grande'), + 'id': api_data.get('videoId') or video_id, + 'title': metadata.get('longTitle') or metadata.get('name') or metadata.get('title'), + 'description': metadata.get('description'), + 'timestamp': parse_iso8601(metadata.get('publishDate')), + 'duration': int_or_none(metadata.get('duration')), + 'display_id': metadata.get('slug') or video_id, + 'uploader_id': metadata.get('creator'), + 'thumbnails': thumbnails, + 'formats': formats, } @@ -125,16 +171,17 @@ class OneUPIE(IGNIE): _VALID_URL = r'https?://gamevideos\.1up\.com/(?P<type>video)/id/(?P<name_or_id>.+)\.html' IE_NAME = '1up.com' - _DESCRIPTION_RE = r'<div id="vid_summary">(.+?)</div>' - _TESTS = [{ 'url': 'http://gamevideos.1up.com/video/id/34976.html', - 'md5': '68a54ce4ebc772e4b71e3123d413163d', + 'md5': 'c9cc69e07acb675c31a16719f909e347', 'info_dict': { 'id': '34976', 'ext': 'mp4', 'title': 'Sniper Elite V2 - Trailer', - 'description': 'md5:5d289b722f5a6d940ca3136e9dae89cf', + 'description': 'md5:bf0516c5ee32a3217aa703e9b1bc7826', + 'timestamp': 1313099220, + 'upload_date': '20110811', + 'uploader_id': 'IGN', } }] @@ -143,3 +190,36 @@ def _real_extract(self, url): result = super(OneUPIE, self)._real_extract(url) result['id'] = mobj.group('name_or_id') return result + + +class PCMagIE(IGNIE): + _VALID_URL = r'https?://(?:www\.)?pcmag\.com/(?P<type>videos|article2)(/.+)?/(?P<name_or_id>.+)' + IE_NAME = 'pcmag' + + _EMBED_RE = r'iframe.setAttribute\("src",\s*__util.objToUrlString\("http://widgets\.ign\.com/video/embed/content.html?[^"]*url=([^"]+)["&]' + + _TESTS = [{ + 'url': 'http://www.pcmag.com/videos/2015/01/06/010615-whats-new-now-is-gogo-snooping-on-your-data', + 'md5': '212d6154fd0361a2781075f1febbe9ad', + 'info_dict': { + 'id': 'ee10d774b508c9b8ec07e763b9125b91', + 'ext': 'mp4', + 'title': '010615_What\'s New Now: Is GoGo Snooping on Your Data?', + 'description': 'md5:a7071ae64d2f68cc821c729d4ded6bb3', + 'timestamp': 1420571160, + 'upload_date': '20150106', + 'uploader_id': 'cozzipix@gmail.com', + } + }, { + 'url': 'http://www.pcmag.com/article2/0,2817,2470156,00.asp', + 'md5': '94130c1ca07ba0adb6088350681f16c1', + 'info_dict': { + 'id': '042e560ba94823d43afcb12ddf7142ca', + 'ext': 'mp4', + 'title': 'HTC\'s Weird New Re Camera - What\'s New Now', + 'description': 'md5:53433c45df96d2ea5d0fda18be2ca908', + 'timestamp': 1412953920, + 'upload_date': '20141010', + 'uploader_id': 'chris_snyder@pcmag.com', + } + }] diff --git a/youtube_dl/extractor/imdb.py b/youtube_dl/extractor/imdb.py index 4bb574cf3..02e1e428e 100644 --- a/youtube_dl/extractor/imdb.py +++ b/youtube_dl/extractor/imdb.py @@ -4,8 +4,8 @@ import re import json from .common import InfoExtractor -from ..compat import ( - compat_urlparse, +from ..utils import ( + qualities, ) @@ -30,24 +30,33 @@ class ImdbIE(InfoExtractor): descr = self._html_search_regex( r'(?s)<span itemprop="description">(.*?)</span>', webpage, 'description', fatal=False) - available_formats = re.findall( - r'case \'(?P<f_id>.*?)\' :$\s+url = \'(?P<path>.*?)\'', webpage, - flags=re.MULTILINE) + player_url = 'http://www.imdb.com/video/imdb/vi%s/imdb/single' % video_id + player_page = self._download_webpage( + player_url, video_id, 'Downloading player page') + # the player page contains the info for the default format, we have to + # fetch other pages for the rest of the formats + extra_formats = re.findall(r'href="(?P<url>%s.*?)".*?>(?P<name>.*?)<' % re.escape(player_url), player_page) + format_pages = [ + 
self._download_webpage( + f_url, video_id, 'Downloading info for %s format' % f_name) + for f_url, f_name in extra_formats] + format_pages.append(player_page) + + quality = qualities(['SD', '480p', '720p']) formats = [] - for f_id, f_path in available_formats: - f_path = f_path.strip() - format_page = self._download_webpage( - compat_urlparse.urljoin(url, f_path), - 'Downloading info for %s format' % f_id) + for format_page in format_pages: json_data = self._search_regex( r'<script[^>]+class="imdb-player-data"[^>]*?>(.*?)</script>', format_page, 'json data', flags=re.DOTALL) info = json.loads(json_data) format_info = info['videoPlayerObject']['video'] + f_id = format_info['ffname'] formats.append({ 'format_id': f_id, 'url': format_info['videoInfoList'][0]['videoUrl'], + 'quality': quality(f_id), }) + self._sort_formats(formats) return { 'id': video_id, diff --git a/youtube_dl/extractor/imgur.py b/youtube_dl/extractor/imgur.py index 70c8ca64e..85e9344aa 100644 --- a/youtube_dl/extractor/imgur.py +++ b/youtube_dl/extractor/imgur.py @@ -13,7 +13,7 @@ from ..utils import ( class ImgurIE(InfoExtractor): - _VALID_URL = r'https?://(?:i\.)?imgur\.com/(?!gallery)(?P<id>[a-zA-Z0-9]+)' + _VALID_URL = r'https?://(?:i\.)?imgur\.com/(?:(?:gallery|topic/[^/]+)/)?(?P<id>[a-zA-Z0-9]{6,})(?:[/?#&]+|\.[a-z]+)?$' _TESTS = [{ 'url': 'https://i.imgur.com/A61SaA1.gifv', @@ -21,7 +21,7 @@ class ImgurIE(InfoExtractor): 'id': 'A61SaA1', 'ext': 'mp4', 'title': 're:Imgur GIF$|MRW gifv is up and running without any bugs$', - 'description': 're:The origin of the Internet\'s most viral images$|The Internet\'s visual storytelling community\. Explore, share, and discuss the best visual stories the Internet has to offer\.$', + 'description': 'Imgur: The most awesome images on the Internet.', }, }, { 'url': 'https://imgur.com/A61SaA1', @@ -29,8 +29,20 @@ class ImgurIE(InfoExtractor): 'id': 'A61SaA1', 'ext': 'mp4', 'title': 're:Imgur GIF$|MRW gifv is up and running without any bugs$', - 'description': 're:The origin of the Internet\'s most viral images$|The Internet\'s visual storytelling community\. Explore, share, and discuss the best visual stories the Internet has to offer\.$', + 'description': 'Imgur: The most awesome images on the Internet.', }, + }, { + 'url': 'https://imgur.com/gallery/YcAQlkx', + 'info_dict': { + 'id': 'YcAQlkx', + 'ext': 'mp4', + 'title': 'Classic Steve Carell gif...cracks me up everytime....damn the repost downvotes....', + 'description': 'Imgur: The most awesome images on the Internet.' 
+ + } + }, { + 'url': 'http://imgur.com/topic/Funny/N8rOudd', + 'only_matching': True, }] def _real_extract(self, url): @@ -100,25 +112,38 @@ class ImgurIE(InfoExtractor): class ImgurAlbumIE(InfoExtractor): - _VALID_URL = r'https?://(?:i\.)?imgur\.com/gallery/(?P<id>[a-zA-Z0-9]+)' + _VALID_URL = r'https?://(?:i\.)?imgur\.com/(?:(?:a|gallery|topic/[^/]+)/)?(?P<id>[a-zA-Z0-9]{5})(?:[/?#&]+)?$' - _TEST = { + _TESTS = [{ 'url': 'http://imgur.com/gallery/Q95ko', 'info_dict': { 'id': 'Q95ko', }, 'playlist_count': 25, - } + }, { + 'url': 'http://imgur.com/a/j6Orj', + 'only_matching': True, + }, { + 'url': 'http://imgur.com/topic/Aww/ll5Vk', + 'only_matching': True, + }] def _real_extract(self, url): album_id = self._match_id(url) album_images = self._download_json( 'http://imgur.com/gallery/%s/album_images/hit.json?all=true' % album_id, - album_id)['data']['images'] - - entries = [ - self.url_result('http://imgur.com/%s' % image['hash']) - for image in album_images if image.get('hash')] - - return self.playlist_result(entries, album_id) + album_id, fatal=False) + + if album_images: + data = album_images.get('data') + if data and isinstance(data, dict): + images = data.get('images') + if images and isinstance(images, list): + entries = [ + self.url_result('http://imgur.com/%s' % image['hash']) + for image in images if image.get('hash')] + return self.playlist_result(entries, album_id) + + # Fallback to single video + return self.url_result('http://imgur.com/%s' % album_id, ImgurIE.ie_key()) diff --git a/youtube_dl/extractor/infoq.py b/youtube_dl/extractor/infoq.py index 71cfd12c5..016af2084 100644 --- a/youtube_dl/extractor/infoq.py +++ b/youtube_dl/extractor/infoq.py @@ -1,3 +1,5 @@ +# coding: utf-8 + from __future__ import unicode_literals import base64 @@ -5,8 +7,9 @@ import base64 from .common import InfoExtractor from ..compat import ( compat_urllib_parse_unquote, - compat_urlparse, + compat_parse_qs, ) +from ..utils import determine_ext class InfoQIE(InfoExtractor): @@ -16,7 +19,7 @@ class InfoQIE(InfoExtractor): 'url': 'http://www.infoq.com/presentations/A-Few-of-My-Favorite-Python-Things', 'md5': 'b5ca0e0a8c1fed93b0e65e48e462f9a2', 'info_dict': { - 'id': '12-jan-pythonthings', + 'id': 'A-Few-of-My-Favorite-Python-Things', 'ext': 'mp4', 'description': 'Mike Pirnat presents some tips and tricks, standard libraries and third party packages that make programming in Python a richer experience.', 'title': 'A Few of My Favorite [Python] Things', @@ -24,40 +27,84 @@ class InfoQIE(InfoExtractor): }, { 'url': 'http://www.infoq.com/fr/presentations/changez-avis-sur-javascript', 'only_matching': True, + }, { + 'url': 'http://www.infoq.com/cn/presentations/openstack-continued-delivery', + 'md5': '4918d0cca1497f2244572caf626687ef', + 'info_dict': { + 'id': 'openstack-continued-delivery', + 'title': 'OpenStack持续交付之路', + 'ext': 'flv', + 'description': 'md5:308d981fb28fa42f49f9568322c683ff', + }, }] - def _real_extract(self, url): - video_id = self._match_id(url) - webpage = self._download_webpage(url, video_id) + def _extract_bokecc_videos(self, webpage, video_id): + # TODO: bokecc.com is a Chinese video cloud platform + # It should have an independent extractor but I don't have other + # examples using bokecc + player_params_str = self._html_search_regex( + r'<script[^>]+src="http://p\.bokecc\.com/player\?([^"]+)', + webpage, 'player params', default=None) - video_title = self._html_search_regex(r'<title>(.*?)</title>', webpage, 'title') - video_description = self._html_search_meta('description', 
webpage, 'description') + player_params = compat_parse_qs(player_params_str) + + info_xml = self._download_xml( + 'http://p.bokecc.com/servlet/playinfo?uid=%s&vid=%s&m=1' % ( + player_params['siteid'][0], player_params['vid'][0]), video_id) + + return [{ + 'format_id': 'bokecc', + 'url': quality.find('./copy').attrib['playurl'], + 'preference': int(quality.attrib['value']), + } for quality in info_xml.findall('./video/quality')] + def _extract_rtmp_videos(self, webpage): # The server URL is hardcoded video_url = 'rtmpe://video.infoq.com/cfx/st/' # Extract video URL encoded_id = self._search_regex( - r"jsclassref\s*=\s*'([^']*)'", webpage, 'encoded id') + r"jsclassref\s*=\s*'([^']*)'", webpage, 'encoded id', default=None) + real_id = compat_urllib_parse_unquote(base64.b64decode(encoded_id.encode('ascii')).decode('utf-8')) playpath = 'mp4:' + real_id - video_filename = playpath.split('/')[-1] - video_id, extension = video_filename.split('.') - - http_base = self._search_regex( - r'EXPRESSINSTALL_SWF\s*=\s*[^"]*"((?:https?:)?//[^/"]+/)', webpage, - 'HTTP base URL') - - formats = [{ + return [{ 'format_id': 'rtmp', 'url': video_url, - 'ext': extension, + 'ext': determine_ext(playpath), 'play_path': playpath, - }, { + }] + + def _extract_http_videos(self, webpage): + http_video_url = self._search_regex(r'P\.s\s*=\s*\'([^\']+)\'', webpage, 'video URL') + + policy = self._search_regex(r'InfoQConstants.scp\s*=\s*\'([^\']+)\'', webpage, 'policy') + signature = self._search_regex(r'InfoQConstants.scs\s*=\s*\'([^\']+)\'', webpage, 'signature') + key_pair_id = self._search_regex(r'InfoQConstants.sck\s*=\s*\'([^\']+)\'', webpage, 'key-pair-id') + + return [{ 'format_id': 'http', - 'url': compat_urlparse.urljoin(url, http_base) + real_id, + 'url': http_video_url, + 'http_headers': { + 'Cookie': 'CloudFront-Policy=%s; CloudFront-Signature=%s; CloudFront-Key-Pair-Id=%s' % ( + policy, signature, key_pair_id), + }, }] + + def _real_extract(self, url): + video_id = self._match_id(url) + webpage = self._download_webpage(url, video_id) + + video_title = self._html_search_regex(r'<title>(.*?)</title>', webpage, 'title') + video_description = self._html_search_meta('description', webpage, 'description') + + if '/cn/' in url: + # for China videos, HTTP video URL exists but always fails with 403 + formats = self._extract_bokecc_videos(webpage, video_id) + else: + formats = self._extract_rtmp_videos(webpage) + self._extract_http_videos(webpage) + self._sort_formats(formats) return { diff --git a/youtube_dl/extractor/instagram.py b/youtube_dl/extractor/instagram.py index 3d78f78c4..e5e16ca3b 100644 --- a/youtube_dl/extractor/instagram.py +++ b/youtube_dl/extractor/instagram.py @@ -10,8 +10,8 @@ from ..utils import ( class InstagramIE(InfoExtractor): - _VALID_URL = r'https://instagram\.com/p/(?P<id>[\da-zA-Z]+)' - _TEST = { + _VALID_URL = r'https?://(?:www\.)?instagram\.com/p/(?P<id>[^/?#&]+)' + _TESTS = [{ 'url': 'https://instagram.com/p/aye83DjauH/?foo=bar#abc', 'md5': '0d2da106a9d2631273e192b372806516', 'info_dict': { @@ -21,7 +21,10 @@ class InstagramIE(InfoExtractor): 'title': 'Video by naomipq', 'description': 'md5:1f17f0ab29bd6fe2bfad705f58de3cb8', } - } + }, { + 'url': 'https://instagram.com/p/-Cmh1cukG2/', + 'only_matching': True, + }] def _real_extract(self, url): video_id = self._match_id(url) @@ -44,7 +47,7 @@ class InstagramIE(InfoExtractor): class InstagramUserIE(InfoExtractor): - _VALID_URL = r'https://instagram\.com/(?P<username>[^/]{2,})/?(?:$|[?#])' + _VALID_URL = 
r'https?://(?:www\.)?instagram\.com/(?P<username>[^/]{2,})/?(?:$|[?#])' IE_DESC = 'Instagram user profile' IE_NAME = 'instagram:user' _TEST = { diff --git a/youtube_dl/extractor/iprima.py b/youtube_dl/extractor/iprima.py index 821c8ec10..36baf3245 100644 --- a/youtube_dl/extractor/iprima.py +++ b/youtube_dl/extractor/iprima.py @@ -6,12 +6,10 @@ from random import random from math import floor from .common import InfoExtractor -from ..compat import ( - compat_urllib_request, -) from ..utils import ( ExtractorError, remove_end, + sanitized_Request, ) @@ -61,7 +59,7 @@ class IPrimaIE(InfoExtractor): (floor(random() * 1073741824), floor(random() * 1073741824)) ) - req = compat_urllib_request.Request(player_url) + req = sanitized_Request(player_url) req.add_header('Referer', url) playerpage = self._download_webpage(req, video_id) diff --git a/youtube_dl/extractor/iqiyi.py b/youtube_dl/extractor/iqiyi.py index ce1ab3820..66a70a181 100644 --- a/youtube_dl/extractor/iqiyi.py +++ b/youtube_dl/extractor/iqiyi.py @@ -16,7 +16,7 @@ class IqiyiIE(InfoExtractor): IE_NAME = 'iqiyi' IE_DESC = '爱奇艺' - _VALID_URL = r'http://(?:www\.)iqiyi.com/v_.+?\.html' + _VALID_URL = r'http://(?:[^.]+\.)?iqiyi\.com/.+\.html' _TESTS = [{ 'url': 'http://www.iqiyi.com/v_19rrojlavg.html', @@ -84,6 +84,15 @@ class IqiyiIE(InfoExtractor): 'params': { 'skip_download': True, }, + }, { + 'url': 'http://www.iqiyi.com/w_19rt6o8t9p.html', + 'only_matching': True, + }, { + 'url': 'http://www.iqiyi.com/a_19rrhbc6kt.html', + 'only_matching': True, + }, { + 'url': 'http://yule.iqiyi.com/pcb.html', + 'only_matching': True, }] _FORMATS_MAP = [ @@ -191,7 +200,7 @@ class IqiyiIE(InfoExtractor): 'vid': video_id, 'vinfo': 1, 'tm': tm, - 'enc': self.md5_text((enc_key + tail)[1:64:2] + tail), + 'enc': self.md5_text(enc_key + tail), 'qyid': _uuid, 'tn': random.random(), 'um': 0, @@ -205,7 +214,8 @@ class IqiyiIE(InfoExtractor): def get_enc_key(self, swf_url, video_id): # TODO: automatic key extraction - enc_key = 'eac64f22daf001da6ba9aa8da4d501508bbe90a4d4091fea3b0582a85b38c2cc' # last update at 2015-09-23-23 for Zombie::bite + # last update at 2015-12-18 for Zombie::bite + enc_key = '8b6b683780897eb8d9a48a02ccc4817d'[::-1] return enc_key def _real_extract(self, url): diff --git a/youtube_dl/extractor/ivi.py b/youtube_dl/extractor/ivi.py index e82594444..029878d24 100644 --- a/youtube_dl/extractor/ivi.py +++ b/youtube_dl/extractor/ivi.py @@ -5,11 +5,9 @@ import re import json from .common import InfoExtractor -from ..compat import ( - compat_urllib_request, -) from ..utils import ( ExtractorError, + sanitized_Request, ) @@ -78,7 +76,7 @@ class IviIE(InfoExtractor): ] } - request = compat_urllib_request.Request(api_url, json.dumps(data)) + request = sanitized_Request(api_url, json.dumps(data)) video_json_page = self._download_webpage( request, video_id, 'Downloading video JSON') diff --git a/youtube_dl/extractor/jeuxvideo.py b/youtube_dl/extractor/jeuxvideo.py index 1df084d87..eef7daa29 100644 --- a/youtube_dl/extractor/jeuxvideo.py +++ b/youtube_dl/extractor/jeuxvideo.py @@ -28,7 +28,7 @@ class JeuxVideoIE(InfoExtractor): mobj = re.match(self._VALID_URL, url) title = mobj.group(1) webpage = self._download_webpage(url, title) - title = self._html_search_meta('name', webpage) + title = self._html_search_meta('name', webpage) or self._og_search_title(webpage) config_url = self._html_search_regex( r'data-src="(/contenu/medias/video.php.*?)"', webpage, 'config URL') diff --git a/youtube_dl/extractor/jwplatform.py b/youtube_dl/extractor/jwplatform.py 
new file mode 100644 index 000000000..8e90d5986 --- /dev/null +++ b/youtube_dl/extractor/jwplatform.py @@ -0,0 +1,70 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..utils import int_or_none + + +class JWPlatformIE(InfoExtractor): + _VALID_URL = r'(?:https?://content\.jwplatform\.com/(?:feeds|players|jw6)/|jwplatform:)(?P<id>[a-zA-Z0-9]{8})' + _TEST = { + 'url': 'http://content.jwplatform.com/players/nPripu9l-ALJ3XQCI.js', + 'md5': 'fa8899fa601eb7c83a64e9d568bdf325', + 'info_dict': { + 'id': 'nPripu9l', + 'ext': 'mov', + 'title': 'Big Buck Bunny Trailer', + 'description': 'Big Buck Bunny is a short animated film by the Blender Institute. It is made using free and open source software.', + 'upload_date': '20081127', + 'timestamp': 1227796140, + } + } + + @staticmethod + def _extract_url(webpage): + mobj = re.search( + r'<script[^>]+?src=["\'](?P<url>(?:https?:)?//content.jwplatform.com/players/[a-zA-Z0-9]{8})', + webpage) + if mobj: + return mobj.group('url') + + def _real_extract(self, url): + video_id = self._match_id(url) + json_data = self._download_json('http://content.jwplatform.com/feeds/%s.json' % video_id, video_id) + video_data = json_data['playlist'][0] + subtitles = {} + for track in video_data['tracks']: + if track['kind'] == 'captions': + subtitles[track['label']] = [{'url': self._proto_relative_url(track['file'])}] + + formats = [] + for source in video_data['sources']: + source_url = self._proto_relative_url(source['file']) + source_type = source.get('type') or '' + if source_type == 'application/vnd.apple.mpegurl': + formats.extend(self._extract_m3u8_formats( + source_url, video_id, 'mp4', 'm3u8_native', fatal=False)) + elif source_type.startswith('audio'): + formats.append({ + 'url': source_url, + 'vcodec': 'none', + }) + else: + formats.append({ + 'url': source_url, + 'width': int_or_none(source.get('width')), + 'height': int_or_none(source.get('height')), + }) + self._sort_formats(formats) + + return { + 'id': video_id, + 'title': video_data['title'], + 'description': video_data.get('description'), + 'thumbnail': self._proto_relative_url(video_data.get('image')), + 'timestamp': int_or_none(video_data.get('pubdate')), + 'subtitles': subtitles, + 'formats': formats, + } diff --git a/youtube_dl/extractor/kaltura.py b/youtube_dl/extractor/kaltura.py index 3dca0e566..ccbc39c66 100644 --- a/youtube_dl/extractor/kaltura.py +++ b/youtube_dl/extractor/kaltura.py @@ -2,12 +2,18 @@ from __future__ import unicode_literals import re +import base64 from .common import InfoExtractor -from ..compat import compat_urllib_parse +from ..compat import ( + compat_urllib_parse, + compat_urlparse, +) from ..utils import ( + clean_html, ExtractorError, int_or_none, + unsmuggle_url, ) @@ -16,7 +22,7 @@ class KalturaIE(InfoExtractor): (?: kaltura:(?P<partner_id_s>\d+):(?P<id_s>[0-9a-z_]+)| https?:// - (:?(?:www|cdnapisec)\.)?kaltura\.com/ + (:?(?:www|cdnapi(?:sec)?)\.)?kaltura\.com/ (?: (?: # flash player @@ -39,7 +45,7 @@ class KalturaIE(InfoExtractor): 'info_dict': { 'id': '1_1jc2y3e4', 'ext': 'mp4', - 'title': 'Track 4', + 'title': 'Straight from the Heart', 'upload_date': '20131219', 'uploader_id': 'mlundberg@wolfgangsvault.com', 'description': 'The Allman Brothers Band, 12/16/1981', @@ -109,43 +115,65 @@ class KalturaIE(InfoExtractor): 'version': '-1', }, { - 'action': 'getContextData', - 'contextDataParams:objectType': 'KalturaEntryContextDataParams', - 'contextDataParams:referrer': 'http://www.kaltura.com/', - 
'contextDataParams:streamerType': 'http', + 'action': 'getbyentryid', 'entryId': video_id, - 'service': 'baseentry', + 'service': 'flavorAsset', }, ] return self._kaltura_api_call( video_id, actions, note='Downloading video info JSON') def _real_extract(self, url): + url, smuggled_data = unsmuggle_url(url, {}) + mobj = re.match(self._VALID_URL, url) partner_id = mobj.group('partner_id_s') or mobj.group('partner_id') or mobj.group('partner_id_html5') entry_id = mobj.group('id_s') or mobj.group('id') or mobj.group('id_html5') - info, source_data = self._get_video_info(entry_id, partner_id) - - formats = [{ - 'format_id': '%(fileExt)s-%(bitrate)s' % f, - 'ext': f['fileExt'], - 'tbr': f['bitrate'], - 'fps': f.get('frameRate'), - 'filesize_approx': int_or_none(f.get('size'), invscale=1024), - 'container': f.get('containerFormat'), - 'vcodec': f.get('videoCodecId'), - 'height': f.get('height'), - 'width': f.get('width'), - 'url': '%s/flavorId/%s' % (info['dataUrl'], f['id']), - } for f in source_data['flavorAssets']] + info, flavor_assets = self._get_video_info(entry_id, partner_id) + + source_url = smuggled_data.get('source_url') + if source_url: + referrer = base64.b64encode( + '://'.join(compat_urlparse.urlparse(source_url)[:2]) + .encode('utf-8')).decode('utf-8') + else: + referrer = None + + formats = [] + for f in flavor_assets: + # Continue if asset is not ready + if f['status'] != 2: + continue + video_url = '%s/flavorId/%s' % (info['dataUrl'], f['id']) + if referrer: + video_url += '?referrer=%s' % referrer + formats.append({ + 'format_id': '%(fileExt)s-%(bitrate)s' % f, + 'ext': f.get('fileExt'), + 'tbr': int_or_none(f['bitrate']), + 'fps': int_or_none(f.get('frameRate')), + 'filesize_approx': int_or_none(f.get('size'), invscale=1024), + 'container': f.get('containerFormat'), + 'vcodec': f.get('videoCodecId'), + 'height': int_or_none(f.get('height')), + 'width': int_or_none(f.get('width')), + 'url': video_url, + }) + m3u8_url = info['dataUrl'].replace('format/url', 'format/applehttp') + if referrer: + m3u8_url += '?referrer=%s' % referrer + formats.extend(self._extract_m3u8_formats( + m3u8_url, entry_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)) + + self._check_formats(formats, entry_id) self._sort_formats(formats) return { 'id': entry_id, 'title': info['name'], 'formats': formats, - 'description': info.get('description'), + 'description': clean_html(info.get('description')), 'thumbnail': info.get('thumbnailUrl'), 'duration': info.get('duration'), 'timestamp': info.get('createdAt'), diff --git a/youtube_dl/extractor/keek.py b/youtube_dl/extractor/keek.py index c0956ba09..94a03d277 100644 --- a/youtube_dl/extractor/keek.py +++ b/youtube_dl/extractor/keek.py @@ -1,46 +1,39 @@ +# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor class KeekIE(InfoExtractor): - _VALID_URL = r'https?://(?:www\.)?keek\.com/(?:!|\w+/keeks/)(?P<id>\w+)' + _VALID_URL = r'https?://(?:www\.)?keek\.com/keek/(?P<id>\w+)' IE_NAME = 'keek' _TEST = { - 'url': 'https://www.keek.com/ytdl/keeks/NODfbab', - 'md5': '09c5c109067536c1cec8bac8c21fea05', + 'url': 'https://www.keek.com/keek/NODfbab', + 'md5': '9b0636f8c0f7614afa4ea5e4c6e57e83', 'info_dict': { 'id': 'NODfbab', 'ext': 'mp4', - 'uploader': 'youtube-dl project', - 'uploader_id': 'ytdl', - 'title': 'test chars: "\'/\\\u00e4<>This is a test video for youtube-dl.For more information, contact phihag@phihag.de .', + 'title': 'md5:35d42050a3ece241d5ddd7fdcc6fd896', + 'uploader': 'ytdl', + 'uploader_id': 'eGT5bab', }, } def 
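The smuggled-URL handling above derives a referrer token that Kaltura's CDN checks: base64 of the scheme://netloc origin of the embedding page, appended to both the flavor and m3u8 URLs. The same computation in isolation:

import base64
try:
    from urllib.parse import urlparse  # Python 3
except ImportError:
    from urlparse import urlparse  # Python 2

def kaltura_referrer(source_url):
    parts = urlparse(source_url)
    origin = '://'.join((parts.scheme, parts.netloc))  # urlparse(...)[:2] in the diff
    return base64.b64encode(origin.encode('utf-8')).decode('utf-8')

# appended as video_url += '?referrer=%s' % kaltura_referrer(source_url)
print(kaltura_referrer('https://example.com/page.html'))  # aHR0cHM6Ly9leGFtcGxlLmNvbQ==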
_real_extract(self, url): video_id = self._match_id(url) - video_url = 'http://cdn.keek.com/keek/video/%s' % video_id - thumbnail = 'http://cdn.keek.com/keek/thumbnail/%s/w100/h75' % video_id webpage = self._download_webpage(url, video_id) - raw_desc = self._html_search_meta('description', webpage) - if raw_desc: - uploader = self._html_search_regex( - r'Watch (.*?)\s+\(', raw_desc, 'uploader', fatal=False) - uploader_id = self._html_search_regex( - r'Watch .*?\(@(.+?)\)', raw_desc, 'uploader_id', fatal=False) - else: - uploader = None - uploader_id = None - return { 'id': video_id, - 'url': video_url, + 'url': self._og_search_video_url(webpage), 'ext': 'mp4', - 'title': self._og_search_title(webpage), - 'thumbnail': thumbnail, - 'uploader': uploader, - 'uploader_id': uploader_id, + 'title': self._og_search_description(webpage).strip(), + 'thumbnail': self._og_search_thumbnail(webpage), + 'uploader': self._search_regex( + r'data-username=(["\'])(?P<uploader>.+?)\1', webpage, + 'uploader', fatal=False, group='uploader'), + 'uploader_id': self._search_regex( + r'data-user-id=(["\'])(?P<uploader_id>.+?)\1', webpage, + 'uploader id', fatal=False, group='uploader_id'), } diff --git a/youtube_dl/extractor/keezmovies.py b/youtube_dl/extractor/keezmovies.py index 82eddec51..126ca13df 100644 --- a/youtube_dl/extractor/keezmovies.py +++ b/youtube_dl/extractor/keezmovies.py @@ -1,12 +1,11 @@ from __future__ import unicode_literals -import os import re from .common import InfoExtractor -from ..compat import ( - compat_urllib_parse_urlparse, - compat_urllib_request, +from ..utils import ( + sanitized_Request, + url_basename, ) @@ -14,19 +13,20 @@ class KeezMoviesIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?keezmovies\.com/video/.+?(?P<id>[0-9]+)(?:[/?&]|$)' _TEST = { 'url': 'http://www.keezmovies.com/video/petite-asian-lady-mai-playing-in-bathtub-1214711', - 'md5': '6e297b7e789329923fcf83abb67c9289', + 'md5': '1c1e75d22ffa53320f45eeb07bc4cdc0', 'info_dict': { 'id': '1214711', 'ext': 'mp4', 'title': 'Petite Asian Lady Mai Playing In Bathtub', 'age_limit': 18, + 'thumbnail': 're:^https?://.*\.jpg$', } } def _real_extract(self, url): video_id = self._match_id(url) - req = compat_urllib_request.Request(url) + req = sanitized_Request(url) req.add_header('Cookie', 'age_verified=1') webpage = self._download_webpage(req, video_id) @@ -38,21 +38,29 @@ class KeezMoviesIE(InfoExtractor): video_title = self._html_search_regex( r'<h1 [^>]*>([^<]+)', webpage, 'title') - video_url = self._html_search_regex( - r'(?s)html5VideoPlayer = .*?src="([^"]+)"', webpage, 'video URL') - path = compat_urllib_parse_urlparse(video_url).path - extension = os.path.splitext(path)[1][1:] - format = path.split('/')[4].split('_')[:2] - format = "-".join(format) + flashvars = self._parse_json(self._search_regex( + r'var\s+flashvars\s*=\s*([^;]+);', webpage, 'flashvars'), video_id) + + formats = [] + for height in (180, 240, 480): + if flashvars.get('quality_%dp' % height): + video_url = flashvars['quality_%dp' % height] + a_format = { + 'url': video_url, + 'height': height, + 'format_id': '%dp' % height, + } + filename_parts = url_basename(video_url).split('_') + if len(filename_parts) >= 2 and re.match(r'\d+[Kk]', filename_parts[1]): + a_format['tbr'] = int(filename_parts[1][:-1]) + formats.append(a_format) age_limit = self._rta_search(webpage) return { 'id': video_id, 'title': video_title, - 'url': video_url, - 'ext': extension, - 'format': format, - 'format_id': format, + 'formats': formats, 'age_limit': age_limit, + 
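The rewritten KeezMovies extraction above in a nutshell: the page's flashvars JSON carries one URL per quality tier, and the bitrate can often be recovered from the second underscore-separated token of the file name. A reduced form, exercised on illustrative values:

import re

def formats_from_flashvars(flashvars):
    formats = []
    for height in (180, 240, 480):
        video_url = flashvars.get('quality_%dp' % height)
        if not video_url:
            continue
        fmt = {'url': video_url, 'height': height, 'format_id': '%dp' % height}
        parts = video_url.rsplit('/', 1)[-1].split('_')  # rough url_basename()
        if len(parts) >= 2 and re.match(r'\d+[Kk]', parts[1]):
            fmt['tbr'] = int(parts[1][:-1])
        formats.append(fmt)
    return formats

print(formats_from_flashvars({'quality_480p': 'http://cdn.example/video_750k_480p.mp4'}))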
'thumbnail': flashvars.get('image_url') } diff --git a/youtube_dl/extractor/kuwo.py b/youtube_dl/extractor/kuwo.py index fa233377d..0c8ed5d07 100644 --- a/youtube_dl/extractor/kuwo.py +++ b/youtube_dl/extractor/kuwo.py @@ -57,6 +57,7 @@ class KuwoIE(KuwoBaseIE): 'upload_date': '20080122', 'description': 'md5:ed13f58e3c3bf3f7fd9fbc4e5a7aa75c' }, + 'skip': 'this song has been offline because of copyright issues', }, { 'url': 'http://www.kuwo.cn/yinyue/6446136/', 'info_dict': { @@ -76,9 +77,11 @@ class KuwoIE(KuwoBaseIE): webpage = self._download_webpage( url, song_id, note='Download song detail info', errnote='Unable to get song detail info') + if '对不起,该歌曲由于版权问题已被下线,将返回网站首页' in webpage: + raise ExtractorError('this song has been offline because of copyright issues', expected=True) song_name = self._html_search_regex( - r'<h1[^>]+title="([^"]+)">', webpage, 'song name') + r'(?s)class="(?:[^"\s]+\s+)*title(?:\s+[^"\s]+)*".*?<h1[^>]+title="([^"]+)"', webpage, 'song name') singer_name = self._html_search_regex( r'<div[^>]+class="s_img">\s*<a[^>]+title="([^>]+)"', webpage, 'singer name', fatal=False) diff --git a/youtube_dl/extractor/letv.py b/youtube_dl/extractor/letv.py index a28abb0f0..be648000e 100644 --- a/youtube_dl/extractor/letv.py +++ b/youtube_dl/extractor/letv.py @@ -8,14 +8,15 @@ import time from .common import InfoExtractor from ..compat import ( compat_urllib_parse, - compat_urllib_request, - compat_urlparse, + compat_ord, ) from ..utils import ( determine_ext, ExtractorError, parse_iso8601, + sanitized_Request, int_or_none, + encode_data_uri, ) @@ -25,15 +26,16 @@ class LetvIE(InfoExtractor): _TESTS = [{ 'url': 'http://www.letv.com/ptv/vplay/22005890.html', - 'md5': 'cab23bd68d5a8db9be31c9a222c1e8df', + 'md5': 'edadcfe5406976f42f9f266057ee5e40', 'info_dict': { 'id': '22005890', 'ext': 'mp4', 'title': '第87届奥斯卡颁奖礼完美落幕 《鸟人》成最大赢家', - 'timestamp': 1424747397, - 'upload_date': '20150224', 'description': 'md5:a9cb175fd753e2962176b7beca21a47c', - } + }, + 'params': { + 'hls_prefer_native': True, + }, }, { 'url': 'http://www.letv.com/ptv/vplay/1415246.html', 'info_dict': { @@ -42,16 +44,22 @@ class LetvIE(InfoExtractor): 'title': '美人天下01', 'description': 'md5:f88573d9d7225ada1359eaf0dbf8bcda', }, + 'params': { + 'hls_prefer_native': True, + }, }, { 'note': 'This video is available only in Mainland China, thus a proxy is needed', 'url': 'http://www.letv.com/ptv/vplay/1118082.html', - 'md5': 'f80936fbe20fb2f58648e81386ff7927', + 'md5': '2424c74948a62e5f31988438979c5ad1', 'info_dict': { 'id': '1118082', 'ext': 'mp4', 'title': '与龙共舞 完整版', 'description': 'md5:7506a5eeb1722bb9d4068f85024e3986', }, + 'params': { + 'hls_prefer_native': True, + }, 'skip': 'Only available in China', }] @@ -74,6 +82,27 @@ class LetvIE(InfoExtractor): _loc3_ = self.ror(_loc3_, _loc2_ % 17) return _loc3_ + # see M3U8Encryption class in KLetvPlayer.swf + @staticmethod + def decrypt_m3u8(encrypted_data): + if encrypted_data[:5].decode('utf-8').lower() != 'vc_01': + return encrypted_data + encrypted_data = encrypted_data[5:] + + _loc4_ = bytearray() + while encrypted_data: + b = compat_ord(encrypted_data[0]) + _loc4_.extend([b // 16, b & 0x0f]) + encrypted_data = encrypted_data[1:] + idx = len(_loc4_) - 11 + _loc4_ = _loc4_[idx:] + _loc4_[:idx] + _loc7_ = bytearray() + while _loc4_: + _loc7_.append(_loc4_[0] * 16 + _loc4_[1]) + _loc4_ = _loc4_[2:] + + return bytes(_loc7_) + def _real_extract(self, url): media_id = self._match_id(url) page = self._download_webpage(url, media_id) @@ -85,7 +114,7 @@ class 
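decrypt_m3u8() added above is the trick that makes the new Letv flow work; an annotated standalone copy (it mirrors the M3U8Encryption class in KLetvPlayer.swf, per the source comment):

def decrypt_m3u8(encrypted_data):
    if encrypted_data[:5].decode('utf-8').lower() != 'vc_01':
        return encrypted_data  # not scrambled, pass through unchanged
    encrypted_data = encrypted_data[5:]

    # split every byte into its two 4-bit nibbles
    nibbles = bytearray()
    for b in bytearray(encrypted_data):
        nibbles.extend([b // 16, b & 0x0f])

    # rotate the nibble stream so the last 11 nibbles come first
    idx = len(nibbles) - 11
    nibbles = nibbles[idx:] + nibbles[:idx]

    # re-pack pairs of nibbles into bytes
    out = bytearray()
    for i in range(0, len(nibbles) - 1, 2):
        out.append(nibbles[i] * 16 + nibbles[i + 1])
    return bytes(out)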
LetvIE(InfoExtractor): 'tkey': self.calc_time_key(int(time.time())), 'domain': 'www.letv.com' } - play_json_req = compat_urllib_request.Request( + play_json_req = sanitized_Request( 'http://api.letv.com/mms/out/video/playJson?' + compat_urllib_parse.urlencode(params) ) cn_verification_proxy = self._downloader.params.get('cn_verification_proxy') @@ -115,23 +144,28 @@ class LetvIE(InfoExtractor): for format_id in formats: if format_id in dispatch: media_url = playurl['domain'][0] + dispatch[format_id][0] - - # Mimic what flvxz.com do - url_parts = list(compat_urlparse.urlparse(media_url)) - qs = dict(compat_urlparse.parse_qs(url_parts[4])) - qs.update({ - 'platid': '14', - 'splatid': '1401', - 'tss': 'no', - 'retry': 1 + media_url += '&' + compat_urllib_parse.urlencode({ + 'm3v': 1, + 'format': 1, + 'expect': 3, + 'rateid': format_id, }) - url_parts[4] = compat_urllib_parse.urlencode(qs) - media_url = compat_urlparse.urlunparse(url_parts) + + nodes_data = self._download_json( + media_url, media_id, + 'Download JSON metadata for format %s' % format_id) + + req = self._request_webpage( + nodes_data['nodelist'][0]['location'], media_id, + note='Downloading m3u8 information for format %s' % format_id) + + m3u8_data = self.decrypt_m3u8(req.read()) url_info_dict = { - 'url': media_url, + 'url': encode_data_uri(m3u8_data, 'application/vnd.apple.mpegurl'), 'ext': determine_ext(dispatch[format_id][1]), 'format_id': format_id, + 'protocol': 'm3u8', } if format_id[-1:] == 'p': diff --git a/youtube_dl/extractor/limelight.py b/youtube_dl/extractor/limelight.py new file mode 100644 index 000000000..fb03dd527 --- /dev/null +++ b/youtube_dl/extractor/limelight.py @@ -0,0 +1,229 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..utils import ( + determine_ext, + float_or_none, + int_or_none, +) + + +class LimelightBaseIE(InfoExtractor): + _PLAYLIST_SERVICE_URL = 'http://production-ps.lvp.llnw.net/r/PlaylistService/%s/%s/%s' + _API_URL = 'http://api.video.limelight.com/rest/organizations/%s/%s/%s/%s.json' + + def _call_playlist_service(self, item_id, method, fatal=True): + return self._download_json( + self._PLAYLIST_SERVICE_URL % (self._PLAYLIST_SERVICE_PATH, item_id, method), + item_id, 'Downloading PlaylistService %s JSON' % method, fatal=fatal) + + def _call_api(self, organization_id, item_id, method): + return self._download_json( + self._API_URL % (organization_id, self._API_PATH, item_id, method), + item_id, 'Downloading API %s JSON' % method) + + def _extract(self, item_id, pc_method, mobile_method, meta_method): + pc = self._call_playlist_service(item_id, pc_method) + metadata = self._call_api(pc['orgId'], item_id, meta_method) + mobile = self._call_playlist_service(item_id, mobile_method, fatal=False) + return pc, mobile, metadata + + def _extract_info(self, streams, mobile_urls, properties): + video_id = properties['media_id'] + formats = [] + + for stream in streams: + stream_url = stream.get('url') + if not stream_url: + continue + if '.f4m' in stream_url: + formats.extend(self._extract_f4m_formats(stream_url, video_id)) + else: + fmt = { + 'url': stream_url, + 'abr': float_or_none(stream.get('audioBitRate')), + 'vbr': float_or_none(stream.get('videoBitRate')), + 'fps': float_or_none(stream.get('videoFrameRate')), + 'width': int_or_none(stream.get('videoWidthInPixels')), + 'height': int_or_none(stream.get('videoHeightInPixels')), + 'ext': determine_ext(stream_url) + } + rtmp = 
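The decrypted playlist never touches disk: it is handed to the HLS code path as an RFC 2397 data: URI. A rough equivalent of the encode_data_uri() helper this changeset imports from ..utils (the real helper may differ in detail):

import base64

def encode_data_uri(data, mime_type):
    return 'data:%s;base64,%s' % (mime_type, base64.b64encode(data).decode('ascii'))

playlist = b'#EXTM3U\n#EXT-X-ENDLIST\n'
print(encode_data_uri(playlist, 'application/vnd.apple.mpegurl'))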
re.search(r'^(?P<url>rtmpe?://[^/]+/(?P<app>.+))/(?P<playpath>mp4:.+)$', stream_url) + if rtmp: + format_id = 'rtmp' + if stream.get('videoBitRate'): + format_id += '-%d' % int_or_none(stream['videoBitRate']) + fmt.update({ + 'url': rtmp.group('url'), + 'play_path': rtmp.group('playpath'), + 'app': rtmp.group('app'), + 'ext': 'flv', + 'format_id': format_id, + }) + formats.append(fmt) + + for mobile_url in mobile_urls: + media_url = mobile_url.get('mobileUrl') + if not media_url: + continue + format_id = mobile_url.get('targetMediaPlatform') + if determine_ext(media_url) == 'm3u8': + formats.extend(self._extract_m3u8_formats( + media_url, video_id, 'mp4', entry_protocol='m3u8_native', + preference=-1, m3u8_id=format_id)) + else: + formats.append({ + 'url': media_url, + 'format_id': format_id, + 'preference': -1, + }) + + self._sort_formats(formats) + + title = properties['title'] + description = properties.get('description') + timestamp = int_or_none(properties.get('publish_date') or properties.get('create_date')) + duration = float_or_none(properties.get('duration_in_milliseconds'), 1000) + filesize = int_or_none(properties.get('total_storage_in_bytes')) + categories = [properties.get('category')] + tags = properties.get('tags', []) + thumbnails = [{ + 'url': thumbnail['url'], + 'width': int_or_none(thumbnail.get('width')), + 'height': int_or_none(thumbnail.get('height')), + } for thumbnail in properties.get('thumbnails', []) if thumbnail.get('url')] + + subtitles = {} + for caption in properties.get('captions', {}): + lang = caption.get('language_code') + subtitles_url = caption.get('url') + if lang and subtitles_url: + subtitles[lang] = [{ + 'url': subtitles_url, + }] + + return { + 'id': video_id, + 'title': title, + 'description': description, + 'formats': formats, + 'timestamp': timestamp, + 'duration': duration, + 'filesize': filesize, + 'categories': categories, + 'tags': tags, + 'thumbnails': thumbnails, + 'subtitles': subtitles, + } + + +class LimelightMediaIE(LimelightBaseIE): + IE_NAME = 'limelight' + _VALID_URL = r'(?:limelight:media:|http://link\.videoplatform\.limelight\.com/media/\??\bmediaId=)(?P<id>[a-z0-9]{32})' + _TESTS = [{ + 'url': 'http://link.videoplatform.limelight.com/media/?mediaId=3ffd040b522b4485b6d84effc750cd86', + 'info_dict': { + 'id': '3ffd040b522b4485b6d84effc750cd86', + 'ext': 'flv', + 'title': 'HaP and the HB Prince Trailer', + 'description': 'md5:8005b944181778e313d95c1237ddb640', + 'thumbnail': 're:^https?://.*\.jpeg$', + 'duration': 144.23, + 'timestamp': 1244136834, + 'upload_date': '20090604', + }, + 'params': { + # rtmp download + 'skip_download': True, + }, + }, { + # video with subtitles + 'url': 'limelight:media:a3e00274d4564ec4a9b29b9466432335', + 'info_dict': { + 'id': 'a3e00274d4564ec4a9b29b9466432335', + 'ext': 'flv', + 'title': '3Play Media Overview Video', + 'description': '', + 'thumbnail': 're:^https?://.*\.jpeg$', + 'duration': 78.101, + 'timestamp': 1338929955, + 'upload_date': '20120605', + 'subtitles': 'mincount:9', + }, + 'params': { + # rtmp download + 'skip_download': True, + }, + }] + _PLAYLIST_SERVICE_PATH = 'media' + _API_PATH = 'media' + + def _real_extract(self, url): + video_id = self._match_id(url) + + pc, mobile, metadata = self._extract( + video_id, 'getPlaylistByMediaId', 'getMobilePlaylistByMediaId', 'properties') + + return self._extract_info( + pc['playlistItems'][0].get('streams', []), + mobile['mediaList'][0].get('mobileUrls', []) if mobile else [], + metadata) + + +class LimelightChannelIE(LimelightBaseIE): + 
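The Limelight stream loop above splits RTMP URLs into the pieces the downloader needs (connection URL, app, play path). The same regex, exercised on a made-up stream URL:

import re

RTMP_RE = re.compile(r'^(?P<url>rtmpe?://[^/]+/(?P<app>.+))/(?P<playpath>mp4:.+)$')

m = RTMP_RE.match('rtmpe://cp12345.edgefcs.net/ondemand/videos/mp4:clip_720.mp4')
if m:
    print(m.group('url'))       # rtmpe://cp12345.edgefcs.net/ondemand/videos
    print(m.group('app'))       # ondemand/videos
    print(m.group('playpath'))  # mp4:clip_720.mp4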
IE_NAME = 'limelight:channel' + _VALID_URL = r'(?:limelight:channel:|http://link\.videoplatform\.limelight\.com/media/\??\bchannelId=)(?P<id>[a-z0-9]{32})' + _TEST = { + 'url': 'http://link.videoplatform.limelight.com/media/?channelId=ab6a524c379342f9b23642917020c082', + 'info_dict': { + 'id': 'ab6a524c379342f9b23642917020c082', + 'title': 'Javascript Sample Code', + }, + 'playlist_mincount': 3, + } + _PLAYLIST_SERVICE_PATH = 'channel' + _API_PATH = 'channels' + + def _real_extract(self, url): + channel_id = self._match_id(url) + + pc, mobile, medias = self._extract( + channel_id, 'getPlaylistByChannelId', + 'getMobilePlaylistWithNItemsByChannelId?begin=0&count=-1', 'media') + + entries = [ + self._extract_info( + pc['playlistItems'][i].get('streams', []), + mobile['mediaList'][i].get('mobileUrls', []) if mobile else [], + medias['media_list'][i]) + for i in range(len(medias['media_list']))] + + return self.playlist_result(entries, channel_id, pc['title']) + + +class LimelightChannelListIE(LimelightBaseIE): + IE_NAME = 'limelight:channel_list' + _VALID_URL = r'(?:limelight:channel_list:|http://link\.videoplatform\.limelight\.com/media/\?.*?\bchannelListId=)(?P<id>[a-z0-9]{32})' + _TEST = { + 'url': 'http://link.videoplatform.limelight.com/media/?channelListId=301b117890c4465c8179ede21fd92e2b', + 'info_dict': { + 'id': '301b117890c4465c8179ede21fd92e2b', + 'title': 'Website - Hero Player', + }, + 'playlist_mincount': 2, + } + _PLAYLIST_SERVICE_PATH = 'channel_list' + + def _real_extract(self, url): + channel_list_id = self._match_id(url) + + channel_list = self._call_playlist_service(channel_list_id, 'getMobileChannelListById') + + entries = [ + self.url_result('limelight:channel:%s' % channel['id'], 'LimelightChannel') + for channel in channel_list['channelList']] + + return self.playlist_result(entries, channel_list_id, channel_list['title']) diff --git a/youtube_dl/extractor/livestream.py b/youtube_dl/extractor/livestream.py index 6d7733e41..38fb3d9e4 100644 --- a/youtube_dl/extractor/livestream.py +++ b/youtube_dl/extractor/livestream.py @@ -1,27 +1,29 @@ from __future__ import unicode_literals import re -import json import itertools from .common import InfoExtractor from ..compat import ( compat_str, - compat_urllib_parse_urlparse, compat_urlparse, ) from ..utils import ( - ExtractorError, find_xpath_attr, - int_or_none, - orderedSet, + xpath_attr, xpath_with_ns, + xpath_text, + orderedSet, + int_or_none, + float_or_none, + parse_iso8601, + determine_ext, ) class LivestreamIE(InfoExtractor): IE_NAME = 'livestream' - _VALID_URL = r'https?://(?:new\.)?livestream\.com/.*?/(?P<event_name>.*?)(/videos/(?P<id>[0-9]+)(?:/player)?)?/?(?:$|[?#])' + _VALID_URL = r'https?://(?:new\.)?livestream\.com/(?:accounts/(?P<account_id>\d+)|(?P<account_name>[^/]+))/(?:events/(?P<event_id>\d+)|(?P<event_name>[^/]+))(?:/videos/(?P<id>\d+))?' 
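A quick check of what the broadened Livestream pattern just above captures for the two URL shapes it now has to serve, numeric ids and vanity names:

import re

VALID_URL = (r'https?://(?:new\.)?livestream\.com/'
             r'(?:accounts/(?P<account_id>\d+)|(?P<account_name>[^/]+))/'
             r'(?:events/(?P<event_id>\d+)|(?P<event_name>[^/]+))'
             r'(?:/videos/(?P<id>\d+))?')

for url in (
    'http://new.livestream.com/CoheedandCambria/WebsterHall/videos/4719370',
    'http://livestream.com/accounts/12345/events/67890',
):
    m = re.match(VALID_URL, url)
    print({k: v for k, v in m.groupdict().items() if v})
# first:  account_name and event_name, plus the video id
# second: numeric account_id and event_id, no video id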
_TESTS = [{ 'url': 'http://new.livestream.com/CoheedandCambria/WebsterHall/videos/4719370', 'md5': '53274c76ba7754fb0e8d072716f2292b', @@ -29,7 +31,9 @@ class LivestreamIE(InfoExtractor): 'id': '4719370', 'ext': 'mp4', 'title': 'Live from Webster Hall NYC', + 'timestamp': 1350008072, 'upload_date': '20121012', + 'duration': 5968.0, 'like_count': int, 'view_count': int, 'thumbnail': 're:^http://.*\.jpg$' @@ -55,39 +59,20 @@ class LivestreamIE(InfoExtractor): 'url': 'http://livestream.com/bsww/concacafbeachsoccercampeonato2015', 'only_matching': True, }] + _API_URL_TEMPLATE = 'http://livestream.com/api/accounts/%s/events/%s' + + def _parse_smil_formats(self, smil, smil_url, video_id, namespace=None, f4m_params=None, transform_rtmp_url=None): + base_ele = find_xpath_attr( + smil, self._xpath_ns('.//meta', namespace), 'name', 'httpBase') + base = base_ele.get('content') if base_ele else 'http://livestreamvod-f.akamaihd.net/' - def _parse_smil(self, video_id, smil_url): formats = [] - _SWITCH_XPATH = ( - './/{http://www.w3.org/2001/SMIL20/Language}body/' - '{http://www.w3.org/2001/SMIL20/Language}switch') - smil_doc = self._download_xml( - smil_url, video_id, - note='Downloading SMIL information', - errnote='Unable to download SMIL information', - fatal=False) - if smil_doc is False: # Download failed - return formats - title_node = find_xpath_attr( - smil_doc, './/{http://www.w3.org/2001/SMIL20/Language}meta', - 'name', 'title') - if title_node is None: - self.report_warning('Cannot find SMIL id') - switch_node = smil_doc.find(_SWITCH_XPATH) - else: - title_id = title_node.attrib['content'] - switch_node = find_xpath_attr( - smil_doc, _SWITCH_XPATH, 'id', title_id) - if switch_node is None: - raise ExtractorError('Cannot find switch node') - video_nodes = switch_node.findall( - '{http://www.w3.org/2001/SMIL20/Language}video') + video_nodes = smil.findall(self._xpath_ns('.//video', namespace)) for vn in video_nodes: - tbr = int_or_none(vn.attrib.get('system-bitrate')) + tbr = int_or_none(vn.attrib.get('system-bitrate'), 1000) furl = ( - 'http://livestream-f.akamaihd.net/%s?v=3.0.3&fp=WIN%%2014,0,0,145' % - (vn.attrib['src'])) + '%s%s?v=3.0.3&fp=WIN%%2014,0,0,145' % (base, vn.attrib['src'])) if 'clipBegin' in vn.attrib: furl += '&ssek=' + vn.attrib['clipBegin'] formats.append({ @@ -106,97 +91,141 @@ class LivestreamIE(InfoExtractor): ('sd', 'progressive_url'), ('hd', 'progressive_url_hd'), ) - formats = [{ - 'format_id': format_id, - 'url': video_data[key], - 'quality': i + 1, - } for i, (format_id, key) in enumerate(FORMAT_KEYS) - if video_data.get(key)] + + formats = [] + for format_id, key in FORMAT_KEYS: + video_url = video_data.get(key) + if video_url: + ext = determine_ext(video_url) + if ext == 'm3u8': + continue + bitrate = int_or_none(self._search_regex( + r'(\d+)\.%s' % ext, video_url, 'bitrate', default=None)) + formats.append({ + 'url': video_url, + 'format_id': format_id, + 'tbr': bitrate, + 'ext': ext, + }) smil_url = video_data.get('smil_url') if smil_url: - formats.extend(self._parse_smil(video_id, smil_url)) + formats.extend(self._extract_smil_formats(smil_url, video_id)) + + m3u8_url = video_data.get('m3u8_url') + if m3u8_url: + formats.extend(self._extract_m3u8_formats( + m3u8_url, video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)) + + f4m_url = video_data.get('f4m_url') + if f4m_url: + formats.extend(self._extract_f4m_formats( + f4m_url, video_id, f4m_id='hds', fatal=False)) self._sort_formats(formats) + comments = [{ + 'author_id': comment.get('author_id'), + 
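The progressive-format loop above now infers bitrate from the file name, e.g. "..._1500.mp4" yields tbr 1500. Standalone form of that regex (host and file name are illustrative):

import re

def tbr_from_url(video_url):
    ext = video_url.rsplit('.', 1)[-1]
    m = re.search(r'(\d+)\.%s' % ext, video_url)
    return int(m.group(1)) if m else None

print(tbr_from_url('http://host.example/video_1500.mp4'))  # 1500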
'author': comment.get('author', {}).get('full_name'), + 'id': comment.get('id'), + 'text': comment['text'], + 'timestamp': parse_iso8601(comment.get('created_at')), + } for comment in video_data.get('comments', {}).get('data', [])] + return { 'id': video_id, 'formats': formats, 'title': video_data['caption'], + 'description': video_data.get('description'), 'thumbnail': video_data.get('thumbnail_url'), - 'upload_date': video_data['updated_at'].replace('-', '')[:8], + 'duration': float_or_none(video_data.get('duration'), 1000), + 'timestamp': parse_iso8601(video_data.get('publish_at')), 'like_count': video_data.get('likes', {}).get('total'), + 'comment_count': video_data.get('comments', {}).get('total'), 'view_count': video_data.get('views'), + 'comments': comments, } - def _extract_event(self, info): - event_id = compat_str(info['id']) - account = compat_str(info['owner_account_id']) - root_url = ( - 'https://new.livestream.com/api/accounts/{account}/events/{event}/' - 'feed.json'.format(account=account, event=event_id)) - - def _extract_videos(): - last_video = None - for i in itertools.count(1): - if last_video is None: - info_url = root_url - else: - info_url = '{root}?&id={id}&newer=-1&type=video'.format( - root=root_url, id=last_video) - videos_info = self._download_json(info_url, event_id, 'Downloading page {0}'.format(i))['data'] - videos_info = [v['data'] for v in videos_info if v['type'] == 'video'] - if not videos_info: - break - for v in videos_info: - yield self._extract_video_info(v) - last_video = videos_info[-1]['id'] - return self.playlist_result(_extract_videos(), event_id, info['full_name']) + def _extract_stream_info(self, stream_info): + broadcast_id = stream_info['broadcast_id'] + is_live = stream_info.get('is_live') + + formats = [] + smil_url = stream_info.get('play_url') + if smil_url: + formats.extend(self._extract_smil_formats(smil_url, broadcast_id)) + + entry_protocol = 'm3u8' if is_live else 'm3u8_native' + m3u8_url = stream_info.get('m3u8_url') + if m3u8_url: + formats.extend(self._extract_m3u8_formats( + m3u8_url, broadcast_id, 'mp4', entry_protocol, m3u8_id='hls', fatal=False)) + + rtsp_url = stream_info.get('rtsp_url') + if rtsp_url: + formats.append({ + 'url': rtsp_url, + 'format_id': 'rtsp', + }) + self._sort_formats(formats) + + return { + 'id': broadcast_id, + 'formats': formats, + 'title': self._live_title(stream_info['stream_title']) if is_live else stream_info['stream_title'], + 'thumbnail': stream_info.get('thumbnail_url'), + 'is_live': is_live, + } + + def _extract_event(self, event_data): + event_id = compat_str(event_data['id']) + account_id = compat_str(event_data['owner_account_id']) + feed_root_url = self._API_URL_TEMPLATE % (account_id, event_id) + '/feed.json' + + stream_info = event_data.get('stream_info') + if stream_info: + return self._extract_stream_info(stream_info) + + last_video = None + entries = [] + for i in itertools.count(1): + if last_video is None: + info_url = feed_root_url + else: + info_url = '{root}?&id={id}&newer=-1&type=video'.format( + root=feed_root_url, id=last_video) + videos_info = self._download_json( + info_url, event_id, 'Downloading page {0}'.format(i))['data'] + videos_info = [v['data'] for v in videos_info if v['type'] == 'video'] + if not videos_info: + break + for v in videos_info: + entries.append(self.url_result( + 'http://livestream.com/accounts/%s/events/%s/videos/%s' % (account_id, event_id, v['id']), + 'Livestream', v['id'], v['caption'])) + last_video = videos_info[-1]['id'] + return 
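The event feed walk above, reduced to its pagination skeleton: the first request hits the bare feed.json, later requests pass the last seen video id with newer=-1, and the loop stops on an empty page (endpoint shape as in the diff; fetch_json is injected so the sketch stays testable):

import itertools

def walk_event_feed(fetch_json, feed_root_url):
    last_video = None
    for page in itertools.count(1):  # page number, used only for notes in the real code
        if last_video is None:
            info_url = feed_root_url
        else:
            info_url = '{root}?&id={id}&newer=-1&type=video'.format(
                root=feed_root_url, id=last_video)
        items = [v['data'] for v in fetch_json(info_url)['data']
                 if v['type'] == 'video']
        if not items:
            return
        for item in items:
            yield item
        last_video = items[-1]['id']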
self.playlist_result(entries, event_id, event_data['full_name']) def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') - event_name = mobj.group('event_name') - webpage = self._download_webpage(url, video_id or event_name) - - og_video = self._og_search_video_url( - webpage, 'player url', fatal=False, default=None) - if og_video is not None: - query_str = compat_urllib_parse_urlparse(og_video).query - query = compat_urlparse.parse_qs(query_str) - if 'play_url' in query: - api_url = query['play_url'][0].replace('.smil', '') - info = json.loads(self._download_webpage( - api_url, video_id, 'Downloading video info')) - return self._extract_video_info(info) - - config_json = self._search_regex( - r'window.config = ({.*?});', webpage, 'window config') - info = json.loads(config_json)['event'] - - def is_relevant(vdata, vid): - result = vdata['type'] == 'video' - if video_id is not None: - result = result and compat_str(vdata['data']['id']) == vid - return result - - if video_id is None: - # This is an event page: - return self._extract_event(info) + event = mobj.group('event_id') or mobj.group('event_name') + account = mobj.group('account_id') or mobj.group('account_name') + api_url = self._API_URL_TEMPLATE % (account, event) + if video_id: + video_data = self._download_json( + api_url + '/videos/%s' % video_id, video_id) + return self._extract_video_info(video_data) else: - videos = [self._extract_video_info(video_data['data']) - for video_data in info['feed']['data'] - if is_relevant(video_data, video_id)] - if not videos: - raise ExtractorError('Cannot find video %s' % video_id) - return videos[0] + event_data = self._download_json(api_url, video_id) + return self._extract_event(event_data) # The original version of Livestream uses a different system class LivestreamOriginalIE(InfoExtractor): IE_NAME = 'livestream:original' _VALID_URL = r'''(?x)https?://original\.livestream\.com/ - (?P<user>[^/]+)/(?P<type>video|folder) - (?:\?.*?Id=|/)(?P<id>.*?)(&|$) + (?P<user>[^/\?#]+)(?:/(?P<type>video|folder) + (?:(?:\?.*?Id=|/)(?P<id>.*?)(&|$))?)? 
''' _TESTS = [{ 'url': 'http://original.livestream.com/dealbook/video?clipId=pla_8aa4a3f1-ba15-46a4-893b-902210e138fb', @@ -204,6 +233,8 @@ class LivestreamOriginalIE(InfoExtractor): 'id': 'pla_8aa4a3f1-ba15-46a4-893b-902210e138fb', 'ext': 'mp4', 'title': 'Spark 1 (BitCoin) with Cameron Winklevoss & Tyler Winklevoss of Winklevoss Capital', + 'duration': 771.301, + 'view_count': int, }, }, { 'url': 'https://original.livestream.com/newplay/folder?dirId=a07bf706-d0e4-4e75-a747-b021d84f2fd3', @@ -211,26 +242,60 @@ class LivestreamOriginalIE(InfoExtractor): 'id': 'a07bf706-d0e4-4e75-a747-b021d84f2fd3', }, 'playlist_mincount': 4, + }, { + # live stream + 'url': 'http://original.livestream.com/znsbahamas', + 'only_matching': True, }] - def _extract_video(self, user, video_id): - api_url = 'http://x{0}x.api.channel.livestream.com/2.0/clipdetails?extendedInfo=true&id={1}'.format(user, video_id) - + def _extract_video_info(self, user, video_id): + api_url = 'http://x%sx.api.channel.livestream.com/2.0/clipdetails?extendedInfo=true&id=%s' % (user, video_id) info = self._download_xml(api_url, video_id) - # this url is used on mobile devices - stream_url = 'http://x{0}x.api.channel.livestream.com/3.0/getstream.json?id={1}'.format(user, video_id) - stream_info = self._download_json(stream_url, video_id) + item = info.find('channel').find('item') - ns = {'media': 'http://search.yahoo.com/mrss'} - thumbnail_url = item.find(xpath_with_ns('media:thumbnail', ns)).attrib['url'] + title = xpath_text(item, 'title') + media_ns = {'media': 'http://search.yahoo.com/mrss'} + thumbnail_url = xpath_attr( + item, xpath_with_ns('media:thumbnail', media_ns), 'url') + duration = float_or_none(xpath_attr( + item, xpath_with_ns('media:content', media_ns), 'duration')) + ls_ns = {'ls': 'http://api.channel.livestream.com/2.0'} + view_count = int_or_none(xpath_text( + item, xpath_with_ns('ls:viewsCount', ls_ns))) return { 'id': video_id, - 'title': item.find('title').text, - 'url': stream_info['progressiveUrl'], + 'title': title, 'thumbnail': thumbnail_url, + 'duration': duration, + 'view_count': view_count, } + def _extract_video_formats(self, video_data, video_id, entry_protocol): + formats = [] + + progressive_url = video_data.get('progressiveUrl') + if progressive_url: + formats.append({ + 'url': progressive_url, + 'format_id': 'http', + }) + + m3u8_url = video_data.get('httpUrl') + if m3u8_url: + formats.extend(self._extract_m3u8_formats( + m3u8_url, video_id, 'mp4', entry_protocol, m3u8_id='hls', fatal=False)) + + rtsp_url = video_data.get('rtspUrl') + if rtsp_url: + formats.append({ + 'url': rtsp_url, + 'format_id': 'rtsp', + }) + + self._sort_formats(formats) + return formats + def _extract_folder(self, url, folder_id): webpage = self._download_webpage(url, folder_id) paths = orderedSet(re.findall( @@ -239,24 +304,45 @@ class LivestreamOriginalIE(InfoExtractor): <a\s+href="(?=https?://livestre\.am/) )([^"]+)"''', webpage)) - return { - '_type': 'playlist', - 'id': folder_id, - 'entries': [{ - '_type': 'url', - 'url': compat_urlparse.urljoin(url, p), - } for p in paths], - } + entries = [{ + '_type': 'url', + 'url': compat_urlparse.urljoin(url, p), + } for p in paths] + + return self.playlist_result(entries, folder_id) def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) - id = mobj.group('id') user = mobj.group('user') url_type = mobj.group('type') + content_id = mobj.group('id') if url_type == 'folder': - return self._extract_folder(url, id) + return self._extract_folder(url, content_id) else: - return 
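The LivestreamOriginal rewrite above reads title, thumbnail, duration and view count straight from the namespaced clipdetails XML. The same lookups with only the standard library, on a fabricated response:

import xml.etree.ElementTree as ET

xml_doc = '''<rss xmlns:media="http://search.yahoo.com/mrss"
     xmlns:ls="http://api.channel.livestream.com/2.0">
  <channel><item>
    <title>Sample clip</title>
    <media:thumbnail url="http://img.example/t.jpg"/>
    <media:content url="http://img.example/v.mp4" duration="771.301"/>
    <ls:viewsCount>42</ls:viewsCount>
  </item></channel></rss>'''

ns = {'media': 'http://search.yahoo.com/mrss',
      'ls': 'http://api.channel.livestream.com/2.0'}
item = ET.fromstring(xml_doc).find('channel/item')
print(item.find('media:thumbnail', ns).get('url'))           # thumbnail URL
print(float(item.find('media:content', ns).get('duration')))  # 771.301
print(int(item.find('ls:viewsCount', ns).text))               # 42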
self._extract_video(user, id) + # this url is used on mobile devices + stream_url = 'http://x%sx.api.channel.livestream.com/3.0/getstream.json' % user + info = {} + if content_id: + stream_url += '?id=%s' % content_id + info = self._extract_video_info(user, content_id) + else: + content_id = user + webpage = self._download_webpage(url, content_id) + info = { + 'title': self._og_search_title(webpage), + 'description': self._og_search_description(webpage), + 'thumbnail': self._search_regex(r'channelLogo.src\s*=\s*"([^"]+)"', webpage, 'thumbnail', None), + } + video_data = self._download_json(stream_url, content_id) + is_live = video_data.get('isLive') + entry_protocol = 'm3u8' if is_live else 'm3u8_native' + info.update({ + 'id': content_id, + 'title': self._live_title(info['title']) if is_live else info['title'], + 'formats': self._extract_video_formats(video_data, content_id, entry_protocol), + 'is_live': is_live, + }) + return info # The server doesn't support HEAD request, the generic extractor can't detect diff --git a/youtube_dl/extractor/lrt.py b/youtube_dl/extractor/lrt.py index e3236f7b5..863efd896 100644 --- a/youtube_dl/extractor/lrt.py +++ b/youtube_dl/extractor/lrt.py @@ -1,12 +1,9 @@ # coding: utf-8 from __future__ import unicode_literals -import re - from .common import InfoExtractor from ..utils import ( - determine_ext, - js_to_json, + int_or_none, parse_duration, remove_end, ) @@ -23,9 +20,11 @@ class LRTIE(InfoExtractor): 'title': 'Septynios Kauno dienos', 'description': 'md5:24d84534c7dc76581e59f5689462411a', 'duration': 1783, + 'view_count': int, + 'like_count': int, }, 'params': { - 'skip_download': True, # HLS download + 'skip_download': True, # m3u8 download }, } @@ -34,29 +33,23 @@ class LRTIE(InfoExtractor): webpage = self._download_webpage(url, video_id) title = remove_end(self._og_search_title(webpage), ' - LRT') + m3u8_url = self._search_regex( + r'file\s*:\s*(["\'])(?P<url>.+?)\1\s*\+\s*location\.hash\.substring\(1\)', + webpage, 'm3u8 url', group='url') + formats = self._extract_m3u8_formats(m3u8_url, video_id, 'mp4') + thumbnail = self._og_search_thumbnail(webpage) description = self._og_search_description(webpage) duration = parse_duration(self._search_regex( - r"'duration':\s*'([^']+)',", webpage, - 'duration', fatal=False, default=None)) + r'var\s+record_len\s*=\s*(["\'])(?P<duration>[0-9]+:[0-9]+:[0-9]+)\1', + webpage, 'duration', default=None, group='duration')) - formats = [] - for js in re.findall(r'(?s)config:\s*(\{.*?\})', webpage): - data = self._parse_json(js, video_id, transform_source=js_to_json) - if 'provider' not in data: - continue - if data['provider'] == 'rtmp': - formats.append({ - 'format_id': 'rtmp', - 'ext': determine_ext(data['file']), - 'url': data['streamer'], - 'play_path': 'mp4:%s' % data['file'], - 'preference': -1, - 'rtmp_real_time': True, - }) - else: - formats.extend( - self._extract_m3u8_formats(data['file'], video_id, 'mp4')) + view_count = int_or_none(self._html_search_regex( + r'<div[^>]+class=(["\']).*?record-desc-seen.*?\1[^>]*>(?P<count>.+?)</div>', + webpage, 'view count', fatal=False, group='count')) + like_count = int_or_none(self._search_regex( + r'<span[^>]+id=(["\'])flikesCount.*?\1>(?P<count>\d+)<', + webpage, 'like count', fatal=False, group='count')) return { 'id': video_id, @@ -65,4 +58,6 @@ class LRTIE(InfoExtractor): 'thumbnail': thumbnail, 'description': description, 'duration': duration, + 'view_count': view_count, + 'like_count': like_count, } diff --git a/youtube_dl/extractor/lynda.py 
b/youtube_dl/extractor/lynda.py index 378117270..d4e1ae99d 100644 --- a/youtube_dl/extractor/lynda.py +++ b/youtube_dl/extractor/lynda.py @@ -7,12 +7,12 @@ from .common import InfoExtractor from ..compat import ( compat_str, compat_urllib_parse, - compat_urllib_request, ) from ..utils import ( ExtractorError, clean_html, int_or_none, + sanitized_Request, ) @@ -25,7 +25,7 @@ class LyndaBaseIE(InfoExtractor): self._login() def _login(self): - (username, password) = self._get_login_info() + username, password = self._get_login_info() if username is None: return @@ -35,7 +35,7 @@ class LyndaBaseIE(InfoExtractor): 'remember': 'false', 'stayPut': 'false' } - request = compat_urllib_request.Request( + request = sanitized_Request( self._LOGIN_URL, compat_urllib_parse.urlencode(login_form).encode('utf-8')) login_page = self._download_webpage( request, None, 'Logging in as %s' % username) @@ -64,7 +64,7 @@ class LyndaBaseIE(InfoExtractor): 'remember': 'false', 'stayPut': 'false', } - request = compat_urllib_request.Request( + request = sanitized_Request( self._LOGIN_URL, compat_urllib_parse.urlencode(confirm_form).encode('utf-8')) login_page = self._download_webpage( request, None, @@ -82,6 +82,15 @@ class LyndaBaseIE(InfoExtractor): expected=True) raise ExtractorError('Unable to log in') + def _logout(self): + username, _ = self._get_login_info() + if username is None: + return + + self._download_webpage( + 'http://www.lynda.com/ajax/logout.aspx', None, + 'Logging out', 'Unable to log out', fatal=False) + class LyndaIE(LyndaBaseIE): IE_NAME = 'lynda' @@ -108,50 +117,47 @@ class LyndaIE(LyndaBaseIE): def _real_extract(self, url): video_id = self._match_id(url) - page = self._download_webpage( + video = self._download_json( 'http://www.lynda.com/ajax/player?videoId=%s&type=video' % video_id, video_id, 'Downloading video JSON') - video_json = json.loads(page) - if 'Status' in video_json: + if 'Status' in video: raise ExtractorError( - 'lynda returned error: %s' % video_json['Message'], expected=True) + 'lynda returned error: %s' % video['Message'], expected=True) - if video_json['HasAccess'] is False: + if video.get('HasAccess') is False: self.raise_login_required('Video %s is only available for members' % video_id) - video_id = compat_str(video_json['ID']) - duration = video_json['DurationInSeconds'] - title = video_json['Title'] + video_id = compat_str(video.get('ID') or video_id) + duration = int_or_none(video.get('DurationInSeconds')) + title = video['Title'] formats = [] - fmts = video_json.get('Formats') + fmts = video.get('Formats') if fmts: - formats.extend([ - { - 'url': fmt['Url'], - 'ext': fmt['Extension'], - 'width': fmt['Width'], - 'height': fmt['Height'], - 'filesize': fmt['FileSize'], - 'format_id': str(fmt['Resolution']) - } for fmt in fmts]) - - prioritized_streams = video_json.get('PrioritizedStreams') + formats.extend([{ + 'url': f['Url'], + 'ext': f.get('Extension'), + 'width': int_or_none(f.get('Width')), + 'height': int_or_none(f.get('Height')), + 'filesize': int_or_none(f.get('FileSize')), + 'format_id': compat_str(f.get('Resolution')) if f.get('Resolution') else None, + } for f in fmts if f.get('Url')]) + + prioritized_streams = video.get('PrioritizedStreams') if prioritized_streams: - formats.extend([ - { + for prioritized_stream_id, prioritized_stream in prioritized_streams.items(): + formats.extend([{ 'url': video_url, 'width': int_or_none(format_id), - 'format_id': format_id, - } for format_id, video_url in prioritized_streams['0'].items() - ]) + 'format_id': '%s-%s' % 
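The Lynda change here stops hardcoding stream group '0': every PrioritizedStreams entry now contributes formats, with the group id folded into format_id so ids stay unique. Reduced form, on an illustrative payload:

def lynda_prioritized_formats(prioritized_streams):
    formats = []
    for stream_id, stream in prioritized_streams.items():
        for format_id, video_url in stream.items():
            formats.append({
                'url': video_url,
                'width': int(format_id) if format_id.isdigit() else None,
                'format_id': '%s-%s' % (stream_id, format_id),
            })
    return formats

print(lynda_prioritized_formats({'0': {'720': 'http://host.example/v720.mp4'}}))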
(prioritized_stream_id, format_id), + } for format_id, video_url in prioritized_stream.items()]) self._check_formats(formats, video_id) self._sort_formats(formats) - subtitles = self.extract_subtitles(video_id, page) + subtitles = self.extract_subtitles(video_id) return { 'id': video_id, @@ -182,7 +188,7 @@ class LyndaIE(LyndaBaseIE): if srt: return srt - def _get_subtitles(self, video_id, webpage): + def _get_subtitles(self, video_id): url = 'http://www.lynda.com/ajax/player?videoId=%s&type=transcript' % video_id subs = self._download_json(url, None, False) if subs: @@ -204,12 +210,13 @@ class LyndaCourseIE(LyndaBaseIE): course_path = mobj.group('coursepath') course_id = mobj.group('courseid') - page = self._download_webpage( + course = self._download_json( 'http://www.lynda.com/ajax/player?courseId=%s&type=course' % course_id, course_id, 'Downloading course JSON') - course_json = json.loads(page) - if 'Status' in course_json and course_json['Status'] == 'NotFound': + self._logout() + + if course.get('Status') == 'NotFound': raise ExtractorError( 'Course %s does not exist' % course_id, expected=True) @@ -219,12 +226,13 @@ class LyndaCourseIE(LyndaBaseIE): # Might want to extract videos right here from video['Formats'] as it seems 'Formats' is not provided # by single video API anymore - for chapter in course_json['Chapters']: - for video in chapter['Videos']: - if video['HasAccess'] is False: + for chapter in course['Chapters']: + for video in chapter.get('Videos', []): + if video.get('HasAccess') is False: unaccessible_videos += 1 continue - videos.append(video['ID']) + if video.get('ID'): + videos.append(video['ID']) if unaccessible_videos > 0: self._downloader.report_warning( @@ -237,6 +245,6 @@ class LyndaCourseIE(LyndaBaseIE): 'Lynda') for video_id in videos] - course_title = course_json['Title'] + course_title = course.get('Title') return self.playlist_result(entries, course_id, course_title) diff --git a/youtube_dl/extractor/makertv.py b/youtube_dl/extractor/makertv.py new file mode 100644 index 000000000..3c34d4604 --- /dev/null +++ b/youtube_dl/extractor/makertv.py @@ -0,0 +1,32 @@ +# coding: utf-8 +from __future__ import unicode_literals + +from .common import InfoExtractor + + +class MakerTVIE(InfoExtractor): + _VALID_URL = r'https?://(?:(?:www\.)?maker\.tv/(?:[^/]+/)*video|makerplayer.com/embed/maker)/(?P<id>[a-zA-Z0-9]{12})' + _TEST = { + 'url': 'http://www.maker.tv/video/Fh3QgymL9gsc', + 'md5': 'ca237a53a8eb20b6dc5bd60564d4ab3e', + 'info_dict': { + 'id': 'Fh3QgymL9gsc', + 'ext': 'mp4', + 'title': 'Maze Runner: The Scorch Trials Official Movie Review', + 'description': 'md5:11ff3362d7ef1d679fdb649f6413975a', + 'upload_date': '20150918', + 'timestamp': 1442549540, + } + } + + def _real_extract(self, url): + video_id = self._match_id(url) + webpage = self._download_webpage(url, video_id) + jwplatform_id = self._search_regex(r'jw_?id="([^"]+)"', webpage, 'jwplatform id') + + return { + '_type': 'url_transparent', + 'id': video_id, + 'url': 'jwplatform:%s' % jwplatform_id, + 'ie_key': 'JWPlatform', + } diff --git a/youtube_dl/extractor/mdr.py b/youtube_dl/extractor/mdr.py index fc7499958..88334889e 100644 --- a/youtube_dl/extractor/mdr.py +++ b/youtube_dl/extractor/mdr.py @@ -1,64 +1,169 @@ +# coding: utf-8 from __future__ import unicode_literals -import re - from .common import InfoExtractor +from ..compat import compat_urlparse +from ..utils import ( + determine_ext, + int_or_none, + parse_duration, + parse_iso8601, + xpath_text, +) class MDRIE(InfoExtractor): - _VALID_URL = 
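MakerTV above is deliberately thin: it scrapes the JW Platform id out of the page and defers through a url_transparent result, so JWPlatformIE does the actual format work while the Maker id is kept as the entry id. The handoff shape, on a fabricated page snippet:

import re

webpage = '<div class="player" jw_id="Fh3QgymL9gsc"></div>'
jwplatform_id = re.search(r'jw_?id="([^"]+)"', webpage).group(1)
info = {
    '_type': 'url_transparent',
    'id': 'Fh3QgymL9gsc',
    'url': 'jwplatform:%s' % jwplatform_id,
    'ie_key': 'JWPlatform',
}
print(info['url'])  # jwplatform:Fh3QgymL9gsc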
r'^(?P<domain>https?://(?:www\.)?mdr\.de)/(?:.*)/(?P<type>video|audio)(?P<video_id>[^/_]+)(?:_|\.html)' + IE_DESC = 'MDR.DE and KiKA' + _VALID_URL = r'https?://(?:www\.)?(?:mdr|kika)\.de/(?:.*)/[a-z]+(?P<id>\d+)(?:_.+?)?\.html' - # No tests, MDR regularily deletes its videos - _TEST = { + _TESTS = [{ + # MDR regularily deletes its videos 'url': 'http://www.mdr.de/fakt/video189002.html', 'only_matching': True, - } + }, { + # audio + 'url': 'http://www.mdr.de/kultur/audio1312272_zc-15948bad_zs-86171fdd.html', + 'md5': '64c4ee50f0a791deb9479cd7bbe9d2fa', + 'info_dict': { + 'id': '1312272', + 'ext': 'mp3', + 'title': 'Feuilleton vom 30. Oktober 2015', + 'duration': 250, + 'uploader': 'MITTELDEUTSCHER RUNDFUNK', + }, + }, { + 'url': 'http://www.kika.de/baumhaus/videos/video19636.html', + 'md5': '4930515e36b06c111213e80d1e4aad0e', + 'info_dict': { + 'id': '19636', + 'ext': 'mp4', + 'title': 'Baumhaus vom 30. Oktober 2015', + 'duration': 134, + 'uploader': 'KIKA', + }, + }, { + 'url': 'http://www.kika.de/sendungen/einzelsendungen/weihnachtsprogramm/videos/video8182.html', + 'md5': '5fe9c4dd7d71e3b238f04b8fdd588357', + 'info_dict': { + 'id': '8182', + 'ext': 'mp4', + 'title': 'Beutolomäus und der geheime Weihnachtswunsch', + 'description': 'md5:b69d32d7b2c55cbe86945ab309d39bbd', + 'timestamp': 1419047100, + 'upload_date': '20141220', + 'duration': 4628, + 'uploader': 'KIKA', + }, + }, { + 'url': 'http://www.kika.de/baumhaus/sendungen/video19636_zc-fea7f8a0_zs-4bf89c60.html', + 'only_matching': True, + }, { + 'url': 'http://www.kika.de/sendungen/einzelsendungen/weihnachtsprogramm/einzelsendung2534.html', + 'only_matching': True, + }] def _real_extract(self, url): - m = re.match(self._VALID_URL, url) - video_id = m.group('video_id') - domain = m.group('domain') + video_id = self._match_id(url) + + webpage = self._download_webpage(url, video_id) + + data_url = self._search_regex( + r'dataURL\s*:\s*(["\'])(?P<url>/.+/(?:video|audio)[0-9]+-avCustom\.xml)\1', + webpage, 'data url', group='url') - # determine title and media streams from webpage - html = self._download_webpage(url, video_id) + doc = self._download_xml( + compat_urlparse.urljoin(url, data_url), video_id) - title = self._html_search_regex(r'<h[12]>(.*?)</h[12]>', html, 'title') - xmlurl = self._search_regex( - r'dataURL:\'(/(?:.+)/(?:video|audio)[0-9]+-avCustom.xml)', html, 'XML URL') + title = xpath_text(doc, ['./title', './broadcast/broadcastName'], 'title', fatal=True) - doc = self._download_xml(domain + xmlurl, video_id) formats = [] - for a in doc.findall('./assets/asset'): - url_el = a.find('./progressiveDownloadUrl') - if url_el is None: - continue - abr = int(a.find('bitrateAudio').text) // 1000 - media_type = a.find('mediaType').text - format = { - 'abr': abr, - 'filesize': int(a.find('fileSize').text), - 'url': url_el.text, - } - - vbr_el = a.find('bitrateVideo') - if vbr_el is None: - format.update({ - 'vcodec': 'none', - 'format_id': '%s-%d' % (media_type, abr), - }) - else: - vbr = int(vbr_el.text) // 1000 - format.update({ - 'vbr': vbr, - 'width': int(a.find('frameWidth').text), - 'height': int(a.find('frameHeight').text), - 'format_id': '%s-%d' % (media_type, vbr), - }) - formats.append(format) + processed_urls = [] + for asset in doc.findall('./assets/asset'): + for source in ( + 'progressiveDownload', + 'dynamicHttpStreamingRedirector', + 'adaptiveHttpStreamingRedirector'): + url_el = asset.find('./%sUrl' % source) + if url_el is None: + continue + + video_url = url_el.text + if video_url in processed_urls: + continue + + 
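The reworked MDRIE no longer splices domain and path together by hand; it pulls the relative avCustom XML path out of the page and resolves it against the page URL. Minimal sketch with an illustrative page snippet:

import re
try:
    from urllib.parse import urljoin  # Python 3
except ImportError:
    from urlparse import urljoin  # Python 2

page_url = 'http://www.kika.de/baumhaus/videos/video19636.html'
webpage = "... dataURL:'/baumhaus/videos/video19636-avCustom.xml' ..."

m = re.search(
    r'dataURL\s*:\s*(["\'])(?P<url>/.+/(?:video|audio)[0-9]+-avCustom\.xml)\1',
    webpage)
if m:
    print(urljoin(page_url, m.group('url')))
    # http://www.kika.de/baumhaus/videos/video19636-avCustom.xml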
processed_urls.append(video_url) + + vbr = int_or_none(xpath_text(asset, './bitrateVideo', 'vbr'), 1000) + abr = int_or_none(xpath_text(asset, './bitrateAudio', 'abr'), 1000) + + ext = determine_ext(url_el.text) + if ext == 'm3u8': + url_formats = self._extract_m3u8_formats( + video_url, video_id, 'mp4', entry_protocol='m3u8_native', + preference=0, m3u8_id='HLS', fatal=False) + elif ext == 'f4m': + url_formats = self._extract_f4m_formats( + video_url + '?hdcore=3.7.0&plugin=aasp-3.7.0.39.44', video_id, + preference=0, f4m_id='HDS', fatal=False) + else: + media_type = xpath_text(asset, './mediaType', 'media type', default='MP4') + vbr = int_or_none(xpath_text(asset, './bitrateVideo', 'vbr'), 1000) + abr = int_or_none(xpath_text(asset, './bitrateAudio', 'abr'), 1000) + filesize = int_or_none(xpath_text(asset, './fileSize', 'file size')) + + f = { + 'url': video_url, + 'format_id': '%s-%d' % (media_type, vbr or abr), + 'filesize': filesize, + 'abr': abr, + 'preference': 1, + } + + if vbr: + width = int_or_none(xpath_text(asset, './frameWidth', 'width')) + height = int_or_none(xpath_text(asset, './frameHeight', 'height')) + f.update({ + 'vbr': vbr, + 'width': width, + 'height': height, + }) + + url_formats = [f] + + if not url_formats: + continue + + if not vbr: + for f in url_formats: + abr = f.get('tbr') or abr + if 'tbr' in f: + del f['tbr'] + f.update({ + 'abr': abr, + 'vcodec': 'none', + }) + + formats.extend(url_formats) + self._sort_formats(formats) + description = xpath_text(doc, './broadcast/broadcastDescription', 'description') + timestamp = parse_iso8601( + xpath_text( + doc, [ + './broadcast/broadcastDate', + './broadcast/broadcastStartDate', + './broadcast/broadcastEndDate'], + 'timestamp', default=None)) + duration = parse_duration(xpath_text(doc, './duration', 'duration')) + uploader = xpath_text(doc, './rights', 'uploader') + return { 'id': video_id, 'title': title, + 'description': description, + 'timestamp': timestamp, + 'duration': duration, + 'uploader': uploader, 'formats': formats, } diff --git a/youtube_dl/extractor/megavideoz.py b/youtube_dl/extractor/megavideoz.py deleted file mode 100644 index af7ff07ea..000000000 --- a/youtube_dl/extractor/megavideoz.py +++ /dev/null @@ -1,56 +0,0 @@ -# encoding: utf-8 -from __future__ import unicode_literals - -import re - -from .common import InfoExtractor -from ..utils import ( - ExtractorError, - float_or_none, - xpath_text, -) - - -class MegaVideozIE(InfoExtractor): - _VALID_URL = r'https?://(?:www\.)?megavideoz\.eu/video/(?P<id>[^/]+)(?:/(?P<display_id>[^/]+))?' 
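For audio-only MDR assets the hunk above relabels bitrate: whatever the generic handling left in tbr moves to abr and vcodec is forced to 'none', so the formats sort correctly. Reduced form:

def normalize_audio_only(url_formats, fallback_abr=None):
    abr = fallback_abr
    for f in url_formats:
        abr = f.pop('tbr', None) or abr
        f.update({'abr': abr, 'vcodec': 'none'})
    return url_formats

print(normalize_audio_only([{'url': 'http://host.example/a.mp3', 'tbr': 192}]))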
- _TEST = { - 'url': 'http://megavideoz.eu/video/WM6UB919XMXH/SMPTE-Universal-Film-Leader', - 'info_dict': { - 'id': '48723', - 'display_id': 'SMPTE-Universal-Film-Leader', - 'ext': 'mp4', - 'title': 'SMPTE Universal Film Leader', - 'thumbnail': 're:https?://.*?\.jpg', - 'duration': 10.93, - } - } - - def _real_extract(self, url): - mobj = re.match(self._VALID_URL, url) - video_id = mobj.group('id') - display_id = mobj.group('display_id') or video_id - - webpage = self._download_webpage(url, display_id) - - if any(p in webpage for p in ('>Video Not Found<', '>404 Error<')): - raise ExtractorError('Video %s does not exist' % video_id, expected=True) - - config = self._download_xml( - self._search_regex( - r"var\s+cnf\s*=\s*'([^']+)'", webpage, 'cnf url'), - display_id) - - video_url = xpath_text(config, './file', 'video url', fatal=True) - title = xpath_text(config, './title', 'title', fatal=True) - thumbnail = xpath_text(config, './image', 'thumbnail') - duration = float_or_none(xpath_text(config, './duration', 'duration')) - video_id = xpath_text(config, './mediaid', 'video id') or video_id - - return { - 'id': video_id, - 'display_id': display_id, - 'url': video_url, - 'title': title, - 'thumbnail': thumbnail, - 'duration': duration - } diff --git a/youtube_dl/extractor/metacafe.py b/youtube_dl/extractor/metacafe.py index 6e2e73a51..67d6271e1 100644 --- a/youtube_dl/extractor/metacafe.py +++ b/youtube_dl/extractor/metacafe.py @@ -7,12 +7,12 @@ from ..compat import ( compat_parse_qs, compat_urllib_parse, compat_urllib_parse_unquote, - compat_urllib_request, ) from ..utils import ( determine_ext, ExtractorError, int_or_none, + sanitized_Request, ) @@ -117,7 +117,7 @@ class MetacafeIE(InfoExtractor): 'filters': '0', 'submit': "Continue - I'm over 18", } - request = compat_urllib_request.Request(self._FILTER_POST, compat_urllib_parse.urlencode(disclaimer_form)) + request = sanitized_Request(self._FILTER_POST, compat_urllib_parse.urlencode(disclaimer_form)) request.add_header('Content-Type', 'application/x-www-form-urlencoded') self.report_age_confirmation() self._download_webpage(request, None, False, 'Unable to confirm age') @@ -142,7 +142,7 @@ class MetacafeIE(InfoExtractor): return self.url_result('theplatform:%s' % ext_id, 'ThePlatform') # Retrieve video webpage to extract further information - req = compat_urllib_request.Request('http://www.metacafe.com/watch/%s/' % video_id) + req = sanitized_Request('http://www.metacafe.com/watch/%s/' % video_id) # AnyClip videos require the flashversion cookie so that we get the link # to the mp4 file @@ -154,10 +154,10 @@ class MetacafeIE(InfoExtractor): # Extract URL, uploader and title from webpage self.report_extraction(video_id) video_url = None - mobj = re.search(r'(?m)&mediaURL=([^&]+)', webpage) + mobj = re.search(r'(?m)&(?:media|video)URL=([^&]+)', webpage) if mobj is not None: mediaURL = compat_urllib_parse_unquote(mobj.group(1)) - video_ext = mediaURL[-3:] + video_ext = determine_ext(mediaURL) # Extract gdaKey if available mobj = re.search(r'(?m)&gdaKey=(.*?)&', webpage) @@ -229,7 +229,7 @@ class MetacafeIE(InfoExtractor): age_limit = ( 18 - if re.search(r'"contentRating":"restricted"', webpage) + if re.search(r'(?:"contentRating":|"rating",)"restricted"', webpage) else 0) if isinstance(video_url, list): diff --git a/youtube_dl/extractor/minhateca.py b/youtube_dl/extractor/minhateca.py index 14934b7ec..e46b23a6f 100644 --- a/youtube_dl/extractor/minhateca.py +++ b/youtube_dl/extractor/minhateca.py @@ -2,14 +2,12 @@ from __future__ import 
unicode_literals from .common import InfoExtractor -from ..compat import ( - compat_urllib_parse, - compat_urllib_request, -) +from ..compat import compat_urllib_parse from ..utils import ( int_or_none, parse_duration, parse_filesize, + sanitized_Request, ) @@ -39,7 +37,7 @@ class MinhatecaIE(InfoExtractor): ('fileId', video_id), ('__RequestVerificationToken', token), ] - req = compat_urllib_request.Request( + req = sanitized_Request( 'http://minhateca.com.br/action/License/Download', data=compat_urllib_parse.urlencode(token_data)) req.add_header('Content-Type', 'application/x-www-form-urlencoded') diff --git a/youtube_dl/extractor/miomio.py b/youtube_dl/extractor/miomio.py index a784fc5fb..170ebd9eb 100644 --- a/youtube_dl/extractor/miomio.py +++ b/youtube_dl/extractor/miomio.py @@ -8,6 +8,7 @@ from ..utils import ( xpath_text, int_or_none, ExtractorError, + sanitized_Request, ) @@ -51,6 +52,8 @@ class MioMioIE(InfoExtractor): mioplayer_path = self._search_regex( r'src="(/mioplayer/[^"]+)"', webpage, 'ref_path') + http_headers = {'Referer': 'http://www.miomio.tv%s' % mioplayer_path} + xml_config = self._search_regex( r'flashvars="type=(?:sina|video)&(.+?)&', webpage, 'xml config') @@ -60,14 +63,12 @@ class MioMioIE(InfoExtractor): 'http://www.miomio.tv/mioplayer/mioplayerconfigfiles/xml.php?id=%s&r=%s' % (id, random.randint(100, 999)), video_id) - # the following xml contains the actual configuration information on the video file(s) - vid_config = self._download_xml( + vid_config_request = sanitized_Request( 'http://www.miomio.tv/mioplayer/mioplayerconfigfiles/sina.php?{0}'.format(xml_config), - video_id) + headers=http_headers) - http_headers = { - 'Referer': 'http://www.miomio.tv%s' % mioplayer_path, - } + # the following xml contains the actual configuration information on the video file(s) + vid_config = self._download_xml(vid_config_request, video_id) if not int_or_none(xpath_text(vid_config, 'timelength')): raise ExtractorError('Unable to load videos!', expected=True) diff --git a/youtube_dl/extractor/mit.py b/youtube_dl/extractor/mit.py index f088ab9e2..29ca45778 100644 --- a/youtube_dl/extractor/mit.py +++ b/youtube_dl/extractor/mit.py @@ -86,7 +86,7 @@ class MITIE(TechTVMITIE): webpage = self._download_webpage(url, page_title) embed_url = self._search_regex( r'<iframe .*?src="(.+?)"', webpage, 'embed url') - return self.url_result(embed_url, ie='TechTVMIT') + return self.url_result(embed_url) class OCWMITIE(InfoExtractor): diff --git a/youtube_dl/extractor/mitele.py b/youtube_dl/extractor/mitele.py index 54993e2c9..c595f2077 100644 --- a/youtube_dl/extractor/mitele.py +++ b/youtube_dl/extractor/mitele.py @@ -1,7 +1,10 @@ from __future__ import unicode_literals from .common import InfoExtractor -from ..compat import compat_urllib_parse +from ..compat import ( + compat_urllib_parse, + compat_urlparse, +) from ..utils import ( encode_dict, get_element_by_attribute, @@ -15,7 +18,7 @@ class MiTeleIE(InfoExtractor): _TESTS = [{ 'url': 'http://www.mitele.es/programas-tv/diario-de/la-redaccion/programa-144/', - 'md5': 'ace7635b2a0b286aaa37d3ff192d2a8a', + 'md5': '0ff1a13aebb35d9bc14081ff633dd324', 'info_dict': { 'id': '0NF1jJnxS1Wu3pHrmvFyw2', 'display_id': 'programa-144', @@ -34,6 +37,7 @@ class MiTeleIE(InfoExtractor): config_url = self._search_regex( r'data-config\s*=\s*"([^"]+)"', webpage, 'data config url') + config_url = compat_urlparse.urljoin(url, config_url) config = self._download_json( config_url, display_id, 'Downloading config JSON') @@ -56,7 +60,7 @@ class 
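The Metacafe tweak above accepts both spellings of the flashvars parameter and stops guessing the extension from the last three characters. A quick check of the widened pattern on fabricated flashvars strings:

import re
try:
    from urllib.parse import unquote  # Python 3
except ImportError:
    from urllib import unquote  # Python 2

for flashvars in (
    '&mediaURL=http%3A%2F%2Fhost.example%2Fclip.mp4&gdaKey=x',
    '&videoURL=http%3A%2F%2Fhost.example%2Fclip.flv&gdaKey=x',
):
    m = re.search(r'(?m)&(?:media|video)URL=([^&]+)', flashvars)
    print(unquote(m.group(1)))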
MiTeleIE(InfoExtractor): 'sta': '0', } media = self._download_json( - '%s/?%s' % (gat, compat_urllib_parse.urlencode(encode_dict(token_data)).encode('utf-8')), + '%s/?%s' % (gat, compat_urllib_parse.urlencode(encode_dict(token_data))), display_id, 'Downloading %s JSON' % location['loc']) file_ = media.get('file') if not file_: diff --git a/youtube_dl/extractor/mixcloud.py b/youtube_dl/extractor/mixcloud.py index d47aeceda..c2b7ed9ab 100644 --- a/youtube_dl/extractor/mixcloud.py +++ b/youtube_dl/extractor/mixcloud.py @@ -64,7 +64,8 @@ class MixcloudIE(InfoExtractor): preview_url = self._search_regex( r'\s(?:data-preview-url|m-preview)="([^"]+)"', webpage, 'preview url') - song_url = preview_url.replace('/previews/', '/c/originals/') + song_url = re.sub(r'audiocdn(\d+)', r'stream\1', preview_url) + song_url = song_url.replace('/previews/', '/c/originals/') if not self._check_url(song_url, track_id, 'mp3'): song_url = song_url.replace('.mp3', '.m4a').replace('originals/', 'm4a/64/') if not self._check_url(song_url, track_id, 'm4a'): diff --git a/youtube_dl/extractor/moevideo.py b/youtube_dl/extractor/moevideo.py index 5a66302f6..d930b9634 100644 --- a/youtube_dl/extractor/moevideo.py +++ b/youtube_dl/extractor/moevideo.py @@ -5,13 +5,11 @@ import json import re from .common import InfoExtractor -from ..compat import ( - compat_urllib_parse, - compat_urllib_request, -) +from ..compat import compat_urllib_parse from ..utils import ( ExtractorError, int_or_none, + sanitized_Request, ) @@ -80,7 +78,7 @@ class MoeVideoIE(InfoExtractor): ] r_json = json.dumps(r) post = compat_urllib_parse.urlencode({'r': r_json}) - req = compat_urllib_request.Request(self._API_URL, post) + req = sanitized_Request(self._API_URL, post) req.add_header('Content-type', 'application/x-www-form-urlencoded') response = self._download_json(req, video_id) diff --git a/youtube_dl/extractor/mofosex.py b/youtube_dl/extractor/mofosex.py index 9bf99a54a..f8226cbb2 100644 --- a/youtube_dl/extractor/mofosex.py +++ b/youtube_dl/extractor/mofosex.py @@ -7,8 +7,8 @@ from .common import InfoExtractor from ..compat import ( compat_urllib_parse_unquote, compat_urllib_parse_urlparse, - compat_urllib_request, ) +from ..utils import sanitized_Request class MofosexIE(InfoExtractor): @@ -29,7 +29,7 @@ class MofosexIE(InfoExtractor): video_id = mobj.group('id') url = 'http://www.' 
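# The Mixcloud hunk above derives the stream URL from the preview URL; the
# same rewrite in isolation (the sample URL is invented):
import re

preview_url = 'https://audiocdn12.mixcloud.com/previews/some-track.mp3'
song_url = re.sub(r'audiocdn(\d+)', r'stream\1', preview_url)  # audiocdn12 -> stream12
song_url = song_url.replace('/previews/', '/c/originals/')
print(song_url)
# -> https://stream12.mixcloud.com/c/originals/some-track.mp3
# If the mp3 probe fails, the extractor then retries with:
song_url = song_url.replace('.mp3', '.m4a').replace('originals/', 'm4a/64/')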
+ mobj.group('url') - req = compat_urllib_request.Request(url) + req = sanitized_Request(url) req.add_header('Cookie', 'age_verified=1') webpage = self._download_webpage(req, video_id) diff --git a/youtube_dl/extractor/moniker.py b/youtube_dl/extractor/moniker.py index 69e4bcd1a..f6bf94f2f 100644 --- a/youtube_dl/extractor/moniker.py +++ b/youtube_dl/extractor/moniker.py @@ -5,19 +5,17 @@ import os.path import re from .common import InfoExtractor -from ..compat import ( - compat_urllib_parse, - compat_urllib_request, -) +from ..compat import compat_urllib_parse from ..utils import ( ExtractorError, remove_start, + sanitized_Request, ) class MonikerIE(InfoExtractor): IE_DESC = 'allmyvideos.net and vidspot.net' - _VALID_URL = r'https?://(?:www\.)?(?:allmyvideos|vidspot)\.net/(?P<id>[a-zA-Z0-9_-]+)' + _VALID_URL = r'https?://(?:www\.)?(?:allmyvideos|vidspot)\.net/(?:(?:2|v)/v-)?(?P<id>[a-zA-Z0-9_-]+)' _TESTS = [{ 'url': 'http://allmyvideos.net/jih3nce3x6wn', @@ -46,6 +44,18 @@ class MonikerIE(InfoExtractor): }, { 'url': 'https://www.vidspot.net/l2ngsmhs8ci5', 'only_matching': True, + }, { + 'url': 'http://vidspot.net/2/v-ywDf99', + 'md5': '5f8254ce12df30479428b0152fb8e7ba', + 'info_dict': { + 'id': 'ywDf99', + 'ext': 'mp4', + 'title': 'IL FAIT LE MALIN EN PORSHE CAYENNE ( mais pas pour longtemps)', + 'description': 'IL FAIT LE MALIN EN PORSHE CAYENNE.', + }, + }, { + 'url': 'http://allmyvideos.net/v/v-HXZm5t', + 'only_matching': True, }] def _real_extract(self, url): @@ -64,18 +74,30 @@ class MonikerIE(InfoExtractor): raise ExtractorError( '%s returned error: %s' % (self.IE_NAME, error), expected=True) - fields = re.findall(r'type="hidden" name="(.+?)"\s* value="?(.+?)">', orig_webpage) - data = dict(fields) + builtin_url = self._search_regex( + r'<iframe[^>]+src=(["\'])(?P<url>.+?/builtin-.+?)\1', + orig_webpage, 'builtin URL', default=None, group='url') - post = compat_urllib_parse.urlencode(data) - headers = { - b'Content-Type': b'application/x-www-form-urlencoded', - } - req = compat_urllib_request.Request(url, post, headers) - webpage = self._download_webpage( - req, video_id, note='Downloading video page ...') + if builtin_url: + req = sanitized_Request(builtin_url) + req.add_header('Referer', url) + webpage = self._download_webpage(req, video_id, 'Downloading builtin page') + title = self._og_search_title(orig_webpage).strip() + description = self._og_search_description(orig_webpage).strip() + else: + fields = re.findall(r'type="hidden" name="(.+?)"\s* value="?(.+?)">', orig_webpage) + data = dict(fields) + + post = compat_urllib_parse.urlencode(data) + headers = { + b'Content-Type': b'application/x-www-form-urlencoded', + } + req = sanitized_Request(url, post, headers) + webpage = self._download_webpage( + req, video_id, note='Downloading video page ...') - title = os.path.splitext(data['fname'])[0] + title = os.path.splitext(data['fname'])[0] + description = None # Could be several links with different quality links = re.findall(r'"file" : "?(.+?)",', webpage) @@ -89,5 +111,6 @@ class MonikerIE(InfoExtractor): return { 'id': video_id, 'title': title, + 'description': description, 'formats': formats, } diff --git a/youtube_dl/extractor/mooshare.py b/youtube_dl/extractor/mooshare.py index 7603af5e2..7cc7f054f 100644 --- a/youtube_dl/extractor/mooshare.py +++ b/youtube_dl/extractor/mooshare.py @@ -3,12 +3,10 @@ from __future__ import unicode_literals import re from .common import InfoExtractor -from ..compat import ( - compat_urllib_request, - compat_urllib_parse, -) +from ..compat 
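# A standalone sketch of the Moniker fallback branch above: scrape the hidden
# <input> fields and re-post them to the same URL. The HTML is a toy value;
# the regex is the one from the hunk.
import re
try:
    from urllib.parse import urlencode
except ImportError:
    from urllib import urlencode

orig_webpage = '''<form method="post">
<input type="hidden" name="op" value="download1">
<input type="hidden" name="fname" value="clip.mp4">
</form>'''
fields = re.findall(r'type="hidden" name="(.+?)"\s* value="?(.+?)">', orig_webpage)
data = dict(fields)     # {'op': 'download1', 'fname': 'clip.mp4'}
post = urlencode(data)  # body for the follow-up POST request
print(post)             # op=download1&fname=clip.mp4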
import compat_urllib_parse from ..utils import ( ExtractorError, + sanitized_Request, ) @@ -59,7 +57,7 @@ class MooshareIE(InfoExtractor): 'hash': hash_key, } - request = compat_urllib_request.Request( + request = sanitized_Request( 'http://mooshare.biz/%s' % video_id, compat_urllib_parse.urlencode(download_form)) request.add_header('Content-Type', 'application/x-www-form-urlencoded') diff --git a/youtube_dl/extractor/movieclips.py b/youtube_dl/extractor/movieclips.py index 04e17d055..1564cb71f 100644 --- a/youtube_dl/extractor/movieclips.py +++ b/youtube_dl/extractor/movieclips.py @@ -1,80 +1,40 @@ +# coding: utf-8 from __future__ import unicode_literals -import re - from .common import InfoExtractor -from ..compat import ( - compat_str, -) -from ..utils import ( - ExtractorError, - clean_html, -) +from ..utils import sanitized_Request class MovieClipsIE(InfoExtractor): - _VALID_URL = r'https?://movieclips\.com/(?P<id>[\da-zA-Z]+)(?:-(?P<display_id>[\da-z-]+))?' + _VALID_URL = r'https?://(?:www.)?movieclips\.com/videos/(?P<id>[^/?#]+)' _TEST = { - 'url': 'http://movieclips.com/Wy7ZU-my-week-with-marilyn-movie-do-you-love-me/', + 'url': 'http://www.movieclips.com/videos/warcraft-trailer-1-561180739597?autoPlay=true&playlistId=5', 'info_dict': { - 'id': 'Wy7ZU', - 'display_id': 'my-week-with-marilyn-movie-do-you-love-me', + 'id': 'pKIGmG83AqD9', + 'display_id': 'warcraft-trailer-1-561180739597', 'ext': 'mp4', - 'title': 'My Week with Marilyn - Do You Love Me?', - 'description': 'md5:e86795bd332fe3cff461e7c8dc542acb', + 'title': 'Warcraft Trailer 1', + 'description': 'Watch Trailer 1 from Warcraft (2016). Legendary’s WARCRAFT is a 3D epic adventure of world-colliding conflict based.', 'thumbnail': 're:^https?://.*\.jpg$', }, - 'params': { - # rtmp download - 'skip_download': True, - } + 'add_ie': ['ThePlatform'], } def _real_extract(self, url): - mobj = re.match(self._VALID_URL, url) - video_id = mobj.group('id') - display_id = mobj.group('display_id') - show_id = display_id or video_id - - config = self._download_xml( - 'http://config.movieclips.com/player/config/%s' % video_id, - show_id, 'Downloading player config') - - if config.find('./country-region').text == 'false': - raise ExtractorError( - '%s said: %s' % (self.IE_NAME, config.find('./region_alert').text), expected=True) - - properties = config.find('./video/properties') - smil_file = properties.attrib['smil_file'] + display_id = self._match_id(url) - smil = self._download_xml(smil_file, show_id, 'Downloading SMIL') - base_url = smil.find('./head/meta').attrib['base'] - - formats = [] - for video in smil.findall('./body/switch/video'): - vbr = int(video.attrib['system-bitrate']) / 1000 - src = video.attrib['src'] - formats.append({ - 'url': base_url, - 'play_path': src, - 'ext': src.split(':')[0], - 'vbr': vbr, - 'format_id': '%dk' % vbr, - }) - - self._sort_formats(formats) - - title = '%s - %s' % (properties.attrib['clip_movie_title'], properties.attrib['clip_title']) - description = clean_html(compat_str(properties.attrib['clip_description'])) - thumbnail = properties.attrib['image'] - categories = properties.attrib['clip_categories'].split(',') + req = sanitized_Request(url) + # it doesn't work if it thinks the browser is too old + req.add_header('User-Agent', 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20150101 Firefox/43.0 (Chrome)') + webpage = self._download_webpage(req, display_id) + theplatform_link = self._html_search_regex(r'src="(http://player.theplatform.com/p/.*?)"', webpage, 'theplatform link') + title =
self._html_search_regex(r'<title[^>]*>([^>]+)-\s*\d+\s*|\s*Movieclips.com</title>', webpage, 'title') + description = self._html_search_meta('description', webpage) return { - 'id': video_id, - 'display_id': display_id, + '_type': 'url_transparent', + 'url': theplatform_link, 'title': title, + 'display_id': display_id, 'description': description, - 'thumbnail': thumbnail, - 'categories': categories, - 'formats': formats, } diff --git a/youtube_dl/extractor/movshare.py b/youtube_dl/extractor/movshare.py deleted file mode 100644 index 6101063f2..000000000 --- a/youtube_dl/extractor/movshare.py +++ /dev/null @@ -1,27 +0,0 @@ -from __future__ import unicode_literals - -from .novamov import NovaMovIE - - -class MovShareIE(NovaMovIE): - IE_NAME = 'movshare' - IE_DESC = 'MovShare' - - _VALID_URL = NovaMovIE._VALID_URL_TEMPLATE % {'host': 'movshare\.(?:net|sx|ag)'} - - _HOST = 'www.movshare.net' - - _FILE_DELETED_REGEX = r'>This file no longer exists on our servers.<' - _TITLE_REGEX = r'<strong>Title:</strong> ([^<]+)</p>' - _DESCRIPTION_REGEX = r'<strong>Description:</strong> ([^<]+)</p>' - - _TEST = { - 'url': 'http://www.movshare.net/video/559e28be54d96', - 'md5': 'abd31a2132947262c50429e1d16c1bfd', - 'info_dict': { - 'id': '559e28be54d96', - 'ext': 'flv', - 'title': 'dissapeared image', - 'description': 'optical illusion dissapeared image magic illusion', - } - } diff --git a/youtube_dl/extractor/mtv.py b/youtube_dl/extractor/mtv.py index a597714e9..d887583e6 100644 --- a/youtube_dl/extractor/mtv.py +++ b/youtube_dl/extractor/mtv.py @@ -5,7 +5,6 @@ import re from .common import InfoExtractor from ..compat import ( compat_urllib_parse, - compat_urllib_request, compat_str, ) from ..utils import ( @@ -13,6 +12,7 @@ from ..utils import ( find_xpath_attr, fix_xml_ampersands, HEADRequest, + sanitized_Request, unescapeHTML, url_basename, RegexNotFoundError, @@ -53,7 +53,7 @@ class MTVServicesInfoExtractor(InfoExtractor): def _extract_mobile_video_formats(self, mtvn_id): webpage_url = self._MOBILE_TEMPLATE % mtvn_id - req = compat_urllib_request.Request(webpage_url) + req = sanitized_Request(webpage_url) # Otherwise we get a webpage that would execute some javascript req.add_header('User-Agent', 'curl/7') webpage = self._download_webpage(req, mtvn_id, @@ -200,7 +200,13 @@ class MTVServicesInfoExtractor(InfoExtractor): if mgid is None or ':' not in mgid: mgid = self._search_regex( [r'data-mgid="(.*?)"', r'swfobject.embedSWF\(".*?(mgid:.*?)"'], - webpage, 'mgid') + webpage, 'mgid', default=None) + + if not mgid: + sm4_embed = self._html_search_meta( + 'sm4:video:embed', webpage, 'sm4 embed', default='') + mgid = self._search_regex( + r'embed/(mgid:.+?)["\'&?/]', sm4_embed, 'mgid') videos_info = self._get_videos_info(mgid) return videos_info @@ -222,6 +228,13 @@ class MTVServicesEmbeddedIE(MTVServicesInfoExtractor): }, } + @staticmethod + def _extract_url(webpage): + mobj = re.search( + r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//media.mtvnservices.com/embed/.+?)\1', webpage) + if mobj: + return mobj.group('url') + def _get_feed_url(self, uri): video_id = self._id_from_uri(uri) site_id = uri.replace(video_id, '') diff --git a/youtube_dl/extractor/myvideo.py b/youtube_dl/extractor/myvideo.py index c96f472a3..36ab388b2 100644 --- a/youtube_dl/extractor/myvideo.py +++ b/youtube_dl/extractor/myvideo.py @@ -11,10 +11,10 @@ from ..compat import ( compat_ord, compat_urllib_parse, compat_urllib_parse_unquote, - compat_urllib_request, ) from ..utils import ( ExtractorError, + sanitized_Request, ) @@ -83,7 
+83,7 @@ class MyVideoIE(InfoExtractor): mobj = re.search(r'data-video-service="/service/data/video/%s/config' % video_id, webpage) if mobj is not None: - request = compat_urllib_request.Request('http://www.myvideo.de/service/data/video/%s/config' % video_id, '') + request = sanitized_Request('http://www.myvideo.de/service/data/video/%s/config' % video_id, '') response = self._download_webpage(request, video_id, 'Downloading video info') info = json.loads(base64.b64decode(response).decode('utf-8')) diff --git a/youtube_dl/extractor/naver.py b/youtube_dl/extractor/naver.py index 925967753..1f5fc2145 100644 --- a/youtube_dl/extractor/naver.py +++ b/youtube_dl/extractor/naver.py @@ -10,7 +10,6 @@ from ..compat import ( ) from ..utils import ( ExtractorError, - clean_html, ) @@ -46,11 +45,11 @@ class NaverIE(InfoExtractor): m_id = re.search(r'var rmcPlayer = new nhn.rmcnmv.RMCVideoPlayer\("(.+?)", "(.+?)"', webpage) if m_id is None: - m_error = re.search( - r'(?s)<div class="(?:nation_error|nation_box)">\s*(?:<!--.*?-->)?\s*<p class="[^"]+">(?P<msg>.+?)</p>\s*</div>', - webpage) - if m_error: - raise ExtractorError(clean_html(m_error.group('msg')), expected=True) + error = self._html_search_regex( + r'(?s)<div class="(?:nation_error|nation_box|error_box)">\s*(?:<!--.*?-->)?\s*<p class="[^"]+">(?P<msg>.+?)</p>\s*</div>', + webpage, 'error', default=None) + if error: + raise ExtractorError(error, expected=True) raise ExtractorError('couldn\'t extract vid and key') vid = m_id.group(1) key = m_id.group(2) diff --git a/youtube_dl/extractor/nba.py b/youtube_dl/extractor/nba.py index 944096e1c..9d26030d3 100644 --- a/youtube_dl/extractor/nba.py +++ b/youtube_dl/extractor/nba.py @@ -1,63 +1,98 @@ from __future__ import unicode_literals +import re + from .common import InfoExtractor from ..utils import ( - remove_end, parse_duration, + int_or_none, + xpath_text, + xpath_attr, ) class NBAIE(InfoExtractor): - _VALID_URL = r'https?://(?:watch\.|www\.)?nba\.com/(?:nba/)?video(?P<id>/[^?]*?)/?(?:/index\.html)?(?:\?.*)?$' + _VALID_URL = r'https?://(?:watch\.|www\.)?nba\.com/(?P<path>(?:[^/]+/)?video/(?P<id>[^?]*?))/?(?:/index\.html)?(?:\?.*)?$' _TESTS = [{ 'url': 'http://www.nba.com/video/games/nets/2012/12/04/0021200253-okc-bkn-recap.nba/index.html', - 'md5': 'c0edcfc37607344e2ff8f13c378c88a4', + 'md5': '9e7729d3010a9c71506fd1248f74e4f4', 'info_dict': { - 'id': '0021200253-okc-bkn-recap.nba', - 'ext': 'mp4', + 'id': '0021200253-okc-bkn-recap', + 'ext': 'flv', 'title': 'Thunder vs. Nets', 'description': 'Kevin Durant scores 32 points and dishes out six assists as the Thunder beat the Nets in Brooklyn.', 'duration': 181, + 'timestamp': 1354638466, + 'upload_date': '20121204', }, }, { 'url': 'http://www.nba.com/video/games/hornets/2014/12/05/0021400276-nyk-cha-play5.nba/', 'only_matching': True, }, { - 'url': 'http://watch.nba.com/nba/video/channels/playoffs/2015/05/20/0041400301-cle-atl-recap.nba', + 'url': 'http://watch.nba.com/video/channels/playoffs/2015/05/20/0041400301-cle-atl-recap.nba', + 'md5': 'b2b39b81cf28615ae0c3360a3f9668c4', 'info_dict': { - 'id': '0041400301-cle-atl-recap.nba', + 'id': '0041400301-cle-atl-recap', 'ext': 'mp4', - 'title': 'NBA GAME TIME | Video: Hawks vs. Cavaliers Game 1', + 'title': 'Hawks vs. 
Cavaliers Game 1', 'description': 'md5:8094c3498d35a9bd6b1a8c396a071b4d', 'duration': 228, - }, - 'params': { - 'skip_download': True, + 'timestamp': 1432134543, + 'upload_date': '20150520', } }] def _real_extract(self, url): - video_id = self._match_id(url) - webpage = self._download_webpage(url, video_id) - - video_url = 'http://ht-mobile.cdn.turner.com/nba/big' + video_id + '_nba_1280x720.mp4' + path, video_id = re.match(self._VALID_URL, url).groups() + if path.startswith('nba/'): + path = path[3:] + video_info = self._download_xml('http://www.nba.com/%s.xml' % path, video_id) + video_id = xpath_text(video_info, 'slug') + title = xpath_text(video_info, 'headline') + description = xpath_text(video_info, 'description') + duration = parse_duration(xpath_text(video_info, 'length')) + timestamp = int_or_none(xpath_attr(video_info, 'dateCreated', 'uts')) - shortened_video_id = video_id.rpartition('/')[2] - title = remove_end( - self._og_search_title(webpage, default=shortened_video_id), ' : NBA.com') + thumbnails = [] + for image in video_info.find('images'): + thumbnails.append({ + 'id': image.attrib.get('cut'), + 'url': image.text, + 'width': int_or_none(image.attrib.get('width')), + 'height': int_or_none(image.attrib.get('height')), + }) - description = self._og_search_description(webpage) - duration_str = self._html_search_meta( - 'duration', webpage, 'duration', default=None) - if not duration_str: - duration_str = self._html_search_regex( - r'Duration:</b>\s*(\d+:\d+)', webpage, 'duration', fatal=False) - duration = parse_duration(duration_str) + formats = [] + for video_file in video_info.findall('.//file'): + video_url = video_file.text + if video_url.startswith('/'): + continue + if video_url.endswith('.m3u8'): + formats.extend(self._extract_m3u8_formats(video_url, video_id, m3u8_id='hls', fatal=False)) + elif video_url.endswith('.f4m'): + formats.extend(self._extract_f4m_formats(video_url + '?hdcore=3.4.1.1', video_id, f4m_id='hds', fatal=False)) + else: + key = video_file.attrib.get('bitrate') + format_info = { + 'format_id': key, + 'url': video_url, + } + mobj = re.search(r'(\d+)x(\d+)(?:_(\d+))?', key) + if mobj: + format_info.update({ + 'width': int(mobj.group(1)), + 'height': int(mobj.group(2)), + 'tbr': int_or_none(mobj.group(3)), + }) + formats.append(format_info) + self._sort_formats(formats) return { - 'id': shortened_video_id, - 'url': video_url, + 'id': video_id, 'title': title, 'description': description, 'duration': duration, + 'timestamp': timestamp, + 'thumbnails': thumbnails, + 'formats': formats, } diff --git a/youtube_dl/extractor/nbc.py b/youtube_dl/extractor/nbc.py index e683d24c4..340c922bd 100644 --- a/youtube_dl/extractor/nbc.py +++ b/youtube_dl/extractor/nbc.py @@ -3,14 +3,12 @@ from __future__ import unicode_literals import re from .common import InfoExtractor -from ..compat import ( - compat_str, - compat_HTTPError, -) +from ..compat import compat_HTTPError from ..utils import ( ExtractorError, find_xpath_attr, lowercase_escape, + smuggle_url, unescapeHTML, ) @@ -62,12 +60,13 @@ class NBCIE(InfoExtractor): theplatform_url = unescapeHTML(lowercase_escape(self._html_search_regex( [ r'(?:class="video-player video-player-full" data-mpx-url|class="player" src)="(.*?)"', + r'<iframe[^>]+src="((?:https?:)?//player\.theplatform\.com/[^"]+)"', r'"embedURL"\s*:\s*"([^"]+)"' ], webpage, 'theplatform url').replace('_no_endcard', '').replace('\\/', '/'))) if theplatform_url.startswith('//'): theplatform_url = 'http:' + theplatform_url - return 
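# The NBA rewrite above parses width, height and bitrate out of keys shaped
# like "640x360_664" with a single regex; a small self-contained check (the
# sample keys are illustrative, the regex is the hunk's):
import re

def parse_bitrate_key(key):
    # returns (width, height, tbr) or None
    mobj = re.search(r'(\d+)x(\d+)(?:_(\d+))?', key)
    if not mobj:
        return None
    width, height, tbr = mobj.groups()
    return int(width), int(height), int(tbr) if tbr else None

print(parse_bitrate_key('640x360_664'))  # (640, 360, 664)
print(parse_bitrate_key('1280x720'))     # (1280, 720, None)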
self.url_result(theplatform_url) + return self.url_result(smuggle_url(theplatform_url, {'source_url': url})) class NBCSportsVPlayerIE(InfoExtractor): @@ -187,7 +186,7 @@ class NBCNewsIE(InfoExtractor): 'title': info.find('headline').text, 'ext': 'flv', 'url': find_xpath_attr(info, 'media', 'type', 'flashVideo').text, - 'description': compat_str(info.find('caption').text), + 'description': info.find('caption').text, 'thumbnail': find_xpath_attr(info, 'media', 'type', 'thumbnail').text, } else: diff --git a/youtube_dl/extractor/ndr.py b/youtube_dl/extractor/ndr.py index e3cc6fde8..894c51399 100644 --- a/youtube_dl/extractor/ndr.py +++ b/youtube_dl/extractor/ndr.py @@ -14,7 +14,8 @@ from ..utils import ( class NDRBaseIE(InfoExtractor): def _real_extract(self, url): - display_id = self._match_id(url) + mobj = re.match(self._VALID_URL, url) + display_id = next(group for group in mobj.groups() if group) webpage = self._download_webpage(url, display_id) return self._extract_embed(webpage, display_id) @@ -22,7 +23,7 @@ class NDRBaseIE(InfoExtractor): class NDRIE(NDRBaseIE): IE_NAME = 'ndr' IE_DESC = 'NDR.de - Norddeutscher Rundfunk' - _VALID_URL = r'https?://www\.ndr\.de/(?:[^/]+/)+(?P<id>[^/?#]+),[\da-z]+\.html' + _VALID_URL = r'https?://www\.ndr\.de/(?:[^/]+/)*(?P<id>[^/?#]+),[\da-z]+\.html' _TESTS = [{ # httpVideo, same content id 'url': 'http://www.ndr.de/fernsehen/Party-Poette-und-Parade,hafengeburtstag988.html', @@ -77,6 +78,9 @@ class NDRIE(NDRBaseIE): 'params': { 'skip_download': True, }, + }, { + 'url': 'https://www.ndr.de/Fettes-Brot-Ferris-MC-und-Thees-Uhlmann-live-on-stage,festivalsommer116.html', + 'only_matching': True, }] def _extract_embed(self, webpage, display_id): @@ -84,10 +88,10 @@ class NDRIE(NDRBaseIE): 'embedURL', webpage, 'embed URL', fatal=True) description = self._search_regex( r'<p[^>]+itemprop="description">([^<]+)</p>', - webpage, 'description', fatal=False) + webpage, 'description', default=None) or self._og_search_description(webpage) timestamp = parse_iso8601( self._search_regex( - r'<span itemprop="datePublished" content="([^"]+)">', + r'<span[^>]+itemprop="(?:datePublished|uploadDate)"[^>]+content="([^"]+)"', webpage, 'upload date', fatal=False)) return { '_type': 'url_transparent', @@ -101,7 +105,7 @@ class NDRIE(NDRBaseIE): class NJoyIE(NDRBaseIE): IE_NAME = 'njoy' IE_DESC = 'N-JOY' - _VALID_URL = r'https?://www\.n-joy\.de/(?:[^/]+/)+(?P<id>[^/?#]+),[\da-z]+\.html' + _VALID_URL = r'https?://www\.n-joy\.de/(?:[^/]+/)*(?:(?P<display_id>[^/?#]+),)?(?P<id>[\da-z]+)\.html' _TESTS = [{ # httpVideo, same content id 'url': 'http://www.n-joy.de/entertainment/comedy/comedy_contest/Benaissa-beim-NDR-Comedy-Contest,comedycontest2480.html', @@ -136,6 +140,9 @@ class NJoyIE(NDRBaseIE): 'params': { 'skip_download': True, }, + }, { + 'url': 'http://www.n-joy.de/radio/webradio/morningshow209.html', + 'only_matching': True, }] def _extract_embed(self, webpage, display_id): @@ -231,7 +238,7 @@ class NDREmbedBaseIE(InfoExtractor): class NDREmbedIE(NDREmbedBaseIE): IE_NAME = 'ndr:embed' - _VALID_URL = r'https?://www\.ndr\.de/(?:[^/]+/)+(?P<id>[\da-z]+)-(?:player|externalPlayer)\.html' + _VALID_URL = r'https?://www\.ndr\.de/(?:[^/]+/)*(?P<id>[\da-z]+)-(?:player|externalPlayer)\.html' _TESTS = [{ 'url': 'http://www.ndr.de/fernsehen/sendungen/ndr_aktuell/ndraktuell28488-player.html', 'md5': '8b9306142fe65bbdefb5ce24edb6b0a9', @@ -325,7 +332,7 @@ class NDREmbedIE(NDREmbedBaseIE): class NJoyEmbedIE(NDREmbedBaseIE): IE_NAME = 'njoy:embed' - _VALID_URL = 
r'https?://www\.n-joy\.de/(?:[^/]+/)+(?P<id>[\da-z]+)-(?:player|externalPlayer)_[^/]+\.html' + _VALID_URL = r'https?://www\.n-joy\.de/(?:[^/]+/)*(?P<id>[\da-z]+)-(?:player|externalPlayer)_[^/]+\.html' _TESTS = [{ # httpVideo 'url': 'http://www.n-joy.de/events/reeperbahnfestival/doku948-player_image-bc168e87-5263-4d6d-bd27-bb643005a6de_theme-n-joy.html', diff --git a/youtube_dl/extractor/neteasemusic.py b/youtube_dl/extractor/neteasemusic.py index a8e0a64ed..15eca825a 100644 --- a/youtube_dl/extractor/neteasemusic.py +++ b/youtube_dl/extractor/neteasemusic.py @@ -8,11 +8,11 @@ import re from .common import InfoExtractor from ..compat import ( - compat_urllib_request, compat_urllib_parse, compat_str, compat_itertools_count, ) +from ..utils import sanitized_Request class NetEaseMusicBaseIE(InfoExtractor): @@ -40,7 +40,7 @@ class NetEaseMusicBaseIE(InfoExtractor): if not details: continue formats.append({ - 'url': 'http://m1.music.126.net/%s/%s.%s' % + 'url': 'http://m5.music.126.net/%s/%s.%s' % (cls._encrypt(details['dfsId']), details['dfsId'], details['extension']), 'ext': details.get('extension'), @@ -56,7 +56,7 @@ class NetEaseMusicBaseIE(InfoExtractor): return int(round(ms / 1000.0)) def query_api(self, endpoint, video_id, note): - req = compat_urllib_request.Request('%s%s' % (self._API_BASE, endpoint)) + req = sanitized_Request('%s%s' % (self._API_BASE, endpoint)) req.add_header('Referer', self._API_BASE) return self._download_json(req, video_id, note) diff --git a/youtube_dl/extractor/nextmedia.py b/youtube_dl/extractor/nextmedia.py index c10784f6b..d1688457f 100644 --- a/youtube_dl/extractor/nextmedia.py +++ b/youtube_dl/extractor/nextmedia.py @@ -126,7 +126,8 @@ class AppleDailyIE(NextMediaIE): 'thumbnail': 're:^https?://.*\.jpg$', 'description': 'md5:23c0aac567dc08c9c16a3161a2c2e3cd', 'upload_date': '20150128', - } + }, + 'skip': 'redirect to http://www.appledaily.com.tw/animation/', }, { # No thumbnail 'url': 'http://www.appledaily.com.tw/animation/realtimenews/new/20150128/5003673/', @@ -140,10 +141,19 @@ class AppleDailyIE(NextMediaIE): }, 'expected_warnings': [ 'video thumbnail', - ] + ], + 'skip': 'redirect to http://www.appledaily.com.tw/animation/', }, { 'url': 'http://www.appledaily.com.tw/appledaily/article/supplement/20140417/35770334/', - 'only_matching': True, + 'md5': 'eaa20e6b9df418c912d7f5dec2ba734d', + 'info_dict': { + 'id': '35770334', + 'ext': 'mp4', + 'title': '咖啡占卜測 XU裝熟指數', + 'thumbnail': 're:^https?://.*\.jpg$', + 'description': 'md5:7b859991a6a4fedbdf3dd3b66545c748', + 'upload_date': '20140417', + }, }] _URL_PATTERN = r'\{url: \'(.+)\'\}' diff --git a/youtube_dl/extractor/nfb.py b/youtube_dl/extractor/nfb.py index ea077254b..5bd15f7a7 100644 --- a/youtube_dl/extractor/nfb.py +++ b/youtube_dl/extractor/nfb.py @@ -1,10 +1,8 @@ from __future__ import unicode_literals from .common import InfoExtractor -from ..compat import ( - compat_urllib_request, - compat_urllib_parse, -) +from ..compat import compat_urllib_parse +from ..utils import sanitized_Request class NFBIE(InfoExtractor): @@ -40,8 +38,9 @@ class NFBIE(InfoExtractor): uploader = self._html_search_regex(r'<em class="director-name" itemprop="name">([^<]+)</em>', page, 'director name', fatal=False) - request = compat_urllib_request.Request('https://www.nfb.ca/film/%s/player_config' % video_id, - compat_urllib_parse.urlencode({'getConfig': 'true'}).encode('ascii')) + request = sanitized_Request( + 'https://www.nfb.ca/film/%s/player_config' % video_id, + compat_urllib_parse.urlencode({'getConfig': 
'true'}).encode('ascii')) request.add_header('Content-Type', 'application/x-www-form-urlencoded') request.add_header('X-NFB-Referer', 'http://www.nfb.ca/medias/flash/NFBVideoPlayer.swf') diff --git a/youtube_dl/extractor/nfl.py b/youtube_dl/extractor/nfl.py index 55dc6107d..200874d68 100644 --- a/youtube_dl/extractor/nfl.py +++ b/youtube_dl/extractor/nfl.py @@ -108,6 +108,20 @@ class NFLIE(InfoExtractor): 'upload_date': '20150918', }, }, { + # lowercase data-contentid + 'url': 'http://www.steelers.com/news/article-1/Tomlin-on-Ben-getting-Vick-ready/56399c96-4160-48cf-a7ad-1d17d4a3aef7', + 'info_dict': { + 'id': '12693586-6ea9-4743-9c1c-02c59e4a5ef2', + 'ext': 'mp4', + 'title': 'Tomlin looks ahead to Ravens on a short week', + 'description': 'md5:32f3f7b139f43913181d5cbb24ecad75', + 'timestamp': 1443459651, + 'upload_date': '20150928', + }, + 'params': { + 'skip_download': True, + }, + }, { 'url': 'http://www.nfl.com/videos/nfl-network-top-ten/09000d5d810a6bd4/Top-10-Gutsiest-Performances-Jack-Youngblood', 'only_matching': True, }, { @@ -151,7 +165,7 @@ class NFLIE(InfoExtractor): group='config')) # For articles, the id in the url is not the video id video_id = self._search_regex( - r'(?:<nflcs:avplayer[^>]+data-contentId\s*=\s*|contentId\s*:\s*)(["\'])(?P<id>.+?)\1', + r'(?:<nflcs:avplayer[^>]+data-content[Ii]d\s*=\s*|content[Ii]d\s*:\s*)(["\'])(?P<id>.+?)\1', webpage, 'video id', default=video_id, group='id') config = self._download_json(config_url, video_id, 'Downloading player config') url_template = NFLIE.prepend_host( diff --git a/youtube_dl/extractor/niconico.py b/youtube_dl/extractor/niconico.py index bda1cff05..586e52a4a 100644 --- a/youtube_dl/extractor/niconico.py +++ b/youtube_dl/extractor/niconico.py @@ -8,7 +8,6 @@ import datetime from .common import InfoExtractor from ..compat import ( compat_urllib_parse, - compat_urllib_request, compat_urlparse, ) from ..utils import ( @@ -17,6 +16,7 @@ from ..utils import ( int_or_none, parse_duration, parse_iso8601, + sanitized_Request, xpath_text, determine_ext, ) @@ -102,7 +102,7 @@ class NiconicoIE(InfoExtractor): 'password': password, } login_data = compat_urllib_parse.urlencode(encode_dict(login_form_strs)).encode('utf-8') - request = compat_urllib_request.Request( + request = sanitized_Request( 'https://secure.nicovideo.jp/secure/login', login_data) login_results = self._download_webpage( request, None, note='Logging in', errnote='Unable to log in') @@ -145,7 +145,7 @@ class NiconicoIE(InfoExtractor): 'k': thumb_play_key, 'v': video_id }) - flv_info_request = compat_urllib_request.Request( + flv_info_request = sanitized_Request( 'http://ext.nicovideo.jp/thumb_watch', flv_info_data, {'Content-Type': 'application/x-www-form-urlencoded'}) flv_info_webpage = self._download_webpage( diff --git a/youtube_dl/extractor/noco.py b/youtube_dl/extractor/noco.py index a53e27b27..d440313d5 100644 --- a/youtube_dl/extractor/noco.py +++ b/youtube_dl/extractor/noco.py @@ -9,7 +9,7 @@ from .common import InfoExtractor from ..compat import ( compat_str, compat_urllib_parse, - compat_urllib_request, + compat_urlparse, ) from ..utils import ( clean_html, @@ -17,6 +17,7 @@ from ..utils import ( int_or_none, float_or_none, parse_iso8601, + sanitized_Request, ) @@ -74,7 +75,7 @@ class NocoIE(InfoExtractor): 'username': username, 'password': password, } - request = compat_urllib_request.Request(self._LOGIN_URL, compat_urllib_parse.urlencode(login_form)) + request = sanitized_Request(self._LOGIN_URL, compat_urllib_parse.urlencode(login_form)) 
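# The NFL hunk above widens data-contentId to also accept a lowercase
# data-contentid by swapping the literal "I" for a character class; the same
# idea in isolation:
import re

pattern = r'data-content[Ii]d\s*=\s*(["\'])(?P<id>.+?)\1'
for snippet in ('data-contentId="12693586-6ea9"', "data-contentid='12693586-6ea9'"):
    print(re.search(pattern, snippet).group('id'))
# both print: 12693586-6ea9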
request.add_header('Content-Type', 'application/x-www-form-urlencoded; charset=UTF-8') login = self._download_json(request, None, 'Logging in as %s' % username) @@ -82,14 +83,21 @@ class NocoIE(InfoExtractor): if 'erreur' in login: raise ExtractorError('Unable to login: %s' % clean_html(login['erreur']), expected=True) + @staticmethod + def _ts(): + return int(time.time() * 1000) + def _call_api(self, path, video_id, note, sub_lang=None): - ts = compat_str(int(time.time() * 1000)) + ts = compat_str(self._ts() + self._ts_offset) tk = hashlib.md5((hashlib.md5(ts.encode('ascii')).hexdigest() + '#8S?uCraTedap6a').encode('ascii')).hexdigest() url = self._API_URL_TEMPLATE % (path, ts, tk) if sub_lang: url += self._SUB_LANG_TEMPLATE % sub_lang - resp = self._download_json(url, video_id, note) + request = sanitized_Request(url) + request.add_header('Referer', self._referer) + + resp = self._download_json(request, video_id, note) if isinstance(resp, dict) and resp.get('error'): self._raise_error(resp['error'], resp['description']) @@ -102,8 +110,22 @@ class NocoIE(InfoExtractor): expected=True) def _real_extract(self, url): - mobj = re.match(self._VALID_URL, url) - video_id = mobj.group('id') + video_id = self._match_id(url) + + # Timestamp adjustment offset between server time and local time + # must be calculated in order to use timestamps closest to server's + # in all API requests (see https://github.com/rg3/youtube-dl/issues/7864) + webpage = self._download_webpage(url, video_id) + + player_url = self._search_regex( + r'(["\'])(?P<player>https?://noco\.tv/(?:[^/]+/)+NocoPlayer.+?\.swf.*?)\1', + webpage, 'noco player', group='player', + default='http://noco.tv/cdata/js/player/NocoPlayer-v1.2.40.swf') + + qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(player_url).query) + ts = int_or_none(qs.get('ts', [None])[0]) + self._ts_offset = ts - self._ts() if ts else 0 + self._referer = player_url medias = self._call_api( 'shows/%s/medias' % video_id, @@ -155,8 +177,8 @@ class NocoIE(InfoExtractor): 'format_id': format_id_extended, 'width': int_or_none(fmt.get('res_width')), 'height': int_or_none(fmt.get('res_lines')), - 'abr': int_or_none(fmt.get('audiobitrate')), - 'vbr': int_or_none(fmt.get('videobitrate')), + 'abr': int_or_none(fmt.get('audiobitrate'), 1000), + 'vbr': int_or_none(fmt.get('videobitrate'), 1000), 'filesize': int_or_none(fmt.get('filesize')), 'format_note': qualities[format_id].get('quality_name'), 'quality': qualities[format_id].get('priority'), diff --git a/youtube_dl/extractor/nosvideo.py b/youtube_dl/extractor/nosvideo.py index f5ef856db..eab816e49 100644 --- a/youtube_dl/extractor/nosvideo.py +++ b/youtube_dl/extractor/nosvideo.py @@ -4,11 +4,9 @@ from __future__ import unicode_literals import re from .common import InfoExtractor -from ..compat import ( - compat_urllib_request, -) from ..utils import ( ExtractorError, + sanitized_Request, urlencode_postdata, xpath_text, xpath_with_ns, @@ -41,7 +39,7 @@ class NosVideoIE(InfoExtractor): 'op': 'download1', 'method_free': 'Continue to Video', } - req = compat_urllib_request.Request(url, urlencode_postdata(fields)) + req = sanitized_Request(url, urlencode_postdata(fields)) req.add_header('Content-type', 'application/x-www-form-urlencoded') webpage = self._download_webpage(req, video_id, 'Downloading download page') diff --git a/youtube_dl/extractor/novamov.py b/youtube_dl/extractor/novamov.py index 04d779890..d68c1ad79 100644 --- a/youtube_dl/extractor/novamov.py +++ b/youtube_dl/extractor/novamov.py @@ -3,11 +3,13 @@ from 
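# A standalone sketch of the clock-offset trick the noco.tv hunk adds: read
# the server timestamp from the player URL's "ts" parameter, keep the delta
# from local time, and sign each API call with an md5 token. The player URL
# is a made-up example; the '#8S?uCraTedap6a' salt is the one visible in the
# hunk.
import hashlib
import time
try:
    from urllib.parse import urlparse, parse_qs
except ImportError:
    from urlparse import urlparse, parse_qs

def local_ms():
    return int(time.time() * 1000)

player_url = 'http://noco.tv/cdata/js/player/NocoPlayer-v1.2.40.swf?ts=1449870000000'
qs = parse_qs(urlparse(player_url).query)
server_ts = int(qs.get('ts', [0])[0])
ts_offset = server_ts - local_ms() if server_ts else 0

ts = str(local_ms() + ts_offset)  # timestamp closest to the server's clock
tk = hashlib.md5((hashlib.md5(ts.encode('ascii')).hexdigest() +
                  '#8S?uCraTedap6a').encode('ascii')).hexdigest()
# ts and tk are then interpolated into the API URL template.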
__future__ import unicode_literals import re from .common import InfoExtractor -from ..compat import ( - compat_urlparse, -) +from ..compat import compat_urlparse from ..utils import ( ExtractorError, + NO_DEFAULT, + encode_dict, + sanitized_Request, + urlencode_postdata, ) @@ -15,15 +17,16 @@ class NovaMovIE(InfoExtractor): IE_NAME = 'novamov' IE_DESC = 'NovaMov' - _VALID_URL_TEMPLATE = r'http://(?:(?:www\.)?%(host)s/(?:file|video)/|(?:(?:embed|www)\.)%(host)s/embed\.php\?(?:.*?&)?v=)(?P<id>[a-z\d]{13})' + _VALID_URL_TEMPLATE = r'http://(?:(?:www\.)?%(host)s/(?:file|video|mobile/#/videos)/|(?:(?:embed|www)\.)%(host)s/embed\.php\?(?:.*?&)?v=)(?P<id>[a-z\d]{13})' _VALID_URL = _VALID_URL_TEMPLATE % {'host': 'novamov\.com'} _HOST = 'www.novamov.com' _FILE_DELETED_REGEX = r'This file no longer exists on our servers!</h2>' - _FILEKEY_REGEX = r'flashvars\.filekey="(?P<filekey>[^"]+)";' + _FILEKEY_REGEX = r'flashvars\.filekey=(?P<filekey>"?[^"]+"?);' _TITLE_REGEX = r'(?s)<div class="v_tab blockborder rounded5" id="v_tab1">\s*<h3>([^<]+)</h3>' _DESCRIPTION_REGEX = r'(?s)<div class="v_tab blockborder rounded5" id="v_tab1">\s*<h3>[^<]+</h3><p>([^<]+)</p>' + _URL_TEMPLATE = 'http://%s/video/%s' _TEST = { 'url': 'http://www.novamov.com/video/4rurhn9x446jj', @@ -37,20 +40,50 @@ class NovaMovIE(InfoExtractor): 'skip': '"Invalid token" errors abound (in web interface as well as youtube-dl, there is nothing we can do about it.)' } - def _real_extract(self, url): - mobj = re.match(self._VALID_URL, url) - video_id = mobj.group('id') - - page = self._download_webpage( - 'http://%s/video/%s' % (self._HOST, video_id), video_id, 'Downloading video page') - - if re.search(self._FILE_DELETED_REGEX, page) is not None: + def _check_existence(self, webpage, video_id): + if re.search(self._FILE_DELETED_REGEX, webpage) is not None: raise ExtractorError('Video %s does not exist' % video_id, expected=True) - filekey = self._search_regex(self._FILEKEY_REGEX, page, 'filekey') - - title = self._html_search_regex(self._TITLE_REGEX, page, 'title', fatal=False) - description = self._html_search_regex(self._DESCRIPTION_REGEX, page, 'description', default='', fatal=False) + def _real_extract(self, url): + video_id = self._match_id(url) + + url = self._URL_TEMPLATE % (self._HOST, video_id) + + webpage = self._download_webpage( + url, video_id, 'Downloading video page') + + self._check_existence(webpage, video_id) + + def extract_filekey(default=NO_DEFAULT): + filekey = self._search_regex( + self._FILEKEY_REGEX, webpage, 'filekey', default=default) + if filekey is not default and (filekey[0] != '"' or filekey[-1] != '"'): + return self._search_regex( + r'var\s+%s\s*=\s*"([^"]+)"' % re.escape(filekey), webpage, 'filekey', default=default) + else: + return filekey + + filekey = extract_filekey(default=None) + + if not filekey: + fields = self._hidden_inputs(webpage) + post_url = self._search_regex( + r'<form[^>]+action=(["\'])(?P<url>.+?)\1', webpage, + 'post url', default=url, group='url') + if not post_url.startswith('http'): + post_url = compat_urlparse.urljoin(url, post_url) + request = sanitized_Request( + post_url, urlencode_postdata(encode_dict(fields))) + request.add_header('Content-Type', 'application/x-www-form-urlencoded') + request.add_header('Referer', post_url) + webpage = self._download_webpage( + request, video_id, 'Downloading continue to the video page') + self._check_existence(webpage, video_id) + + filekey = extract_filekey() + + title = self._html_search_regex(self._TITLE_REGEX, webpage, 'title', fatal=False) + 
description = self._html_search_regex(self._DESCRIPTION_REGEX, webpage, 'description', default='', fatal=False) api_response = self._download_webpage( 'http://%s/api/player.api.php?key=%s&file=%s' % (self._HOST, filekey, video_id), video_id, @@ -69,3 +102,89 @@ class NovaMovIE(InfoExtractor): 'title': title, 'description': description } + + +class WholeCloudIE(NovaMovIE): + IE_NAME = 'wholecloud' + IE_DESC = 'WholeCloud' + + _VALID_URL = NovaMovIE._VALID_URL_TEMPLATE % {'host': '(?:wholecloud\.net|movshare\.(?:net|sx|ag))'} + + _HOST = 'www.wholecloud.net' + + _FILE_DELETED_REGEX = r'>This file no longer exists on our servers.<' + _TITLE_REGEX = r'<strong>Title:</strong> ([^<]+)</p>' + _DESCRIPTION_REGEX = r'<strong>Description:</strong> ([^<]+)</p>' + + _TEST = { + 'url': 'http://www.wholecloud.net/video/559e28be54d96', + 'md5': 'abd31a2132947262c50429e1d16c1bfd', + 'info_dict': { + 'id': '559e28be54d96', + 'ext': 'flv', + 'title': 'dissapeared image', + 'description': 'optical illusion dissapeared image magic illusion', + } + } + + +class NowVideoIE(NovaMovIE): + IE_NAME = 'nowvideo' + IE_DESC = 'NowVideo' + + _VALID_URL = NovaMovIE._VALID_URL_TEMPLATE % {'host': 'nowvideo\.(?:to|ch|ec|sx|eu|at|ag|co|li)'} + + _HOST = 'www.nowvideo.to' + + _FILE_DELETED_REGEX = r'>This file no longer exists on our servers.<' + _TITLE_REGEX = r'<h4>([^<]+)</h4>' + _DESCRIPTION_REGEX = r'</h4>\s*<p>([^<]+)</p>' + + _TEST = { + 'url': 'http://www.nowvideo.sx/video/f1d6fce9a968b', + 'md5': '12c82cad4f2084881d8bc60ee29df092', + 'info_dict': { + 'id': 'f1d6fce9a968b', + 'ext': 'flv', + 'title': 'youtubedl test video BaWjenozKc', + 'description': 'Description', + }, + } + + +class VideoWeedIE(NovaMovIE): + IE_NAME = 'videoweed' + IE_DESC = 'VideoWeed' + + _VALID_URL = NovaMovIE._VALID_URL_TEMPLATE % {'host': 'videoweed\.(?:es|com)'} + + _HOST = 'www.videoweed.es' + + _FILE_DELETED_REGEX = r'>This file no longer exists on our servers.<' + _TITLE_REGEX = r'<h1 class="text_shadow">([^<]+)</h1>' + _URL_TEMPLATE = 'http://%s/file/%s' + + _TEST = { + 'url': 'http://www.videoweed.es/file/b42178afbea14', + 'md5': 'abd31a2132947262c50429e1d16c1bfd', + 'info_dict': { + 'id': 'b42178afbea14', + 'ext': 'flv', + 'title': 'optical illusion dissapeared image magic illusion', + 'description': '' + }, + } + + +class CloudTimeIE(NovaMovIE): + IE_NAME = 'cloudtime' + IE_DESC = 'CloudTime' + + _VALID_URL = NovaMovIE._VALID_URL_TEMPLATE % {'host': 'cloudtime\.to'} + + _HOST = 'www.cloudtime.to' + + _FILE_DELETED_REGEX = r'>This file no longer exists on our servers.<' + _TITLE_REGEX = r'<div[^>]+class=["\']video_det["\'][^>]*>\s*<strong>([^<]+)</strong>' + + _TEST = None diff --git a/youtube_dl/extractor/nowness.py b/youtube_dl/extractor/nowness.py index b97f62fdb..446f5901c 100644 --- a/youtube_dl/extractor/nowness.py +++ b/youtube_dl/extractor/nowness.py @@ -1,12 +1,15 @@ # encoding: utf-8 from __future__ import unicode_literals -from .brightcove import BrightcoveIE +from .brightcove import ( + BrightcoveLegacyIE, + BrightcoveNewIE, +) from .common import InfoExtractor -from ..utils import ExtractorError -from ..compat import ( - compat_str, - compat_urllib_request, +from ..compat import compat_str +from ..utils import ( + ExtractorError, + sanitized_Request, ) @@ -22,10 +25,13 @@ class NownessBaseIE(InfoExtractor): 'http://www.nowness.com/iframe?id=%s' % video_id, video_id, note='Downloading player JavaScript', errnote='Unable to download player JavaScript') - bc_url = BrightcoveIE._extract_brightcove_url(player_code) - if bc_url 
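# The new extract_filekey helper above copes with pages where
# flashvars.filekey is either a quoted literal or the name of a JS variable;
# the same two-step resolution on toy page text:
import re

FILEKEY_RE = r'flashvars\.filekey=(?P<filekey>"?[^"]+"?);'

def extract_filekey(webpage):
    filekey = re.search(FILEKEY_RE, webpage).group('filekey')
    if filekey[0] == '"' and filekey[-1] == '"':
        return filekey.strip('"')  # quoted literal, use it directly
    # otherwise it is a variable name: resolve var NAME = "value";
    return re.search(
        r'var\s+%s\s*=\s*"([^"]+)"' % re.escape(filekey), webpage).group(1)

print(extract_filekey('flashvars.filekey="abc.123";'))                 # abc.123
print(extract_filekey('var fkzd="xyz.789"; flashvars.filekey=fkzd;'))  # xyz.789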
is None: - raise ExtractorError('Could not find player definition') - return self.url_result(bc_url, 'Brightcove') + bc_url = BrightcoveLegacyIE._extract_brightcove_url(player_code) + if bc_url: + return self.url_result(bc_url, BrightcoveLegacyIE.ie_key()) + bc_url = BrightcoveNewIE._extract_url(player_code) + if bc_url: + return self.url_result(bc_url, BrightcoveNewIE.ie_key()) + raise ExtractorError('Could not find player definition') elif source == 'vimeo': return self.url_result('http://vimeo.com/%s' % video_id, 'Vimeo') elif source == 'youtube': @@ -37,7 +43,7 @@ class NownessBaseIE(InfoExtractor): def _api_request(self, url, request_path): display_id = self._match_id(url) - request = compat_urllib_request.Request( + request = sanitized_Request( 'http://api.nowness.com/api/' + request_path % display_id, headers={ 'X-Nowness-Language': 'zh-cn' if 'cn.nowness.com' in url else 'en-us', diff --git a/youtube_dl/extractor/nowtv.py b/youtube_dl/extractor/nowtv.py index c8257719f..fd107aca2 100644 --- a/youtube_dl/extractor/nowtv.py +++ b/youtube_dl/extractor/nowtv.py @@ -1,6 +1,8 @@ # coding: utf-8 from __future__ import unicode_literals +import re + from .common import InfoExtractor from ..compat import compat_str from ..utils import ( @@ -13,8 +15,63 @@ from ..utils import ( ) -class NowTVIE(InfoExtractor): - _VALID_URL = r'https?://(?:www\.)?nowtv\.(?:de|at|ch)/(?:rtl|rtl2|rtlnitro|superrtl|ntv|vox)/(?P<id>.+?)/(?:player|preview)' +class NowTVBaseIE(InfoExtractor): + _VIDEO_FIELDS = ( + 'id', 'title', 'free', 'geoblocked', 'articleLong', 'articleShort', + 'broadcastStartDate', 'seoUrl', 'duration', 'files', + 'format.defaultImage169Format', 'format.defaultImage169Logo') + + def _extract_video(self, info, display_id=None): + video_id = compat_str(info['id']) + + files = info['files'] + if not files: + if info.get('geoblocked', False): + raise ExtractorError( + 'Video %s is not available from your location due to geo restriction' % video_id, + expected=True) + if not info.get('free', True): + raise ExtractorError( + 'Video %s is not available for free' % video_id, expected=True) + + formats = [] + for item in files['items']: + if determine_ext(item['path']) != 'f4v': + continue + app, play_path = remove_start(item['path'], '/').split('/', 1) + formats.append({ + 'url': 'rtmpe://fms.rtl.de', + 'app': app, + 'play_path': 'mp4:%s' % play_path, + 'ext': 'flv', + 'page_url': 'http://rtlnow.rtl.de', + 'player_url': 'http://cdn.static-fra.de/now/vodplayer.swf', + 'tbr': int_or_none(item.get('bitrate')), + }) + self._sort_formats(formats) + + title = info['title'] + description = info.get('articleLong') or info.get('articleShort') + timestamp = parse_iso8601(info.get('broadcastStartDate'), ' ') + duration = parse_duration(info.get('duration')) + + f = info.get('format', {}) + thumbnail = f.get('defaultImage169Format') or f.get('defaultImage169Logo') + + return { + 'id': video_id, + 'display_id': display_id or info.get('seoUrl'), + 'title': title, + 'description': description, + 'thumbnail': thumbnail, + 'timestamp': timestamp, + 'duration': duration, + 'formats': formats, + } + + +class NowTVIE(NowTVBaseIE): + _VALID_URL = r'https?://(?:www\.)?nowtv\.(?:de|at|ch)/(?:rtl|rtl2|rtlnitro|superrtl|ntv|vox)/(?P<show_id>[^/]+)/(?:(?:list/[^/]+|jahr/\d{4}/\d{1,2})/)?(?P<id>[^/]+)/(?:player|preview)' _TESTS = [{ # rtl @@ -23,7 +80,7 @@ class NowTVIE(InfoExtractor): 'id': '203519', 'display_id': 'bauer-sucht-frau/die-neuen-bauern-und-eine-hochzeit', 'ext': 'flv', - 'title': 'Die neuen Bauern und eine 
Hochzeit', + 'title': 'Inka Bause stellt die neuen Bauern vor', 'description': 'md5:e234e1ed6d63cf06be5c070442612e7e', 'thumbnail': 're:^https?://.*\.jpg$', 'timestamp': 1432580700, @@ -133,61 +190,71 @@ class NowTVIE(InfoExtractor): }, { 'url': 'http://www.nowtv.de/rtl2/echtzeit/list/aktuell/schnelles-geld-am-ende-der-welt/player', 'only_matching': True, + }, { + 'url': 'http://www.nowtv.de/rtl2/zuhause-im-glueck/jahr/2015/11/eine-erschuetternde-diagnose/player', + 'only_matching': True, }] def _real_extract(self, url): - display_id = self._match_id(url) - display_id_split = display_id.split('/') - if len(display_id) > 2: - display_id = '/'.join((display_id_split[0], display_id_split[-1])) + mobj = re.match(self._VALID_URL, url) + display_id = '%s/%s' % (mobj.group('show_id'), mobj.group('id')) info = self._download_json( - 'https://api.nowtv.de/v3/movies/%s?fields=id,title,free,geoblocked,articleLong,articleShort,broadcastStartDate,seoUrl,duration,format,files' % display_id, - display_id) + 'https://api.nowtv.de/v3/movies/%s?fields=%s' + % (display_id, ','.join(self._VIDEO_FIELDS)), display_id) - video_id = compat_str(info['id']) + return self._extract_video(info, display_id) - files = info['files'] - if not files: - if info.get('geoblocked', False): - raise ExtractorError( - 'Video %s is not available from your location due to geo restriction' % video_id, - expected=True) - if not info.get('free', True): - raise ExtractorError( - 'Video %s is not available for free' % video_id, expected=True) - formats = [] - for item in files['items']: - if determine_ext(item['path']) != 'f4v': - continue - app, play_path = remove_start(item['path'], '/').split('/', 1) - formats.append({ - 'url': 'rtmpe://fms.rtl.de', - 'app': app, - 'play_path': 'mp4:%s' % play_path, - 'ext': 'flv', - 'page_url': url, - 'player_url': 'http://rtl-now.rtl.de/includes/nc_player.swf', - 'tbr': int_or_none(item.get('bitrate')), - }) - self._sort_formats(formats) +class NowTVListIE(NowTVBaseIE): + _VALID_URL = r'https?://(?:www\.)?nowtv\.(?:de|at|ch)/(?:rtl|rtl2|rtlnitro|superrtl|ntv|vox)/(?P<show_id>[^/]+)/list/(?P<id>[^?/#&]+)$' - title = info['title'] - description = info.get('articleLong') or info.get('articleShort') - timestamp = parse_iso8601(info.get('broadcastStartDate'), ' ') - duration = parse_duration(info.get('duration')) + _SHOW_FIELDS = ('title', ) + _SEASON_FIELDS = ('id', 'headline', 'seoheadline', ) - f = info.get('format', {}) - thumbnail = f.get('defaultImage169Format') or f.get('defaultImage169Logo') + _TESTS = [{ + 'url': 'http://www.nowtv.at/rtl/stern-tv/list/aktuell', + 'info_dict': { + 'id': '17006', + 'title': 'stern TV - Aktuell', + }, + 'playlist_count': 1, + }, { + 'url': 'http://www.nowtv.at/rtl/das-supertalent/list/free-staffel-8', + 'info_dict': { + 'id': '20716', + 'title': 'Das Supertalent - FREE Staffel 8', + }, + 'playlist_count': 14, + }] - return { - 'id': video_id, - 'display_id': display_id, - 'title': title, - 'description': description, - 'thumbnail': thumbnail, - 'timestamp': timestamp, - 'duration': duration, - 'formats': formats, - } + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + show_id = mobj.group('show_id') + season_id = mobj.group('id') + + fields = [] + fields.extend(self._SHOW_FIELDS) + fields.extend('formatTabs.%s' % field for field in self._SEASON_FIELDS) + fields.extend( + 'formatTabs.formatTabPages.container.movies.%s' % field + for field in self._VIDEO_FIELDS) + + list_info = self._download_json( + 
'https://api.nowtv.de/v3/formats/seo?fields=%s&name=%s.php' + % (','.join(fields), show_id), + season_id) + + season = next( + season for season in list_info['formatTabs']['items'] + if season.get('seoheadline') == season_id) + + title = '%s - %s' % (list_info['title'], season['headline']) + + entries = [] + for container in season['formatTabPages']['items']: + for info in ((container.get('container') or {}).get('movies') or {}).get('items') or []: + entries.append(self._extract_video(info)) + + return self.playlist_result( + entries, compat_str(season.get('id') or season_id), title) diff --git a/youtube_dl/extractor/nowvideo.py b/youtube_dl/extractor/nowvideo.py deleted file mode 100644 index 17baa9679..000000000 --- a/youtube_dl/extractor/nowvideo.py +++ /dev/null @@ -1,28 +0,0 @@ -from __future__ import unicode_literals - -from .novamov import NovaMovIE - - -class NowVideoIE(NovaMovIE): - IE_NAME = 'nowvideo' - IE_DESC = 'NowVideo' - - _VALID_URL = NovaMovIE._VALID_URL_TEMPLATE % {'host': 'nowvideo\.(?:ch|ec|sx|eu|at|ag|co|li)'} - - _HOST = 'www.nowvideo.ch' - - _FILE_DELETED_REGEX = r'>This file no longer exists on our servers.<' - _FILEKEY_REGEX = r'var fkzd="([^"]+)";' - _TITLE_REGEX = r'<h4>([^<]+)</h4>' - _DESCRIPTION_REGEX = r'</h4>\s*<p>([^<]+)</p>' - - _TEST = { - 'url': 'http://www.nowvideo.ch/video/0mw0yow7b6dxa', - 'md5': 'f8fbbc8add72bd95b7850c6a02fc8817', - 'info_dict': { - 'id': '0mw0yow7b6dxa', - 'ext': 'flv', - 'title': 'youtubedl test video _BaW_jenozKc.mp4', - 'description': 'Description', - } - } diff --git a/youtube_dl/extractor/nrk.py b/youtube_dl/extractor/nrk.py index d066a96db..6ff13050d 100644 --- a/youtube_dl/extractor/nrk.py +++ b/youtube_dl/extractor/nrk.py @@ -4,7 +4,9 @@ from __future__ import unicode_literals import re from .common import InfoExtractor +from ..compat import compat_urlparse from ..utils import ( + determine_ext, ExtractorError, float_or_none, parse_duration, @@ -47,12 +49,22 @@ class NRKIE(InfoExtractor): 'http://v8.psapi.nrk.no/mediaelement/%s' % video_id, video_id, 'Downloading media JSON') - if data['usageRights']['isGeoBlocked']: - raise ExtractorError( - 'NRK har ikke rettig-heter til å vise dette programmet utenfor Norge', - expected=True) + media_url = data.get('mediaUrl') - video_url = data['mediaUrl'] + '?hdcore=3.5.0&plugin=aasp-3.5.0.151.81' + if not media_url: + if data['usageRights']['isGeoBlocked']: + raise ExtractorError( + 'NRK har ikke rettigheter til å vise dette programmet utenfor Norge', + expected=True) + + if determine_ext(media_url) == 'f4m': + formats = self._extract_f4m_formats( + media_url + '?hdcore=3.5.0&plugin=aasp-3.5.0.151.81', video_id, f4m_id='hds') + else: + formats = [{ + 'url': media_url, + 'ext': 'flv', + }] duration = parse_duration(data.get('duration')) @@ -66,12 +78,11 @@ class NRKIE(InfoExtractor): return { 'id': video_id, - 'url': video_url, - 'ext': 'flv', 'title': data['title'], 'description': data['description'], 'duration': duration, 'thumbnail': thumbnail, + 'formats': formats, } @@ -196,20 +207,6 @@ class NRKTVIE(InfoExtractor): } ] - def _debug_print(self, txt): - if self._downloader.params.get('verbose', False): - self.to_screen('[debug] %s' % txt) - - def _get_subtitles(self, subtitlesurl, video_id, baseurl): - url = "%s%s" % (baseurl, subtitlesurl) - self._debug_print('%s: Subtitle url: %s' % (video_id, url)) - captions = self._download_xml( - url, video_id, 'Downloading subtitles') - lang = captions.get('lang', 'no') - return {lang: [ - {'ext': 'ttml', 'url': url}, - ]} - def 
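# A sketch of how the new NowTVListIE composes its "fields" query string and
# picks the requested season out of the JSON. The field names are the ones in
# the hunk; the season data is invented:
_SHOW_FIELDS = ('title',)
_SEASON_FIELDS = ('id', 'headline', 'seoheadline')

fields = []
fields.extend(_SHOW_FIELDS)
fields.extend('formatTabs.%s' % f for f in _SEASON_FIELDS)
print(','.join(fields))
# -> title,formatTabs.id,formatTabs.headline,formatTabs.seoheadline

seasons = [{'id': 20716, 'headline': 'FREE Staffel 8', 'seoheadline': 'free-staffel-8'}]
season_id = 'free-staffel-8'
season = next(s for s in seasons if s.get('seoheadline') == season_id)
print(season['headline'])  # FREE Staffel 8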
_extract_f4m(self, manifest_url, video_id): return self._extract_f4m_formats( manifest_url + '?hdcore=3.1.1&plugin=aasp-3.1.1.69.124', video_id, f4m_id='hds') @@ -218,7 +215,7 @@ class NRKTVIE(InfoExtractor): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') part_id = mobj.group('part_id') - baseurl = mobj.group('baseurl') + base_url = mobj.group('baseurl') webpage = self._download_webpage(url, video_id) @@ -278,11 +275,14 @@ class NRKTVIE(InfoExtractor): self._sort_formats(formats) subtitles_url = self._html_search_regex( - r'data-subtitlesurl[ ]*=[ ]*"([^"]+)"', - webpage, 'subtitle URL', default=None) - subtitles = None + r'data-subtitlesurl\s*=\s*(["\'])(?P<url>.+?)\1', + webpage, 'subtitle URL', default=None, group='url') + subtitles = {} if subtitles_url: - subtitles = self.extract_subtitles(subtitles_url, video_id, baseurl) + subtitles['no'] = [{ + 'ext': 'ttml', + 'url': compat_urlparse.urljoin(base_url, subtitles_url), + }] return { 'id': video_id, diff --git a/youtube_dl/extractor/nuvid.py b/youtube_dl/extractor/nuvid.py index 57928f2ae..9fa7cefad 100644 --- a/youtube_dl/extractor/nuvid.py +++ b/youtube_dl/extractor/nuvid.py @@ -3,11 +3,9 @@ from __future__ import unicode_literals import re from .common import InfoExtractor -from ..compat import ( - compat_urllib_request, -) from ..utils import ( parse_duration, + sanitized_Request, unified_strdate, ) @@ -33,7 +31,7 @@ class NuvidIE(InfoExtractor): formats = [] for dwnld_speed, format_id in [(0, '3gp'), (5, 'mp4')]: - request = compat_urllib_request.Request( + request = sanitized_Request( 'http://m.nuvid.com/play/%s' % video_id) request.add_header('Cookie', 'skip_download_page=1; dwnld_speed=%d; adv_show=1' % dwnld_speed) webpage = self._download_webpage( diff --git a/youtube_dl/extractor/odnoklassniki.py b/youtube_dl/extractor/odnoklassniki.py index ccc88cfb1..184c7a323 100644 --- a/youtube_dl/extractor/odnoklassniki.py +++ b/youtube_dl/extractor/odnoklassniki.py @@ -13,7 +13,7 @@ from ..utils import ( class OdnoklassnikiIE(InfoExtractor): - _VALID_URL = r'https?://(?:www\.)?(?:odnoklassniki|ok)\.ru/(?:video|web-api/video/moviePlayer)/(?P<id>[\d-]+)' + _VALID_URL = r'https?://(?:www\.)?(?:odnoklassniki|ok)\.ru/(?:video(?:embed)?|web-api/video/moviePlayer)/(?P<id>[\d-]+)' _TESTS = [{ # metadata in JSON 'url': 'http://ok.ru/video/20079905452', @@ -66,6 +66,9 @@ class OdnoklassnikiIE(InfoExtractor): }, { 'url': 'http://www.ok.ru/video/20648036891', 'only_matching': True, + }, { + 'url': 'http://www.ok.ru/videoembed/20648036891', + 'only_matching': True, }] def _real_extract(self, url): diff --git a/youtube_dl/extractor/ooyala.py b/youtube_dl/extractor/ooyala.py index a262a9f6d..20b984288 100644 --- a/youtube_dl/extractor/ooyala.py +++ b/youtube_dl/extractor/ooyala.py @@ -1,108 +1,85 @@ from __future__ import unicode_literals import re -import json import base64 from .common import InfoExtractor from ..utils import ( - unescapeHTML, - ExtractorError, - determine_ext, int_or_none, + float_or_none, + ExtractorError, + unsmuggle_url, ) +from ..compat import compat_urllib_parse class OoyalaBaseIE(InfoExtractor): - - def _extract_result(self, info, more_info): - embedCode = info['embedCode'] - video_url = info.get('ipad_url') or info['url'] - - if determine_ext(video_url) == 'm3u8': - formats = self._extract_m3u8_formats(video_url, embedCode, ext='mp4') - else: - formats = [{ - 'url': video_url, - 'ext': 'mp4', - }] - - return { - 'id': embedCode, - 'title': unescapeHTML(info['title']), - 'formats': formats, - 
'description': unescapeHTML(more_info['description']), - 'thumbnail': more_info['promo'], + _PLAYER_BASE = 'http://player.ooyala.com/' + _CONTENT_TREE_BASE = _PLAYER_BASE + 'player_api/v1/content_tree/' + _AUTHORIZATION_URL_TEMPLATE = _PLAYER_BASE + 'sas/player_api/v1/authorization/embed_code/%s/%s?' + + def _extract(self, content_tree_url, video_id, domain='example.org'): + content_tree = self._download_json(content_tree_url, video_id)['content_tree'] + metadata = content_tree[list(content_tree)[0]] + embed_code = metadata['embed_code'] + pcode = metadata.get('asset_pcode') or embed_code + video_info = { + 'id': embed_code, + 'title': metadata['title'], + 'description': metadata.get('description'), + 'thumbnail': metadata.get('thumbnail_image') or metadata.get('promo_image'), + 'duration': float_or_none(metadata.get('duration'), 1000), } - def _extract(self, player_url, video_id): - player = self._download_webpage(player_url, video_id) - mobile_url = self._search_regex(r'mobile_player_url="(.+?)&device="', - player, 'mobile player url') - # Looks like some videos are only available for particular devices - # (e.g. http://player.ooyala.com/player.js?embedCode=x1b3lqZDq9y_7kMyC2Op5qo-p077tXD0 - # is only available for ipad) - # Working around with fetching URLs for all the devices found starting with 'unknown' - # until we succeed or eventually fail for each device. - devices = re.findall(r'device\s*=\s*"([^"]+)";', player) - devices.remove('unknown') - devices.insert(0, 'unknown') - for device in devices: - mobile_player = self._download_webpage( - '%s&device=%s' % (mobile_url, device), video_id, - 'Downloading mobile player JS for %s device' % device) - videos_info = self._search_regex( - r'var streams=window.oo_testEnv\?\[\]:eval\("\((\[{.*?}\])\)"\);', - mobile_player, 'info', fatal=False, default=None) - if videos_info: - break - - if not videos_info: - formats = [] + urls = [] + formats = [] + for supported_format in ('mp4', 'm3u8', 'hds', 'rtmp'): auth_data = self._download_json( - 'http://player.ooyala.com/sas/player_api/v1/authorization/embed_code/%s/%s?domain=www.example.org&supportedFormats=mp4,webm' % (video_id, video_id), - video_id) - - cur_auth_data = auth_data['authorization_data'][video_id] - - for stream in cur_auth_data['streams']: - formats.append({ - 'url': base64.b64decode(stream['url']['data'].encode('ascii')).decode('utf-8'), - 'ext': stream.get('delivery_type'), - 'format': stream.get('video_codec'), - 'format_id': stream.get('profile'), - 'width': int_or_none(stream.get('width')), - 'height': int_or_none(stream.get('height')), - 'abr': int_or_none(stream.get('audio_bitrate')), - 'vbr': int_or_none(stream.get('video_bitrate')), - }) - if formats: - return { - 'id': video_id, - 'formats': formats, - 'title': 'Ooyala video', - } - - if not cur_auth_data['authorized']: - raise ExtractorError(cur_auth_data['message'], expected=True) - - if not videos_info: - raise ExtractorError('Unable to extract info') - videos_info = videos_info.replace('\\"', '"') - videos_more_info = self._search_regex( - r'eval\("\(({.*?\\"promo\\".*?})\)"', mobile_player, 'more info').replace('\\"', '"') - videos_info = json.loads(videos_info) - videos_more_info = json.loads(videos_more_info) - - if videos_more_info.get('lineup'): - videos = [self._extract_result(info, more_info) for (info, more_info) in zip(videos_info, videos_more_info['lineup'])] - return { - '_type': 'playlist', - 'id': video_id, - 'title': unescapeHTML(videos_more_info['title']), - 'entries': videos, - } - else: - return 
self._extract_result(videos_info[0], videos_more_info) + self._AUTHORIZATION_URL_TEMPLATE % (pcode, embed_code) + + compat_urllib_parse.urlencode({ + 'domain': domain, + 'supportedFormats': supported_format + }), + video_id, 'Downloading %s JSON' % supported_format) + + cur_auth_data = auth_data['authorization_data'][embed_code] + + if cur_auth_data['authorized']: + for stream in cur_auth_data['streams']: + url = base64.b64decode( + stream['url']['data'].encode('ascii')).decode('utf-8') + if url in urls: + continue + urls.append(url) + delivery_type = stream['delivery_type'] + if delivery_type == 'hls' or '.m3u8' in url: + formats.extend(self._extract_m3u8_formats( + url, embed_code, 'mp4', 'm3u8_native', + m3u8_id='hls', fatal=False)) + elif delivery_type == 'hds' or '.f4m' in url: + formats.extend(self._extract_f4m_formats( + url + '?hdcore=3.7.0', embed_code, f4m_id='hds', fatal=False)) + elif '.smil' in url: + formats.extend(self._extract_smil_formats( + url, embed_code, fatal=False)) + else: + formats.append({ + 'url': url, + 'ext': stream.get('delivery_type'), + 'vcodec': stream.get('video_codec'), + 'format_id': delivery_type, + 'width': int_or_none(stream.get('width')), + 'height': int_or_none(stream.get('height')), + 'abr': int_or_none(stream.get('audio_bitrate')), + 'vbr': int_or_none(stream.get('video_bitrate')), + 'fps': float_or_none(stream.get('framerate')), + }) + else: + raise ExtractorError('%s said: %s' % ( + self.IE_NAME, cur_auth_data['message']), expected=True) + self._sort_formats(formats) + + video_info['formats'] = formats + return video_info class OoyalaIE(OoyalaBaseIE): @@ -117,6 +94,7 @@ class OoyalaIE(OoyalaBaseIE): 'ext': 'mp4', 'title': 'Explaining Data Recovery from Hard Drives and SSDs', 'description': 'How badly damaged does a drive have to be to defeat Russell and his crew? Apparently, smashed to bits.', + 'duration': 853.386, }, }, { # Only available for ipad @@ -125,7 +103,7 @@ class OoyalaIE(OoyalaBaseIE): 'id': 'x1b3lqZDq9y_7kMyC2Op5qo-p077tXD0', 'ext': 'mp4', 'title': 'Simulation Overview - Levels of Simulation', - 'description': '', + 'duration': 194.948, }, }, { @@ -136,7 +114,8 @@ class OoyalaIE(OoyalaBaseIE): 'info_dict': { 'id': 'FiOG81ZTrvckcchQxmalf4aQj590qTEx', 'ext': 'mp4', - 'title': 'Ooyala video', + 'title': 'Divide Tool Path.mp4', + 'duration': 204.405, } } ] @@ -151,9 +130,11 @@ class OoyalaIE(OoyalaBaseIE): ie=cls.ie_key()) def _real_extract(self, url): + url, smuggled_data = unsmuggle_url(url, {}) embed_code = self._match_id(url) - player_url = 'http://player.ooyala.com/player.js?embedCode=%s' % embed_code - return self._extract(player_url, embed_code) + domain = smuggled_data.get('domain') + content_tree_url = self._CONTENT_TREE_BASE + 'embed_code/%s/%s' % (embed_code, embed_code) + return self._extract(content_tree_url, embed_code, domain) class OoyalaExternalIE(OoyalaBaseIE): @@ -170,7 +151,7 @@ class OoyalaExternalIE(OoyalaBaseIE): .*?&pcode= ) (?P<pcode>.+?) 
- (&|$) + (?:&|$) ''' _TEST = { @@ -179,7 +160,7 @@ class OoyalaExternalIE(OoyalaBaseIE): 'id': 'FkYWtmazr6Ed8xmvILvKLWjd4QvYZpzG', 'ext': 'mp4', 'title': 'dm_140128_30for30Shorts___JudgingJewellv2', - 'description': '', + 'duration': 1302.0, }, 'params': { # m3u8 download @@ -188,9 +169,6 @@ class OoyalaExternalIE(OoyalaBaseIE): } def _real_extract(self, url): - mobj = re.match(self._VALID_URL, url) - partner_id = mobj.group('partner_id') - video_id = mobj.group('id') - pcode = mobj.group('pcode') - player_url = 'http://player.ooyala.com/player.js?externalId=%s:%s&pcode=%s' % (partner_id, video_id, pcode) - return self._extract(player_url, video_id) + partner_id, video_id, pcode = re.match(self._VALID_URL, url).groups() + content_tree_url = self._CONTENT_TREE_BASE + 'external_id/%s/%s:%s' % (pcode, partner_id, video_id) + return self._extract(content_tree_url, video_id) diff --git a/youtube_dl/extractor/patreon.py b/youtube_dl/extractor/patreon.py index 6cdc2638b..ec8876c28 100644 --- a/youtube_dl/extractor/patreon.py +++ b/youtube_dl/extractor/patreon.py @@ -2,9 +2,7 @@ from __future__ import unicode_literals from .common import InfoExtractor -from ..utils import ( - js_to_json, -) +from ..utils import js_to_json class PatreonIE(InfoExtractor): @@ -65,7 +63,7 @@ class PatreonIE(InfoExtractor): 'password': password, } - request = compat_urllib_request.Request( + request = sanitized_Request( 'https://www.patreon.com/processLogin', compat_urllib_parse.urlencode(login_form).encode('utf-8') ) diff --git a/youtube_dl/extractor/pbs.py b/youtube_dl/extractor/pbs.py index 683c81de3..97e8ffc97 100644 --- a/youtube_dl/extractor/pbs.py +++ b/youtube_dl/extractor/pbs.py @@ -8,22 +8,188 @@ from ..utils import ( ExtractorError, determine_ext, int_or_none, + strip_jsonp, unified_strdate, US_RATINGS, ) class PBSIE(InfoExtractor): + _STATIONS = ( + (r'(?:video|www|player)\.pbs\.org', 'PBS: Public Broadcasting Service'), # http://www.pbs.org/ + (r'video\.aptv\.org', 'APT - Alabama Public Television (WBIQ)'), # http://aptv.org/ + (r'video\.gpb\.org', 'GPB/Georgia Public Broadcasting (WGTV)'), # http://www.gpb.org/ + (r'video\.mpbonline\.org', 'Mississippi Public Broadcasting (WMPN)'), # http://www.mpbonline.org + (r'video\.wnpt\.org', 'Nashville Public Television (WNPT)'), # http://www.wnpt.org + (r'video\.wfsu\.org', 'WFSU-TV (WFSU)'), # http://wfsu.org/ + (r'video\.wsre\.org', 'WSRE (WSRE)'), # http://www.wsre.org + (r'video\.wtcitv\.org', 'WTCI (WTCI)'), # http://www.wtcitv.org + (r'video\.pba\.org', 'WPBA/Channel 30 (WPBA)'), # http://pba.org/ + (r'video\.alaskapublic\.org', 'Alaska Public Media (KAKM)'), # http://alaskapublic.org/kakm + # (r'kuac\.org', 'KUAC (KUAC)'), # http://kuac.org/kuac-tv/ + # (r'ktoo\.org', '360 North (KTOO)'), # http://www.ktoo.org/ + # (r'azpm\.org', 'KUAT 6 (KUAT)'), # http://www.azpm.org/ + (r'video\.azpbs\.org', 'Arizona PBS (KAET)'), # http://www.azpbs.org + (r'portal\.knme\.org', 'KNME-TV/Channel 5 (KNME)'), # http://www.newmexicopbs.org/ + (r'video\.vegaspbs\.org', 'Vegas PBS (KLVX)'), # http://vegaspbs.org/ + (r'watch\.aetn\.org', 'AETN/ARKANSAS ETV NETWORK (KETS)'), # http://www.aetn.org/ + (r'video\.ket\.org', 'KET (WKLE)'), # http://www.ket.org/ + (r'video\.wkno\.org', 'WKNO/Channel 10 (WKNO)'), # http://www.wkno.org/ + (r'video\.lpb\.org', 'LPB/LOUISIANA PUBLIC BROADCASTING (WLPB)'), # http://www.lpb.org/ + (r'videos\.oeta\.tv', 'OETA (KETA)'), # http://www.oeta.tv + (r'video\.optv\.org', 'Ozarks Public Television (KOZK)'), # http://www.optv.org/ + 
(r'watch\.wsiu\.org', 'WSIU Public Broadcasting (WSIU)'), # http://www.wsiu.org/ + (r'video\.keet\.org', 'KEET TV (KEET)'), # http://www.keet.org + (r'pbs\.kixe\.org', 'KIXE/Channel 9 (KIXE)'), # http://kixe.org/ + (r'video\.kpbs\.org', 'KPBS San Diego (KPBS)'), # http://www.kpbs.org/ + (r'video\.kqed\.org', 'KQED (KQED)'), # http://www.kqed.org + (r'vids\.kvie\.org', 'KVIE Public Television (KVIE)'), # http://www.kvie.org + (r'video\.pbssocal\.org', 'PBS SoCal/KOCE (KOCE)'), # http://www.pbssocal.org/ + (r'video\.valleypbs\.org', 'ValleyPBS (KVPT)'), # http://www.valleypbs.org/ + (r'video\.cptv\.org', 'CONNECTICUT PUBLIC TELEVISION (WEDH)'), # http://cptv.org + (r'watch\.knpb\.org', 'KNPB Channel 5 (KNPB)'), # http://www.knpb.org/ + (r'video\.soptv\.org', 'SOPTV (KSYS)'), # http://www.soptv.org + # (r'klcs\.org', 'KLCS/Channel 58 (KLCS)'), # http://www.klcs.org + # (r'krcb\.org', 'KRCB Television & Radio (KRCB)'), # http://www.krcb.org + # (r'kvcr\.org', 'KVCR TV/DT/FM :: Vision for the Future (KVCR)'), # http://kvcr.org + (r'video\.rmpbs\.org', 'Rocky Mountain PBS (KRMA)'), # http://www.rmpbs.org + (r'video\.kenw\.org', 'KENW-TV3 (KENW)'), # http://www.kenw.org + (r'video\.kued\.org', 'KUED Channel 7 (KUED)'), # http://www.kued.org + (r'video\.wyomingpbs\.org', 'Wyoming PBS (KCWC)'), # http://www.wyomingpbs.org + (r'video\.cpt12\.org', 'Colorado Public Television / KBDI 12 (KBDI)'), # http://www.cpt12.org/ + (r'video\.kbyueleven\.org', 'KBYU-TV (KBYU)'), # http://www.kbyutv.org/ + (r'video\.thirteen\.org', 'Thirteen/WNET New York (WNET)'), # http://www.thirteen.org + (r'video\.wgbh\.org', 'WGBH/Channel 2 (WGBH)'), # http://wgbh.org + (r'video\.wgby\.org', 'WGBY (WGBY)'), # http://www.wgby.org + (r'watch\.njtvonline\.org', 'NJTV Public Media NJ (WNJT)'), # http://www.njtvonline.org/ + # (r'ripbs\.org', 'Rhode Island PBS (WSBE)'), # http://www.ripbs.org/home/ + (r'watch\.wliw\.org', 'WLIW21 (WLIW)'), # http://www.wliw.org/ + (r'video\.mpt\.tv', 'mpt/Maryland Public Television (WMPB)'), # http://www.mpt.org + (r'watch\.weta\.org', 'WETA Television and Radio (WETA)'), # http://www.weta.org + (r'video\.whyy\.org', 'WHYY (WHYY)'), # http://www.whyy.org + (r'video\.wlvt\.org', 'PBS 39 (WLVT)'), # http://www.wlvt.org/ + (r'video\.wvpt\.net', 'WVPT - Your Source for PBS and More! 
(WVPT)'), # http://www.wvpt.net + (r'video\.whut\.org', 'Howard University Television (WHUT)'), # http://www.whut.org + (r'video\.wedu\.org', 'WEDU PBS (WEDU)'), # http://www.wedu.org + (r'video\.wgcu\.org', 'WGCU Public Media (WGCU)'), # http://www.wgcu.org/ + # (r'wjct\.org', 'WJCT Public Broadcasting (WJCT)'), # http://www.wjct.org + (r'video\.wpbt2\.org', 'WPBT2 (WPBT)'), # http://www.wpbt2.org + (r'video\.wucftv\.org', 'WUCF TV (WUCF)'), # http://wucftv.org + (r'video\.wuft\.org', 'WUFT/Channel 5 (WUFT)'), # http://www.wuft.org + (r'watch\.wxel\.org', 'WXEL/Channel 42 (WXEL)'), # http://www.wxel.org/home/ + (r'video\.wlrn\.org', 'WLRN/Channel 17 (WLRN)'), # http://www.wlrn.org/ + (r'video\.wusf\.usf\.edu', 'WUSF Public Broadcasting (WUSF)'), # http://wusf.org/ + (r'video\.scetv\.org', 'ETV (WRLK)'), # http://www.scetv.org + (r'video\.unctv\.org', 'UNC-TV (WUNC)'), # http://www.unctv.org/ + # (r'pbsguam\.org', 'PBS Guam (KGTF)'), # http://www.pbsguam.org/ + (r'video\.pbshawaii\.org', 'PBS Hawaii - Oceanic Cable Channel 10 (KHET)'), # http://www.pbshawaii.org/ + (r'video\.idahoptv\.org', 'Idaho Public Television (KAID)'), # http://idahoptv.org + (r'video\.ksps\.org', 'KSPS (KSPS)'), # http://www.ksps.org/home/ + (r'watch\.opb\.org', 'OPB (KOPB)'), # http://www.opb.org + (r'watch\.nwptv\.org', 'KWSU/Channel 10 & KTNW/Channel 31 (KWSU)'), # http://www.kwsu.org + (r'video\.will\.illinois\.edu', 'WILL-TV (WILL)'), # http://will.illinois.edu/ + (r'video\.networkknowledge\.tv', 'Network Knowledge - WSEC/Springfield (WSEC)'), # http://www.wsec.tv + (r'video\.wttw\.com', 'WTTW11 (WTTW)'), # http://www.wttw.com/ + # (r'wtvp\.org', 'WTVP & WTVP.org, Public Media for Central Illinois (WTVP)'), # http://www.wtvp.org/ + (r'video\.iptv\.org', 'Iowa Public Television/IPTV (KDIN)'), # http://www.iptv.org/ + (r'video\.ninenet\.org', 'Nine Network (KETC)'), # http://www.ninenet.org + (r'video\.wfwa\.org', 'PBS39 Fort Wayne (WFWA)'), # http://wfwa.org/ + (r'video\.wfyi\.org', 'WFYI Indianapolis (WFYI)'), # http://www.wfyi.org + (r'video\.mptv\.org', 'Milwaukee Public Television (WMVS)'), # http://www.mptv.org + (r'video\.wnin\.org', 'WNIN (WNIN)'), # http://www.wnin.org/ + (r'video\.wnit\.org', 'WNIT Public Television (WNIT)'), # http://www.wnit.org/ + (r'video\.wpt\.org', 'WPT (WPNE)'), # http://www.wpt.org/ + (r'video\.wvut\.org', 'WVUT/Channel 22 (WVUT)'), # http://wvut.org/ + (r'video\.weiu\.net', 'WEIU/Channel 51 (WEIU)'), # http://www.weiu.net + (r'video\.wqpt\.org', 'WQPT-TV (WQPT)'), # http://www.wqpt.org + (r'video\.wycc\.org', 'WYCC PBS Chicago (WYCC)'), # http://www.wycc.org + # (r'lakeshorepublicmedia\.org', 'Lakeshore Public Television (WYIN)'), # http://lakeshorepublicmedia.org/ + (r'video\.wipb\.org', 'WIPB-TV (WIPB)'), # http://wipb.org + (r'video\.indianapublicmedia\.org', 'WTIU (WTIU)'), # http://indianapublicmedia.org/tv/ + (r'watch\.cetconnect\.org', 'CET (WCET)'), # http://www.cetconnect.org + (r'video\.thinktv\.org', 'ThinkTVNetwork (WPTD)'), # http://www.thinktv.org + (r'video\.wbgu\.org', 'WBGU-TV (WBGU)'), # http://wbgu.org + (r'video\.wgvu\.org', 'WGVU TV (WGVU)'), # http://www.wgvu.org/ + (r'video\.netnebraska\.org', 'NET1 (KUON)'), # http://netnebraska.org + (r'video\.pioneer\.org', 'Pioneer Public Television (KWCM)'), # http://www.pioneer.org + (r'watch\.sdpb\.org', 'SDPB Television (KUSD)'), # http://www.sdpb.org + (r'video\.tpt\.org', 'TPT (KTCA)'), # http://www.tpt.org + (r'watch\.ksmq\.org', 'KSMQ (KSMQ)'), # http://www.ksmq.org/ + (r'watch\.kpts\.org', 'KPTS/Channel 8 
(KPTS)'), # http://www.kpts.org/ + (r'watch\.ktwu\.org', 'KTWU/Channel 11 (KTWU)'), # http://ktwu.org + # (r'shptv\.org', 'Smoky Hills Public Television (KOOD)'), # http://www.shptv.org + # (r'kcpt\.org', 'KCPT Kansas City Public Television (KCPT)'), # http://kcpt.org/ + # (r'blueridgepbs\.org', 'Blue Ridge PBS (WBRA)'), # http://www.blueridgepbs.org/ + (r'watch\.easttennesseepbs\.org', 'East Tennessee PBS (WSJK)'), # http://easttennesseepbs.org + (r'video\.wcte\.tv', 'WCTE-TV (WCTE)'), # http://www.wcte.org + (r'video\.wljt\.org', 'WLJT, Channel 11 (WLJT)'), # http://wljt.org/ + (r'video\.wosu\.org', 'WOSU TV (WOSU)'), # http://wosu.org/ + (r'video\.woub\.org', 'WOUB/WOUC (WOUB)'), # http://woub.org/tv/index.php?section=5 + (r'video\.wvpublic\.org', 'WVPB (WVPB)'), # http://wvpublic.org/ + (r'video\.wkyupbs\.org', 'WKYU-PBS (WKYU)'), # http://www.wkyupbs.org + # (r'wyes\.org', 'WYES-TV/New Orleans (WYES)'), # http://www.wyes.org + (r'video\.kera\.org', 'KERA 13 (KERA)'), # http://www.kera.org/ + (r'video\.mpbn\.net', 'MPBN (WCBB)'), # http://www.mpbn.net/ + (r'video\.mountainlake\.org', 'Mountain Lake PBS (WCFE)'), # http://www.mountainlake.org/ + (r'video\.nhptv\.org', 'NHPTV (WENH)'), # http://nhptv.org/ + (r'video\.vpt\.org', 'Vermont PBS (WETK)'), # http://www.vpt.org + (r'video\.witf\.org', 'witf (WITF)'), # http://www.witf.org + (r'watch\.wqed\.org', 'WQED Multimedia (WQED)'), # http://www.wqed.org/ + (r'video\.wmht\.org', 'WMHT Educational Telecommunications (WMHT)'), # http://www.wmht.org/home/ + (r'video\.deltabroadcasting\.org', 'Q-TV (WDCQ)'), # http://www.deltabroadcasting.org + (r'video\.dptv\.org', 'WTVS Detroit Public TV (WTVS)'), # http://www.dptv.org/ + (r'video\.wcmu\.org', 'CMU Public Television (WCMU)'), # http://www.wcmu.org + (r'video\.wkar\.org', 'WKAR-TV (WKAR)'), # http://wkar.org/ + (r'wnmuvideo\.nmu\.edu', 'WNMU-TV Public TV 13 (WNMU)'), # http://wnmutv.nmu.edu + (r'video\.wdse\.org', 'WDSE - WRPT (WDSE)'), # http://www.wdse.org/ + (r'video\.wgte\.org', 'WGTE TV (WGTE)'), # http://www.wgte.org + (r'video\.lptv\.org', 'Lakeland Public Television (KAWE)'), # http://www.lakelandptv.org + # (r'prairiepublic\.org', 'PRAIRIE PUBLIC (KFME)'), # http://www.prairiepublic.org/ + (r'video\.kmos\.org', 'KMOS-TV - Channels 6.1, 6.2 and 6.3 (KMOS)'), # http://www.kmos.org/ + (r'watch\.montanapbs\.org', 'MontanaPBS (KUSM)'), # http://montanapbs.org + (r'video\.krwg\.org', 'KRWG/Channel 22 (KRWG)'), # http://www.krwg.org + (r'video\.kacvtv\.org', 'KACV (KACV)'), # http://www.panhandlepbs.org/home/ + (r'video\.kcostv\.org', 'KCOS/Channel 13 (KCOS)'), # www.kcostv.org + (r'video\.wcny\.org', 'WCNY/Channel 24 (WCNY)'), # http://www.wcny.org + (r'video\.wned\.org', 'WNED (WNED)'), # http://www.wned.org/ + (r'watch\.wpbstv\.org', 'WPBS (WPBS)'), # http://www.wpbstv.org + (r'video\.wskg\.org', 'WSKG Public TV (WSKG)'), # http://wskg.org + (r'video\.wxxi\.org', 'WXXI (WXXI)'), # http://wxxi.org + (r'video\.wpsu\.org', 'WPSU (WPSU)'), # http://www.wpsu.org + # (r'wqln\.org', 'WQLN/Channel 54 (WQLN)'), # http://www.wqln.org + (r'on-demand\.wvia\.org', 'WVIA Public Media Studios (WVIA)'), # http://www.wvia.org/ + (r'video\.wtvi\.org', 'WTVI (WTVI)'), # http://www.wtvi.org/ + # (r'whro\.org', 'WHRO (WHRO)'), # http://whro.org + (r'video\.westernreservepublicmedia\.org', 'Western Reserve PBS (WNEO)'), # http://www.WesternReservePublicMedia.org/ + (r'video\.ideastream\.org', 'WVIZ/PBS ideastream (WVIZ)'), # http://www.wviz.org/ + (r'video\.kcts9\.org', 'KCTS 9 (KCTS)'), # http://kcts9.org/ 
+ (r'video\.basinpbs\.org', 'Basin PBS (KPBT)'), # http://www.basinpbs.org + (r'video\.houstonpbs\.org', 'KUHT / Channel 8 (KUHT)'), # http://www.houstonpublicmedia.org/ + # (r'tamu\.edu', 'KAMU - TV (KAMU)'), # http://KAMU.tamu.edu + # (r'kedt\.org', 'KEDT/Channel 16 (KEDT)'), # http://www.kedt.org + (r'video\.klrn\.org', 'KLRN (KLRN)'), # http://www.klrn.org + (r'video\.klru\.tv', 'KLRU (KLRU)'), # http://www.klru.org + # (r'kmbh\.org', 'KMBH-TV (KMBH)'), # http://www.kmbh.org + # (r'knct\.org', 'KNCT (KNCT)'), # http://www.knct.org + # (r'ktxt\.org', 'KTTZ-TV (KTXT)'), # http://www.ktxt.org + (r'video\.wtjx\.org', 'WTJX Channel 12 (WTJX)'), # http://www.wtjx.org/ + (r'video\.ideastations\.org', 'WCVE PBS (WCVE)'), # http://ideastations.org/ + (r'video\.kbtc\.org', 'KBTC Public Television (KBTC)'), # http://kbtc.org + ) + + IE_NAME = 'pbs' + IE_DESC = 'Public Broadcasting Service (PBS) and member stations: %s' % ', '.join(list(zip(*_STATIONS))[1]) + _VALID_URL = r'''(?x)https?:// (?: # Direct video URL - video\.pbs\.org/(?:viralplayer|video)/(?P<id>[0-9]+)/? | + (?:%s)/(?:viralplayer|video)/(?P<id>[0-9]+)/? | # Article with embedded player (or direct video) (?:www\.)?pbs\.org/(?:[^/]+/){2,5}(?P<presumptive_id>[^/]+?)(?:\.html)?/?(?:$|[?\#]) | # Player - video\.pbs\.org/(?:widget/)?partnerplayer/(?P<player_id>[^/]+)/ + (?:video|player)\.pbs\.org/(?:widget/)?partnerplayer/(?P<player_id>[^/]+)/ ) - ''' + ''' % '|'.join(list(zip(*_STATIONS))[0]) _TESTS = [ { @@ -108,12 +274,12 @@ class PBSIE(InfoExtractor): { 'url': 'http://www.pbs.org/wgbh/americanexperience/films/death/player/', 'info_dict': { - 'id': '2280706814', + 'id': '2276541483', 'display_id': 'player', 'ext': 'mp4', - 'title': 'American Experience - Death and the Civil War', + 'title': 'American Experience - Death and the Civil War, Chapter 1', 'description': 'American Experience, TV’s most-watched history series, brings to life the compelling stories from our past that inform our understanding of the world today.', - 'duration': 6705, + 'duration': 682, 'thumbnail': 're:^https?://.*\.jpg$', }, 'params': { @@ -134,8 +300,57 @@ class PBSIE(InfoExtractor): 'params': { 'skip_download': True, # requires ffmpeg }, + 'skip': 'Expired', + }, + { + # Video embedded in iframe containing angle brackets as attribute's value (e.g. + # "<iframe style='position: absolute;<br />\ntop: 0; left: 0;' ...", see + # https://github.com/rg3/youtube-dl/issues/7059) + 'url': 'http://www.pbs.org/food/features/a-chefs-life-season-3-episode-5-prickly-business/', + 'info_dict': { + 'id': '2365546844', + 'display_id': 'a-chefs-life-season-3-episode-5-prickly-business', + 'ext': 'mp4', + 'title': "A Chef's Life - Season 3, Ep. 
5: Prickly Business", + 'description': 'md5:61db2ddf27c9912f09c241014b118ed1', + 'duration': 1480, + 'thumbnail': 're:^https?://.*\.jpg$', + }, + 'params': { + 'skip_download': True, # requires ffmpeg + }, + }, + { + # Frontline video embedded via flp2012.js + 'url': 'http://www.pbs.org/wgbh/pages/frontline/the-atomic-artists', + 'info_dict': { + 'id': '2070868960', + 'display_id': 'the-atomic-artists', + 'ext': 'mp4', + 'title': 'FRONTLINE - The Atomic Artists', + 'description': 'md5:f5bfbefadf421e8bb8647602011caf8e', + 'duration': 723, + 'thumbnail': 're:^https?://.*\.jpg$', + }, + 'params': { + 'skip_download': True, # requires ffmpeg + }, + }, + { + 'url': 'http://player.pbs.org/widget/partnerplayer/2365297708/?start=0&end=0&chapterbar=false&endscreen=false&topbar=true', + 'only_matching': True, + }, + { + 'url': 'http://watch.knpb.org/video/2365616055/', + 'only_matching': True, + } ] + _ERRORS = { + 101: 'We\'re sorry, but this video is not yet available.', + 403: 'We\'re sorry, but this video is not available in your region due to right restrictions.', + 404: 'We are experiencing technical difficulties that are preventing us from playing the video at this time. Please check back again soon.', + 410: 'This video has expired and is no longer available for online streaming.', + } def _extract_webpage(self, url): mobj = re.match(self._VALID_URL, url) @@ -158,6 +373,7 @@ class PBSIE(InfoExtractor): MEDIA_ID_REGEXES = [ r"div\s*:\s*'videoembed'\s*,\s*mediaid\s*:\s*'(\d+)'", # frontline video embed r'class="coveplayerid">([^<]+)<', # coveplayer + r'<section[^>]+data-coveid="(\d+)"', # coveplayer from http://www.pbs.org/wgbh/frontline/film/real-csi/ r'<input type="hidden" id="pbs_video_id_[0-9]+" value="([0-9]+)"/>', # jwplayer ] @@ -166,9 +382,30 @@ if media_id: return media_id, presumptive_id, upload_date - url = self._search_regex( - r'<iframe\s+[^>]*\s+src=["\']([^\'"]+partnerplayer[^\'"]+)["\']', - webpage, 'player URL') + # Frontline video embedded via flp + video_id = self._search_regex( + r'videoid\s*:\s*"([\d+a-z]{7,})"', webpage, 'videoid', default=None) + if video_id: + # pkg_id calculation is reverse engineered from + # http://www.pbs.org/wgbh/pages/frontline/js/flp2012.js + prg_id = self._search_regex( + r'videoid\s*:\s*"([\d+a-z]{7,})"', webpage, 'videoid')[7:] + if 'q' in prg_id: + prg_id = prg_id.split('q')[1] + prg_id = int(prg_id, 16) + getdir = self._download_json( + 'http://www.pbs.org/wgbh/pages/frontline/.json/getdir/getdir%d.json' % prg_id, + presumptive_id, 'Downloading getdir JSON', + transform_source=strip_jsonp) + return getdir['mid'], presumptive_id, upload_date + + for iframe in re.findall(r'(?s)<iframe(.+?)></iframe>', webpage): + url = self._search_regex( + r'src=(["\'])(?P<url>.+?partnerplayer.+?)\1', iframe, + 'player URL', default=None, group='url') + if url: + break + mobj = re.match(self._VALID_URL, url) player_id = mobj.group('player_id') @@ -196,7 +433,7 @@ return self.playlist_result(entries, display_id) info = self._download_json( - 'http://video.pbs.org/videoInfo/%s?format=json&type=partner' % video_id, + 'http://player.pbs.org/videoInfo/%s?format=json&type=partner' % video_id, display_id) formats = [] @@ -213,13 +450,11 @@ 'Downloading %s video url info' % encoding_name) if redirect_info['status'] == 'error': - if redirect_info['http_code'] == 403: - message = ( - 'The video is not available in your region due to ' - 'right restrictions') - else: - message = 
redirect_info['message'] - raise ExtractorError(message, expected=True) + raise ExtractorError( + '%s said: %s' % ( + self.IE_NAME, + self._ERRORS.get(redirect_info['http_code'], redirect_info['message'])), + expected=True) format_url = redirect_info.get('url') if not format_url: diff --git a/youtube_dl/extractor/periscope.py b/youtube_dl/extractor/periscope.py index 8ad936758..514e9b433 100644 --- a/youtube_dl/extractor/periscope.py +++ b/youtube_dl/extractor/periscope.py @@ -2,17 +2,14 @@ from __future__ import unicode_literals from .common import InfoExtractor -from ..compat import ( - compat_urllib_parse, - compat_urllib_request, -) from ..utils import parse_iso8601 class PeriscopeIE(InfoExtractor): IE_DESC = 'Periscope' - _VALID_URL = r'https?://(?:www\.)?periscope\.tv/w/(?P<id>[^/?#]+)' - _TEST = { + _VALID_URL = r'https?://(?:www\.)?periscope\.tv/[^/]+/(?P<id>[^/?#]+)' + # Alive example URLs can be found here http://onperiscope.com/ + _TESTS = [{ 'url': 'https://www.periscope.tv/w/aJUQnjY3MjA3ODF8NTYxMDIyMDl2zCg2pECBgwTqRpQuQD352EMPTKQjT4uqlM3cgWFA-g==', 'md5': '65b57957972e503fcbbaeed8f4fa04ca', 'info_dict': { @@ -25,11 +22,17 @@ class PeriscopeIE(InfoExtractor): 'uploader_id': '1465763', }, 'skip': 'Expires in 24 hours', - } + }, { + 'url': 'https://www.periscope.tv/w/1ZkKzPbMVggJv', + 'only_matching': True, + }, { + 'url': 'https://www.periscope.tv/bastaakanoggano/1OdKrlkZZjOJX', + 'only_matching': True, + }] - def _call_api(self, method, token): + def _call_api(self, method, value): return self._download_json( - 'https://api.periscope.tv/api/v2/%s?token=%s' % (method, token), token) + 'https://api.periscope.tv/api/v2/%s?broadcast_id=%s' % (method, value), value) def _real_extract(self, url): token = self._match_id(url) @@ -76,24 +79,3 @@ class PeriscopeIE(InfoExtractor): 'thumbnails': thumbnails, 'formats': formats, } - - -class QuickscopeIE(InfoExtractor): - IE_DESC = 'Quick Scope' - _VALID_URL = r'https?://watchonperiscope\.com/broadcast/(?P<id>\d+)' - _TEST = { - 'url': 'https://watchonperiscope.com/broadcast/56180087', - 'only_matching': True, - } - - def _real_extract(self, url): - broadcast_id = self._match_id(url) - request = compat_urllib_request.Request( - 'https://watchonperiscope.com/api/accessChannel', compat_urllib_parse.urlencode({ - 'broadcast_id': broadcast_id, - 'entry_ticket': '', - 'from_push': 'false', - 'uses_sessions': 'true', - }).encode('utf-8')) - return self.url_result( - self._download_json(request, broadcast_id)['share_url'], 'Periscope') diff --git a/youtube_dl/extractor/phoenix.py b/youtube_dl/extractor/phoenix.py index 46cebc0d7..ac009f60f 100644 --- a/youtube_dl/extractor/phoenix.py +++ b/youtube_dl/extractor/phoenix.py @@ -1,10 +1,10 @@ from __future__ import unicode_literals -from .common import InfoExtractor -from .zdf import extract_from_xml_url +from .zdf import ZDFIE -class PhoenixIE(InfoExtractor): +class PhoenixIE(ZDFIE): + IE_NAME = 'phoenix.de' _VALID_URL = r'''(?x)https?://(?:www\.)?phoenix\.de/content/ (?: phoenix/die_sendungen/(?:[^/]+/)? 
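The pbs.py hunk above replaces the ad-hoc 403 branch with a table lookup that falls back to the server-supplied message. A minimal standalone sketch of that pattern, with redirect_info as a hypothetical stand-in for the parsed redirect JSON:

_ERRORS = {
    101: "We're sorry, but this video is not yet available.",
    403: "We're sorry, but this video is not available in your region due to right restrictions.",
    404: 'We are experiencing technical difficulties that are preventing us from playing the video at this time. Please check back again soon.',
    410: 'This video has expired and is no longer available for online streaming.',
}

def error_message(redirect_info):
    # Prefer the canned message for a known HTTP code, otherwise fall
    # back to whatever message the server returned.
    return _ERRORS.get(redirect_info['http_code'], redirect_info['message'])

print(error_message({'http_code': 410, 'message': 'n/a'}))   # expiry notice
print(error_message({'http_code': 500, 'message': 'oops'}))  # 'oops' (fallback)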
@@ -41,5 +41,5 @@ class PhoenixIE(InfoExtractor): r'<div class="phx_vod" id="phx_vod_([0-9]+)"', webpage, 'internal video ID') - api_url = 'http://www.phoenix.de/php/zdfplayer-v1.3/data/beitragsDetails.php?ak=web&id=%s' % internal_id - return extract_from_xml_url(self, video_id, api_url) + api_url = 'http://www.phoenix.de/php/mediaplayer/data/beitrags_details.php?ak=web&id=%s' % internal_id + return self.extract_from_xml_url(video_id, api_url) diff --git a/youtube_dl/extractor/pladform.py b/youtube_dl/extractor/pladform.py index 551c8c9f0..bc559d1df 100644 --- a/youtube_dl/extractor/pladform.py +++ b/youtube_dl/extractor/pladform.py @@ -1,6 +1,8 @@ # coding: utf-8 from __future__ import unicode_literals +import re + from .common import InfoExtractor from ..utils import ( ExtractorError, @@ -44,6 +46,13 @@ class PladformIE(InfoExtractor): 'only_matching': True, }] + @staticmethod + def _extract_url(webpage): + mobj = re.search( + r'<iframe[^>]+src="(?P<url>(?:https?:)?//out\.pladform\.ru/player\?.+?)"', webpage) + if mobj: + return mobj.group('url') + def _real_extract(self, url): video_id = self._match_id(url) diff --git a/youtube_dl/extractor/played.py b/youtube_dl/extractor/played.py index 8a1c296dd..2856af96f 100644 --- a/youtube_dl/extractor/played.py +++ b/youtube_dl/extractor/played.py @@ -5,12 +5,10 @@ import re import os.path from .common import InfoExtractor -from ..compat import ( - compat_urllib_parse, - compat_urllib_request, -) +from ..compat import compat_urllib_parse from ..utils import ( ExtractorError, + sanitized_Request, ) @@ -46,7 +44,7 @@ class PlayedIE(InfoExtractor): headers = { b'Content-Type': b'application/x-www-form-urlencoded', } - req = compat_urllib_request.Request(url, post, headers) + req = sanitized_Request(url, post, headers) webpage = self._download_webpage( req, video_id, note='Downloading video page ...') diff --git a/youtube_dl/extractor/pluralsight.py b/youtube_dl/extractor/pluralsight.py index fd32836cc..55c11b3bf 100644 --- a/youtube_dl/extractor/pluralsight.py +++ b/youtube_dl/extractor/pluralsight.py @@ -2,28 +2,36 @@ from __future__ import unicode_literals import re import json +import random +import collections from .common import InfoExtractor from ..compat import ( compat_str, compat_urllib_parse, - compat_urllib_request, compat_urlparse, ) from ..utils import ( ExtractorError, int_or_none, parse_duration, + qualities, + sanitized_Request, ) -class PluralsightIE(InfoExtractor): +class PluralsightBaseIE(InfoExtractor): + _API_BASE = 'http://app.pluralsight.com' + + +class PluralsightIE(PluralsightBaseIE): IE_NAME = 'pluralsight' - _VALID_URL = r'https?://(?:www\.)?pluralsight\.com/training/player\?author=(?P<author>[^&]+)&name=(?P<name>[^&]+)(?:&mode=live)?&clip=(?P<clip>\d+)&course=(?P<course>[^&]+)' - _LOGIN_URL = 'https://www.pluralsight.com/id/' + _VALID_URL = r'https?://(?:(?:www|app)\.)?pluralsight\.com/training/player\?' 
+ _LOGIN_URL = 'https://app.pluralsight.com/id/' + _NETRC_MACHINE = 'pluralsight' - _TEST = { + _TESTS = [{ 'url': 'http://www.pluralsight.com/training/player?author=mike-mckeown&name=hosting-sql-server-windows-azure-iaas-m7-mgmt&mode=live&clip=3&course=hosting-sql-server-windows-azure-iaas', 'md5': '4d458cf5cf4c593788672419a8dd4cf8', 'info_dict': { @@ -33,7 +41,14 @@ class PluralsightIE(InfoExtractor): 'duration': 338, }, 'skip': 'Requires pluralsight account credentials', - } + }, { + 'url': 'https://app.pluralsight.com/training/player?course=angularjs-get-started&author=scott-allen&name=angularjs-get-started-m1-introduction&clip=0&mode=live', + 'only_matching': True, + }, { + # available without pluralsight account + 'url': 'http://app.pluralsight.com/training/player?author=scott-allen&name=angularjs-get-started-m1-introduction&mode=live&clip=0&course=angularjs-get-started', + 'only_matching': True, + }] def _real_initialize(self): self._login() @@ -41,7 +56,7 @@ def _login(self): (username, password) = self._get_login_info() if username is None: - self.raise_login_required('Pluralsight account is required') + return login_page = self._download_webpage( self._LOGIN_URL, None, 'Downloading login page') @@ -60,7 +75,7 @@ if not post_url.startswith('http'): post_url = compat_urlparse.urljoin(self._LOGIN_URL, post_url) - request = compat_urllib_request.Request( + request = sanitized_Request( post_url, compat_urllib_parse.urlencode(login_form).encode('utf-8')) request.add_header('Content-Type', 'application/x-www-form-urlencoded') @@ -73,31 +88,48 @@ if error: raise ExtractorError('Unable to login: %s' % error, expected=True) + if all(p not in response for p in ('__INITIAL_STATE__', '"currentUser"')): + raise ExtractorError('Unable to log in') + def _real_extract(self, url): - mobj = re.match(self._VALID_URL, url) - author = mobj.group('author') - name = mobj.group('name') - clip_id = mobj.group('clip') - course = mobj.group('course') + qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query) + + author = qs.get('author', [None])[0] + name = qs.get('name', [None])[0] + clip_id = qs.get('clip', [None])[0] + course = qs.get('course', [None])[0] + + if any(not f for f in (author, name, clip_id, course,)): + raise ExtractorError('Invalid URL', expected=True) display_id = '%s-%s' % (name, clip_id) webpage = self._download_webpage(url, display_id) - collection = self._parse_json( - self._search_regex( - r'moduleCollection\s*:\s*new\s+ModuleCollection\((\[.+?\])\s*,\s*\$rootScope\)', - webpage, 'modules'), - display_id) + modules = self._search_regex( + r'moduleCollection\s*:\s*new\s+ModuleCollection\((\[.+?\])\s*,\s*\$rootScope\)', + webpage, 'modules', default=None) + + if modules: + collection = self._parse_json(modules, display_id) + else: + # Webpage may be served in a different layout (see + # https://github.com/rg3/youtube-dl/issues/7607) + collection = self._parse_json( + self._search_regex( + r'var\s+initialState\s*=\s*({.+?});\n', webpage, 'initial state'), + display_id)['course']['modules'] module, clip = None, None for module_ in collection: - if module_.get('moduleName') == name: + if name in (module_.get('moduleName'), module_.get('name')): module = module_ for clip_ in module_.get('clips', []): clip_index = clip_.get('clipIndex') if clip_index is None: + clip_index = clip_.get('index') + if clip_index is None: continue if compat_str(clip_index) == clip_id: clip = clip_ 
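Since the reworked _VALID_URL above no longer captures named groups, the clip parameters are read from the query string instead. A minimal sketch of that lookup, written against the Python 3 stdlib for brevity (the extractor itself goes through compat_urlparse to stay Python 2 compatible):

from urllib.parse import parse_qs, urlparse

def player_params(url):
    # parse_qs maps each key to a list of values; take the first value
    # and treat any missing parameter as an invalid URL.
    qs = parse_qs(urlparse(url).query)
    params = tuple(qs.get(key, [None])[0]
                   for key in ('author', 'name', 'clip', 'course'))
    if any(p is None for p in params):
        raise ValueError('Invalid URL: %s' % url)
    return params

print(player_params(
    'http://app.pluralsight.com/training/player?author=scott-allen'
    '&name=angularjs-get-started-m1-introduction&mode=live&clip=0'
    '&course=angularjs-get-started'))
# ('scott-allen', 'angularjs-get-started-m1-introduction', '0', 'angularjs-get-started')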
@@ -110,16 +142,49 @@ 'low': {'width': 640, 'height': 480}, 'medium': {'width': 848, 'height': 640}, 'high': {'width': 1024, 'height': 768}, + 'high-widescreen': {'width': 1280, 'height': 720}, } + QUALITIES_PREFERENCE = ('low', 'medium', 'high', 'high-widescreen',) + quality_key = qualities(QUALITIES_PREFERENCE) + + AllowedQuality = collections.namedtuple('AllowedQuality', ['ext', 'qualities']) + ALLOWED_QUALITIES = ( - ('webm', ('high',)), - ('mp4', ('low', 'medium', 'high',)), + AllowedQuality('webm', ['high', ]), + AllowedQuality('mp4', ['low', 'medium', 'high', ]), ) + # Some courses also offer widescreen resolution for high quality (see + # https://github.com/rg3/youtube-dl/issues/7766) + widescreen = True if re.search( + r'courseSupportsWidescreenVideoFormats\s*:\s*true', webpage) else False + best_quality = 'high-widescreen' if widescreen else 'high' + if widescreen: + for allowed_quality in ALLOWED_QUALITIES: + allowed_quality.qualities.append(best_quality) + + # In order to minimize the number of calls to the ViewClip API and reduce + # the probability of being throttled or banned by Pluralsight we will request + # only a single format until a formats listing is explicitly requested. + if self._downloader.params.get('listformats', False): + allowed_qualities = ALLOWED_QUALITIES + else: + def guess_allowed_qualities(): + req_format = self._downloader.params.get('format') or 'best' + req_format_split = req_format.split('-', 1) + if len(req_format_split) > 1: + req_ext, req_quality = req_format_split + for allowed_quality in ALLOWED_QUALITIES: + if req_ext == allowed_quality.ext and req_quality in allowed_quality.qualities: + return (AllowedQuality(req_ext, (req_quality, )), ) + req_ext = 'webm' if self._downloader.params.get('prefer_free_formats') else 'mp4' + return (AllowedQuality(req_ext, (best_quality, )), ) + allowed_qualities = guess_allowed_qualities() + formats = [] - for ext, qualities in ALLOWED_QUALITIES: - for quality in qualities: + for ext, qualities_ in allowed_qualities: + for quality in qualities_: f = QUALITIES[quality].copy() clip_post = { 'a': author, @@ -131,19 +196,31 @@ 'mt': ext, 'q': '%dx%d' % (f['width'], f['height']), } - request = compat_urllib_request.Request( - 'http://www.pluralsight.com/training/Player/ViewClip', + request = sanitized_Request( + '%s/training/Player/ViewClip' % self._API_BASE, json.dumps(clip_post).encode('utf-8')) request.add_header('Content-Type', 'application/json;charset=utf-8') format_id = '%s-%s' % (ext, quality) clip_url = self._download_webpage( request, display_id, 'Downloading %s URL' % format_id, fatal=False) + + # Pluralsight tracks multiple sequential calls to the ViewClip API and starts + # to return 429 HTTP errors after some time (see + # https://github.com/rg3/youtube-dl/pull/6989). Moreover, it may even lead + # to an account ban (see https://github.com/rg3/youtube-dl/issues/6842). + # To somewhat reduce the probability of these consequences + # we will sleep a random amount of time before each call to ViewClip. 
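To make the throttling comment above concrete, here is a rough standalone sketch of the randomized-delay idea; fetch_clip_url is a hypothetical callable standing in for the ViewClip request, and the real code routes the pause through self._sleep so the wait is reported to the user:

import random
import time

def fetch_all(fetch_clip_url, clip_posts):
    # Sleep 2-5 seconds before every ViewClip request so a burst of
    # format probes is less likely to trigger HTTP 429 throttling or
    # an account ban.
    urls = []
    for clip_post in clip_posts:
        time.sleep(random.randint(2, 5))
        urls.append(fetch_clip_url(clip_post))
    return urls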
+ self._sleep( + random.randint(2, 5), display_id, + '%(video_id)s: Waiting for %(timeout)s seconds to avoid throttling') + if not clip_url: continue f.update({ 'url': clip_url, 'ext': ext, 'format_id': format_id, + 'quality': quality_key(quality), }) formats.append(f) self._sort_formats(formats) @@ -163,10 +240,10 @@ class PluralsightIE(InfoExtractor): } -class PluralsightCourseIE(InfoExtractor): +class PluralsightCourseIE(PluralsightBaseIE): IE_NAME = 'pluralsight:course' - _VALID_URL = r'https?://(?:www\.)?pluralsight\.com/courses/(?P<id>[^/]+)' - _TEST = { + _VALID_URL = r'https?://(?:(?:www|app)\.)?pluralsight\.com/(?:library/)?courses/(?P<id>[^/]+)' + _TESTS = [{ # Free course from Pluralsight Starter Subscription for Microsoft TechNet # https://offers.pluralsight.com/technet?loc=zTS3z&prod=zOTprodz&tech=zOttechz&prog=zOTprogz&type=zSOz&media=zOTmediaz&country=zUSz 'url': 'http://www.pluralsight.com/courses/hosting-sql-server-windows-azure-iaas', @@ -176,7 +253,14 @@ class PluralsightCourseIE(InfoExtractor): 'description': 'md5:61b37e60f21c4b2f91dc621a977d0986', }, 'playlist_count': 31, - } + }, { + # available without pluralsight account + 'url': 'https://www.pluralsight.com/courses/angularjs-get-started', + 'only_matching': True, + }, { + 'url': 'https://app.pluralsight.com/library/courses/understanding-microsoft-azure-amazon-aws/table-of-contents', + 'only_matching': True, + }] def _real_extract(self, url): course_id = self._match_id(url) @@ -184,14 +268,14 @@ class PluralsightCourseIE(InfoExtractor): # TODO: PSM cookie course = self._download_json( - 'http://www.pluralsight.com/data/course/%s' % course_id, + '%s/data/course/%s' % (self._API_BASE, course_id), course_id, 'Downloading course JSON') title = course['title'] description = course.get('description') or course.get('shortDescription') course_data = self._download_json( - 'http://www.pluralsight.com/data/course/content/%s' % course_id, + '%s/data/course/content/%s' % (self._API_BASE, course_id), course_id, 'Downloading course data JSON') entries = [] @@ -201,7 +285,7 @@ class PluralsightCourseIE(InfoExtractor): if not player_parameters: continue entries.append(self.url_result( - 'http://www.pluralsight.com/training/player?%s' % player_parameters, + '%s/training/player?%s' % (self._API_BASE, player_parameters), 'Pluralsight')) return self.playlist_result(entries, course_id, title, description) diff --git a/youtube_dl/extractor/pornhd.py b/youtube_dl/extractor/pornhd.py index dbb2c3bd9..57c78ba52 100644 --- a/youtube_dl/extractor/pornhd.py +++ b/youtube_dl/extractor/pornhd.py @@ -36,7 +36,8 @@ class PornHdIE(InfoExtractor): webpage = self._download_webpage(url, display_id or video_id) title = self._html_search_regex( - r'<title>(.+) porn HD.+?</title>', webpage, 'title') + [r'<span[^>]+class=["\']video-name["\'][^>]*>([^<]+)', + r'<title>(.+?) 
- .*?[Pp]ornHD.*?</title>'], webpage, 'title') description = self._html_search_regex( r'<div class="description">([^<]+)</div>', webpage, 'description', fatal=False) view_count = int_or_none(self._html_search_regex( diff --git a/youtube_dl/extractor/pornhub.py b/youtube_dl/extractor/pornhub.py index a656ad85a..08275687d 100644 --- a/youtube_dl/extractor/pornhub.py +++ b/youtube_dl/extractor/pornhub.py @@ -8,10 +8,10 @@ from ..compat import ( compat_urllib_parse_unquote, compat_urllib_parse_unquote_plus, compat_urllib_parse_urlparse, - compat_urllib_request, ) from ..utils import ( ExtractorError, + sanitized_Request, str_to_int, ) from ..aes import ( @@ -53,7 +53,7 @@ class PornHubIE(InfoExtractor): def _real_extract(self, url): video_id = self._match_id(url) - req = compat_urllib_request.Request( + req = sanitized_Request( 'http://www.pornhub.com/view_video.php?viewkey=%s' % video_id) req.add_header('Cookie', 'age_verified=1') webpage = self._download_webpage(req, video_id) @@ -147,7 +147,8 @@ class PornHubPlaylistIE(InfoExtractor): entries = [ self.url_result('http://www.pornhub.com/%s' % video_url, 'PornHub') - for video_url in set(re.findall('href="/?(view_video\.php\?viewkey=\d+[^"]*)"', webpage)) + for video_url in set(re.findall( + r'href="/?(view_video\.php\?.*\bviewkey=[\da-z]+[^"]*)"', webpage)) ] playlist = self._parse_json( diff --git a/youtube_dl/extractor/pornotube.py b/youtube_dl/extractor/pornotube.py index 34735c51e..5398e708b 100644 --- a/youtube_dl/extractor/pornotube.py +++ b/youtube_dl/extractor/pornotube.py @@ -3,11 +3,9 @@ from __future__ import unicode_literals import json from .common import InfoExtractor -from ..compat import ( - compat_urllib_request, -) from ..utils import ( int_or_none, + sanitized_Request, ) @@ -46,7 +44,7 @@ class PornotubeIE(InfoExtractor): 'authenticationSpaceKey': originAuthenticationSpaceKey, 'credentials': 'Clip Application', } - token_req = compat_urllib_request.Request( + token_req = sanitized_Request( 'https://api.aebn.net/auth/v1/token/primal', data=json.dumps(token_req_data).encode('utf-8')) token_req.add_header('Content-Type', 'application/json') @@ -56,7 +54,7 @@ class PornotubeIE(InfoExtractor): token = token_answer['tokenKey'] # Get video URL - delivery_req = compat_urllib_request.Request( + delivery_req = sanitized_Request( 'https://api.aebn.net/delivery/v1/clips/%s/MP4' % video_id) delivery_req.add_header('Authorization', token) delivery_info = self._download_json( @@ -64,7 +62,7 @@ class PornotubeIE(InfoExtractor): video_url = delivery_info['mediaUrl'] # Get additional info (title etc.) 
- info_req = compat_urllib_request.Request( + info_req = sanitized_Request( 'https://api.aebn.net/content/v1/clips/%s?expand=' 'title,description,primaryImageNumber,startSecond,endSecond,' 'movie.title,movie.MovieId,movie.boxCoverFront,movie.stars,' diff --git a/youtube_dl/extractor/primesharetv.py b/youtube_dl/extractor/primesharetv.py index 304359dc5..85aae9576 100644 --- a/youtube_dl/extractor/primesharetv.py +++ b/youtube_dl/extractor/primesharetv.py @@ -1,11 +1,11 @@ from __future__ import unicode_literals from .common import InfoExtractor -from ..compat import ( - compat_urllib_parse, - compat_urllib_request, +from ..compat import compat_urllib_parse +from ..utils import ( + ExtractorError, + sanitized_Request, ) -from ..utils import ExtractorError class PrimeShareTVIE(InfoExtractor): @@ -41,7 +41,7 @@ class PrimeShareTVIE(InfoExtractor): webpage, 'wait time', default=7)) + 1 self._sleep(wait_time, video_id) - req = compat_urllib_request.Request( + req = sanitized_Request( url, compat_urllib_parse.urlencode(fields), headers) video_page = self._download_webpage( req, video_id, 'Downloading video page') diff --git a/youtube_dl/extractor/promptfile.py b/youtube_dl/extractor/promptfile.py index 8190ed676..d5357283a 100644 --- a/youtube_dl/extractor/promptfile.py +++ b/youtube_dl/extractor/promptfile.py @@ -4,13 +4,11 @@ from __future__ import unicode_literals import re from .common import InfoExtractor -from ..compat import ( - compat_urllib_parse, - compat_urllib_request, -) +from ..compat import compat_urllib_parse from ..utils import ( determine_ext, ExtractorError, + sanitized_Request, ) @@ -37,7 +35,7 @@ class PromptFileIE(InfoExtractor): fields = self._hidden_inputs(webpage) post = compat_urllib_parse.urlencode(fields) - req = compat_urllib_request.Request(url, post) + req = sanitized_Request(url, post) req.add_header('Content-type', 'application/x-www-form-urlencoded') webpage = self._download_webpage( req, video_id, 'Downloading video page') diff --git a/youtube_dl/extractor/prosiebensat1.py b/youtube_dl/extractor/prosiebensat1.py index effcf1db3..baa54a3af 100644 --- a/youtube_dl/extractor/prosiebensat1.py +++ b/youtube_dl/extractor/prosiebensat1.py @@ -20,7 +20,7 @@ from ..utils import ( class ProSiebenSat1IE(InfoExtractor): IE_NAME = 'prosiebensat1' IE_DESC = 'ProSiebenSat.1 Digital' - _VALID_URL = r'https?://(?:www\.)?(?:(?:prosieben|prosiebenmaxx|sixx|sat1|kabeleins|the-voice-of-germany)\.(?:de|at)|ran\.de|fem\.com)/(?P<id>.+)' + _VALID_URL = r'https?://(?:www\.)?(?:(?:prosieben|prosiebenmaxx|sixx|sat1|kabeleins|the-voice-of-germany)\.(?:de|at|ch)|ran\.de|fem\.com)/(?P<id>.+)' _TESTS = [ { diff --git a/youtube_dl/extractor/qqmusic.py b/youtube_dl/extractor/qqmusic.py index 1654a641f..1ba3bbddf 100644 --- a/youtube_dl/extractor/qqmusic.py +++ b/youtube_dl/extractor/qqmusic.py @@ -7,11 +7,11 @@ import re from .common import InfoExtractor from ..utils import ( + sanitized_Request, strip_jsonp, unescapeHTML, clean_html, ) -from ..compat import compat_urllib_request class QQMusicIE(InfoExtractor): @@ -25,7 +25,7 @@ class QQMusicIE(InfoExtractor): 'id': '004295Et37taLD', 'ext': 'mp3', 'title': '可惜没如果', - 'upload_date': '20141227', + 'release_date': '20141227', 'creator': '林俊杰', 'description': 'md5:d327722d0361576fde558f1ac68a7065', 'thumbnail': 're:^https?://.*\.jpg$', @@ -38,11 +38,26 @@ class QQMusicIE(InfoExtractor): 'id': '004MsGEo3DdNxV', 'ext': 'mp3', 'title': '如果', - 'upload_date': '20050626', + 'release_date': '20050626', 'creator': '李季美', 'description': 
'md5:46857d5ed62bc4ba84607a805dccf437', 'thumbnail': 're:^https?://.*\.jpg$', } + }, { + 'note': 'lyrics not in .lrc format', + 'url': 'http://y.qq.com/#type=song&mid=001JyApY11tIp6', + 'info_dict': { + 'id': '001JyApY11tIp6', + 'ext': 'mp3', + 'title': 'Shadows Over Transylvania', + 'release_date': '19970225', + 'creator': 'Dark Funeral', + 'description': 'md5:ed14d5bd7ecec19609108052c25b2c11', + 'thumbnail': 're:^https?://.*\.jpg$', + }, + 'params': { + 'skip_download': True, + }, }] _FORMATS = { @@ -112,15 +127,27 @@ class QQMusicIE(InfoExtractor): self._check_formats(formats, mid) self._sort_formats(formats) - return { + actual_lrc_lyrics = ''.join( + line + '\n' for line in re.findall( + r'(?m)^(\[[0-9]{2}:[0-9]{2}(?:\.[0-9]{2,})?\][^\n]*|\[[^\]]*\])', lrc_content)) + + info_dict = { 'id': mid, 'formats': formats, 'title': song_name, - 'upload_date': publish_time, + 'release_date': publish_time, 'creator': singer, 'description': lrc_content, - 'thumbnail': thumbnail_url, + 'thumbnail': thumbnail_url } + if actual_lrc_lyrics: + info_dict['subtitles'] = { + 'origin': [{ + 'ext': 'lrc', + 'data': actual_lrc_lyrics, + }] + } + return info_dict class QQPlaylistBaseIE(InfoExtractor): @@ -174,7 +201,7 @@ class QQMusicSingerIE(QQPlaylistBaseIE): singer_desc = None if singer_id: - req = compat_urllib_request.Request( + req = sanitized_Request( 'http://s.plcloud.music.qq.com/fcgi-bin/fcg_get_singer_desc.fcg?utf8=1&outCharset=utf-8&format=xml&singerid=%s' % singer_id) req.add_header( 'Referer', 'http://s.plcloud.music.qq.com/xhr_proxy_utf8.html') diff --git a/youtube_dl/extractor/rai.py b/youtube_dl/extractor/rai.py index 7ff1d06c4..a4dc5c335 100644 --- a/youtube_dl/extractor/rai.py +++ b/youtube_dl/extractor/rai.py @@ -8,20 +8,24 @@ from ..compat import ( compat_urlparse, ) from ..utils import ( + ExtractorError, + determine_ext, parse_duration, unified_strdate, + int_or_none, + xpath_text, ) -class RaiIE(InfoExtractor): - _VALID_URL = r'(?P<url>(?P<host>http://(?:.+?\.)?(?:rai\.it|rai\.tv|rainews\.it))/dl/.+?-(?P<id>[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})(?:-.+?)?\.html)' +class RaiTVIE(InfoExtractor): + _VALID_URL = r'http://(?:.+?\.)?(?:rai\.it|rai\.tv|rainews\.it)/dl/(?:[^/]+/)+media/.+?-(?P<id>[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})(?:-.+?)?\.html' _TESTS = [ { 'url': 'http://www.rai.tv/dl/RaiTV/programmi/media/ContentItem-cb27157f-9dd0-4aee-b788-b1f67643a391.html', - 'md5': 'c064c0b2d09c278fb293116ef5d0a32d', + 'md5': '96382709b61dd64a6b88e0f791e6df4c', 'info_dict': { 'id': 'cb27157f-9dd0-4aee-b788-b1f67643a391', - 'ext': 'mp4', + 'ext': 'flv', 'title': 'Report del 07/04/2014', 'description': 'md5:f27c544694cacb46a078db84ec35d2d9', 'upload_date': '20140407', @@ -30,16 +34,14 @@ class RaiIE(InfoExtractor): }, { 'url': 'http://www.raisport.rai.it/dl/raiSport/media/rassegna-stampa-04a9f4bd-b563-40cf-82a6-aad3529cb4a9.html', - 'md5': '8bb9c151924ce241b74dd52ef29ceafa', + 'md5': 'd9751b78eac9710d62c2447b224dea39', 'info_dict': { 'id': '04a9f4bd-b563-40cf-82a6-aad3529cb4a9', - 'ext': 'mp4', + 'ext': 'flv', 'title': 'TG PRIMO TEMPO', - 'description': '', 'upload_date': '20140612', 'duration': 1758, }, - 'skip': 'Error 404', }, { 'url': 'http://www.rainews.it/dl/rainews/media/state-of-the-net-Antonella-La-Carpia-regole-virali-7aafdea9-0e5d-49d5-88a6-7e65da67ae13.html', @@ -55,110 +57,103 @@ class RaiIE(InfoExtractor): }, { 'url': 'http://www.rai.tv/dl/RaiTV/programmi/media/ContentItem-b4a49761-e0cc-4b14-8736-2729f6f73132-tg2.html', - 'md5': 
'35694f062977fe6619943f08ed935730', 'info_dict': { 'id': 'b4a49761-e0cc-4b14-8736-2729f6f73132', 'ext': 'mp4', 'title': 'Alluvione in Sardegna e dissesto idrogeologico', 'description': 'Edizione delle ore 20:30 ', - } + }, + 'skip': 'invalid urls', }, { 'url': 'http://www.ilcandidato.rai.it/dl/ray/media/Il-Candidato---Primo-episodio-Le-Primarie-28e5525a-b495-45e8-a7c3-bc48ba45d2b6.html', - 'md5': '02b64456f7cc09f96ff14e7dd489017e', + 'md5': '496ab63e420574447f70d02578333437', 'info_dict': { 'id': '28e5525a-b495-45e8-a7c3-bc48ba45d2b6', 'ext': 'flv', 'title': 'Il Candidato - Primo episodio: "Le Primarie"', - 'description': 'Primo appuntamento con "Il candidato" con Filippo Timi, alias Piero Zucca presidente!', - 'uploader': 'RaiTre', + 'description': 'md5:364b604f7db50594678f483353164fb8', + 'upload_date': '20140923', + 'duration': 386, } }, - { - 'url': 'http://www.report.rai.it/dl/Report/puntata/ContentItem-0c7a664b-d0f4-4b2c-8835-3f82e46f433e.html', - 'md5': '037104d2c14132887e5e4cf114569214', - 'info_dict': { - 'id': '0c7a664b-d0f4-4b2c-8835-3f82e46f433e', - 'ext': 'flv', - 'title': 'Il pacco', - 'description': 'md5:4b1afae1364115ce5d78ed83cd2e5b3a', - 'uploader': 'RaiTre', - 'upload_date': '20141221', - }, - } ] - def _extract_relinker_url(self, webpage): - return self._proto_relative_url(self._search_regex( - [r'name="videourl" content="([^"]+)"', r'var\s+videoURL(?:_MP4)?\s*=\s*"([^"]+)"'], - webpage, 'relinker url', default=None)) - def _real_extract(self, url): - mobj = re.match(self._VALID_URL, url) - video_id = mobj.group('id') - host = mobj.group('host') + video_id = self._match_id(url) + media = self._download_json( + 'http://www.rai.tv/dl/RaiTV/programmi/media/ContentItem-%s.html?json' % video_id, + video_id, 'Downloading video JSON') - webpage = self._download_webpage(url, video_id) + thumbnails = [] + for image_type in ('image', 'image_medium', 'image_300'): + thumbnail_url = media.get(image_type) + if thumbnail_url: + thumbnails.append({ + 'url': thumbnail_url, + }) - relinker_url = self._extract_relinker_url(webpage) - - if not relinker_url: - iframe_url = self._search_regex( - [r'<iframe[^>]+src="([^"]*/dl/[^"]+\?iframe\b[^"]*)"', - r'drawMediaRaiTV\(["\'](.+?)["\']'], - webpage, 'iframe') - if not iframe_url.startswith('http'): - iframe_url = compat_urlparse.urljoin(url, iframe_url) - webpage = self._download_webpage( - iframe_url, video_id) - relinker_url = self._extract_relinker_url(webpage) - - relinker = self._download_json( - '%s&output=47' % relinker_url, video_id) - - media_url = relinker['video'][0] - ct = relinker.get('ct') - if ct == 'f4m': - formats = self._extract_f4m_formats( - media_url + '&hdcore=3.7.0&plugin=aasp-3.7.0.39.44', video_id) - else: - formats = [{ - 'url': media_url, - 'format_id': ct, - }] + subtitles = [] + formats = [] + media_type = media['type'] + if 'Audio' in media_type: + formats.append({ + 'format_id': media.get('formatoAudio'), + 'url': media['audioUrl'], + 'ext': media.get('formatoAudio'), + }) + elif 'Video' in media_type: + def fix_xml(xml): + return xml.replace(' tag elementi', '').replace('>/', '</') + + relinker = self._download_xml( + media['mediaUri'] + '&output=43', + video_id, transform_source=fix_xml) - json_link = self._html_search_meta( - 'jsonlink', webpage, 'JSON link', default=None) - if json_link: - media = self._download_json( - host + json_link, video_id, 'Downloading video JSON') - title = media.get('name') - description = media.get('desc') - thumbnail = media.get('image_300') or media.get('image_medium') or 
media.get('image') - duration = parse_duration(media.get('length')) - uploader = media.get('author') - upload_date = unified_strdate(media.get('date')) + has_subtitle = False + + for element in relinker.findall('element'): + media_url = xpath_text(element, 'url') + ext = determine_ext(media_url) + content_type = xpath_text(element, 'content-type') + if ext == 'm3u8': + formats.extend(self._extract_m3u8_formats( + media_url, video_id, 'mp4', 'm3u8_native', + m3u8_id='hls', fatal=False)) + elif ext == 'f4m': + formats.extend(self._extract_f4m_formats( + media_url + '?hdcore=3.7.0&plugin=aasp-3.7.0.39.44', + video_id, f4m_id='hds', fatal=False)) + elif ext == 'stl': + has_subtitle = True + elif content_type.startswith('video/'): + bitrate = int_or_none(xpath_text(element, 'bitrate')) + formats.append({ + 'url': media_url, + 'tbr': bitrate if bitrate > 0 else None, + 'format_id': 'http-%d' % bitrate if bitrate > 0 else 'http', + }) + elif content_type.startswith('image/'): + thumbnails.append({ + 'url': media_url, + }) + + self._sort_formats(formats) + + if has_subtitle: + webpage = self._download_webpage(url, video_id) + subtitles = self._get_subtitles(video_id, webpage) else: - title = (self._search_regex( - r'var\s+videoTitolo\s*=\s*"(.+?)";', - webpage, 'title', default=None) or self._og_search_title(webpage)).replace('\\"', '"') - description = self._og_search_description(webpage) - thumbnail = self._og_search_thumbnail(webpage) - duration = None - uploader = self._html_search_meta('Editore', webpage, 'uploader') - upload_date = unified_strdate(self._html_search_meta( - 'item-date', webpage, 'upload date', default=None)) - - subtitles = self.extract_subtitles(video_id, webpage) + raise ExtractorError('not a media file') return { 'id': video_id, - 'title': title, - 'description': description, - 'thumbnail': thumbnail, - 'uploader': uploader, - 'upload_date': upload_date, - 'duration': duration, + 'title': media['name'], + 'description': media.get('desc'), + 'thumbnails': thumbnails, + 'uploader': media.get('author'), + 'upload_date': unified_strdate(media.get('date')), + 'duration': parse_duration(media.get('length')), 'formats': formats, 'subtitles': subtitles, } @@ -177,3 +172,36 @@ class RaiIE(InfoExtractor): 'url': 'http://www.rai.tv%s' % compat_urllib_parse.quote(captions), }] return subtitles + + +class RaiIE(InfoExtractor): + _VALID_URL = r'http://(?:.+?\.)?(?:rai\.it|rai\.tv|rainews\.it)/dl/.+?-(?P<id>[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})(?:-.+?)?\.html' + _TESTS = [ + { + 'url': 'http://www.report.rai.it/dl/Report/puntata/ContentItem-0c7a664b-d0f4-4b2c-8835-3f82e46f433e.html', + 'md5': 'e0e7a8a131e249d1aa0ebf270d1d8db7', + 'info_dict': { + 'id': '59d69d28-6bb6-409d-a4b5-ed44096560af', + 'ext': 'flv', + 'title': 'Il pacco', + 'description': 'md5:4b1afae1364115ce5d78ed83cd2e5b3a', + 'upload_date': '20141221', + }, + } + ] + + @classmethod + def suitable(cls, url): + return False if RaiTVIE.suitable(url) else super(RaiIE, cls).suitable(url) + + def _real_extract(self, url): + video_id = self._match_id(url) + webpage = self._download_webpage(url, video_id) + + iframe_url = self._search_regex( + [r'<iframe[^>]+src="([^"]*/dl/[^"]+\?iframe\b[^"]*)"', + r'drawMediaRaiTV\(["\'](.+?)["\']'], + webpage, 'iframe') + if not iframe_url.startswith('http'): + iframe_url = compat_urlparse.urljoin(url, iframe_url) + return self.url_result(iframe_url) diff --git a/youtube_dl/extractor/rtbf.py b/youtube_dl/extractor/rtbf.py index e4215d546..e42b319a3 100644 --- 
a/youtube_dl/extractor/rtbf.py +++ b/youtube_dl/extractor/rtbf.py @@ -9,8 +9,8 @@ from ..utils import ( class RTBFIE(InfoExtractor): - _VALID_URL = r'https?://www.rtbf.be/video/[^\?]+\?id=(?P<id>\d+)' - _TEST = { + _VALID_URL = r'https?://(?:www\.)?rtbf\.be/(?:video/[^?]+\?.*\bid=|ouftivi/(?:[^/]+/)*[^?]+\?.*\bvideoId=)(?P<id>\d+)' + _TESTS = [{ 'url': 'https://www.rtbf.be/video/detail_les-diables-au-coeur-episode-2?id=1921274', 'md5': '799f334ddf2c0a582ba80c44655be570', 'info_dict': { @@ -19,7 +19,14 @@ class RTBFIE(InfoExtractor): 'title': 'Les Diables au coeur (épisode 2)', 'duration': 3099, } - } + }, { + # geo restricted + 'url': 'http://www.rtbf.be/ouftivi/heros/detail_scooby-doo-mysteres-associes?id=1097&videoId=2057442', + 'only_matching': True, + }, { + 'url': 'http://www.rtbf.be/ouftivi/niouzz?videoId=2055858', + 'only_matching': True, + }] _QUALITIES = [ ('mobile', 'mobile'), @@ -36,7 +43,7 @@ class RTBFIE(InfoExtractor): data = self._parse_json( unescapeHTML(self._search_regex( - r'data-video="([^"]+)"', webpage, 'data video')), + r'data-media="([^"]+)"', webpage, 'data video')), video_id) if data.get('provider').lower() == 'youtube': diff --git a/youtube_dl/extractor/rte.py b/youtube_dl/extractor/rte.py index 04158b993..d9cfbf180 100644 --- a/youtube_dl/extractor/rte.py +++ b/youtube_dl/extractor/rte.py @@ -9,16 +9,16 @@ from ..utils import ( class RteIE(InfoExtractor): - _VALID_URL = r'http?://(?:www\.)?rte\.ie/player/[^/]{2,3}/show/(?P<id>[0-9]+)/' + _VALID_URL = r'https?://(?:www\.)?rte\.ie/player/[^/]{2,3}/show/[^/]+/(?P<id>[0-9]+)' _TEST = { - 'url': 'http://www.rte.ie/player/de/show/10363114/', + 'url': 'http://www.rte.ie/player/ie/show/iwitness-862/10478715/', 'info_dict': { - 'id': '10363114', + 'id': '10478715', 'ext': 'mp4', - 'title': 'One News', + 'title': 'Watch iWitness online', 'thumbnail': 're:^https?://.*\.jpg$', - 'description': 'The One O\'Clock News followed by Weather.', - 'duration': 436.844, + 'description': 'iWitness : The spirit of Ireland, one voice and one minute at a time.', + 'duration': 60.046, }, 'params': { 'skip_download': 'f4m fails with --test atm' diff --git a/youtube_dl/extractor/rts.py b/youtube_dl/extractor/rts.py index 12639f08b..3cc32847b 100644 --- a/youtube_dl/extractor/rts.py +++ b/youtube_dl/extractor/rts.py @@ -3,7 +3,7 @@ from __future__ import unicode_literals import re -from .common import InfoExtractor +from .srgssr import SRGSSRIE from ..compat import ( compat_str, compat_urllib_parse_urlparse, @@ -17,23 +17,14 @@ from ..utils import ( ) -class RTSIE(InfoExtractor): +class RTSIE(SRGSSRIE): IE_DESC = 'RTS.ch' - _VALID_URL = r'''(?x) - (?: - rts:(?P<rts_id>\d+)| - https?:// - (?:www\.)?rts\.ch/ - (?: - (?:[^/]+/){2,}(?P<id>[0-9]+)-(?P<display_id>.+?)\.html| - play/tv/[^/]+/video/(?P<display_id_new>.+?)\?id=(?P<id_new>[0-9]+) - ) - )''' + _VALID_URL = r'rts:(?P<rts_id>\d+)|https?://(?:www\.)?rts\.ch/(?:[^/]+/){2,}(?P<id>[0-9]+)-(?P<display_id>.+?)\.html' _TESTS = [ { 'url': 'http://www.rts.ch/archives/tv/divers/3449373-les-enfants-terribles.html', - 'md5': '753b877968ad8afaeddccc374d4256a5', + 'md5': 'f254c4b26fb1d3c183793d52bc40d3e7', 'info_dict': { 'id': '3449373', 'display_id': 'les-enfants-terribles', @@ -47,13 +38,17 @@ class RTSIE(InfoExtractor): 'thumbnail': 're:^https?://.*\.image', 'view_count': int, }, + 'params': { + # m3u8 download + 'skip_download': True, + } }, { 'url': 'http://www.rts.ch/emissions/passe-moi-les-jumelles/5624067-entre-ciel-et-mer.html', - 'md5': 'c148457a27bdc9e5b1ffe081a7a8337b', + 'md5': 
'f1077ac5af686c76528dc8d7c5df29ba', 'info_dict': { - 'id': '5624067', - 'display_id': 'entre-ciel-et-mer', + 'id': '5742494', + 'display_id': '5742494', 'ext': 'mp4', 'duration': 3720, 'title': 'Les yeux dans les cieux - Mon homard au Canada', @@ -64,6 +59,10 @@ class RTSIE(InfoExtractor): 'thumbnail': 're:^https?://.*\.image', 'view_count': int, }, + 'params': { + # m3u8 download + 'skip_download': True, + } }, { 'url': 'http://www.rts.ch/video/sport/hockey/5745975-1-2-kloten-fribourg-5-2-second-but-pour-gotteron-par-kwiatowski.html', @@ -85,7 +84,7 @@ class RTSIE(InfoExtractor): }, { 'url': 'http://www.rts.ch/video/info/journal-continu/5745356-londres-cachee-par-un-epais-smog.html', - 'md5': '9bb06503773c07ce83d3cbd793cebb91', + 'md5': '9f713382f15322181bb366cc8c3a4ff0', 'info_dict': { 'id': '5745356', 'display_id': 'londres-cachee-par-un-epais-smog', @@ -99,6 +98,10 @@ class RTSIE(InfoExtractor): 'thumbnail': 're:^https?://.*\.image', 'view_count': int, }, + 'params': { + # m3u8 download + 'skip_download': True, + } }, { 'url': 'http://www.rts.ch/audio/couleur3/programmes/la-belle-video-de-stephane-laurenceau/5706148-urban-hippie-de-damien-krisl-03-04-2014.html', @@ -115,23 +118,6 @@ class RTSIE(InfoExtractor): }, }, { - 'url': 'http://www.rts.ch/play/tv/-/video/le-19h30?id=6348260', - 'md5': '968777c8779e5aa2434be96c54e19743', - 'info_dict': { - 'id': '6348260', - 'display_id': 'le-19h30', - 'ext': 'mp4', - 'duration': 1796, - 'title': 'Le 19h30', - 'description': '', - 'uploader': 'Le 19h30', - 'upload_date': '20141201', - 'timestamp': 1417458600, - 'thumbnail': 're:^https?://.*\.image', - 'view_count': int, - }, - }, - { # article with videos on rhs 'url': 'http://www.rts.ch/sport/hockey/6693917-hockey-davos-decroche-son-31e-titre-de-champion-de-suisse.html', 'info_dict': { @@ -139,42 +125,47 @@ class RTSIE(InfoExtractor): 'title': 'Hockey: Davos décroche son 31e titre de champion de Suisse', }, 'playlist_mincount': 5, - }, - { - 'url': 'http://www.rts.ch/play/tv/le-19h30/video/le-chantier-du-nouveau-parlement-vaudois-a-permis-une-trouvaille-historique?id=6348280', - 'only_matching': True, } ] def _real_extract(self, url): m = re.match(self._VALID_URL, url) - video_id = m.group('rts_id') or m.group('id') or m.group('id_new') - display_id = m.group('display_id') or m.group('display_id_new') + media_id = m.group('rts_id') or m.group('id') + display_id = m.group('display_id') or media_id def download_json(internal_id): return self._download_json( 'http://www.rts.ch/a/%s.html?f=json/article' % internal_id, display_id) - all_info = download_json(video_id) + all_info = download_json(media_id) - # video_id extracted out of URL is not always a real id + # media_id extracted out of URL is not always a real id if 'video' not in all_info and 'audio' not in all_info: page = self._download_webpage(url, display_id) # article with videos on rhs videos = re.findall( - r'<article[^>]+class="content-item"[^>]*>\s*<a[^>]+data-video-urn="urn:rts:video:(\d+)"', + r'<article[^>]+class="content-item"[^>]*>\s*<a[^>]+data-video-urn="urn:([^"]+)"', page) + if not videos: + videos = re.findall( + r'(?s)<iframe[^>]+class="srg-player"[^>]+src="[^"]+urn:([^"]+)"', + page) if videos: - entries = [self.url_result('rts:%s' % video_urn, 'RTS') for video_urn in videos] - return self.playlist_result(entries, video_id, self._og_search_title(page)) + entries = [self.url_result('srgssr:%s' % video_urn, 'SRGSSR') for video_urn in videos] + return self.playlist_result(entries, media_id, self._og_search_title(page)) 
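# ---------------------------------------------------------------------------
# Editor's aside (illustration, not part of the patch): the hunk above stops
# routing article-page URNs through 'rts:%s' and instead builds 'srgssr:'
# pseudo-URLs, deferring to the new SRGSSRIE added later in this diff. A
# minimal, self-contained sketch of that round trip, assuming the
# SRGSSRIE._VALID_URL quoted further down; urn_to_pseudo_url is a
# hypothetical helper used only for this illustration:
import re

SRGSSR_VALID_URL = (r'(?:https?://tp\.srgssr\.ch/p(?:/[^/]+)+\?urn=urn|srgssr)'
                    r':(?P<bu>srf|rts|rsi|rtr|swi):(?:[^:]+:)?'
                    r'(?P<type>video|audio):(?P<id>[0-9a-f\-]{36}|\d+)')

def urn_to_pseudo_url(data_video_urn):
    # 'urn:rts:video:3449373' -> 'srgssr:rts:video:3449373'; the page regexes
    # above already capture the part after 'urn:', so they skip this step.
    return 'srgssr:' + data_video_urn[len('urn:'):]

m = re.match(SRGSSR_VALID_URL, urn_to_pseudo_url('urn:rts:video:3449373'))
assert m.group('bu', 'type', 'id') == ('rts', 'video', '3449373')
# SRGSSRIE._real_extract then hands bu == 'rts' straight back to RTSIE as
# 'rts:3449373', which the rts:(?P<rts_id>\d+) branch of its _VALID_URL
# matches, so RTS pages and SRG URNs converge on a single extraction path.
# ---------------------------------------------------------------------------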
internal_id = self._html_search_regex( r'<(?:video|audio) data-id="([0-9]+)"', page, 'internal video id') all_info = download_json(internal_id) + media_type = 'video' if 'video' in all_info else 'audio' + + # check for errors + self.get_media_data('rts', media_type, media_id) + info = all_info['video']['JSONinfo'] if 'video' in all_info else all_info['audio'] upload_timestamp = parse_iso8601(info.get('broadcast_date')) @@ -190,19 +181,23 @@ class RTSIE(InfoExtractor): formats = [] for format_id, format_url in info['streams'].items(): + if format_id == 'hds_sd' and 'hds' in info['streams']: + continue + if format_id == 'hls_sd' and 'hls' in info['streams']: + continue if format_url.endswith('.f4m'): token = self._download_xml( 'http://tp.srgssr.ch/token/akahd.xml?stream=%s/*' % compat_urllib_parse_urlparse(format_url).path, - video_id, 'Downloading %s token' % format_id) + media_id, 'Downloading %s token' % format_id) auth_params = xpath_text(token, './/authparams', 'auth params') if not auth_params: continue formats.extend(self._extract_f4m_formats( '%s?%s&hdcore=3.4.0&plugin=aasp-3.4.0.132.66' % (format_url, auth_params), - video_id, f4m_id=format_id)) + media_id, f4m_id=format_id, fatal=False)) elif format_url.endswith('.m3u8'): formats.extend(self._extract_m3u8_formats( - format_url, video_id, 'mp4', m3u8_id=format_id)) + format_url, media_id, 'mp4', 'm3u8_native', m3u8_id=format_id, fatal=False)) else: formats.append({ 'format_id': format_id, @@ -217,11 +212,11 @@ class RTSIE(InfoExtractor): 'tbr': media['rate'] or extract_bitrate(media['url']), } for media in info['media'] if media.get('rate')]) - self._check_formats(formats, video_id) + self._check_formats(formats, media_id) self._sort_formats(formats) return { - 'id': video_id, + 'id': media_id, 'display_id': display_id, 'formats': formats, 'title': info['title'], diff --git a/youtube_dl/extractor/rtve.py b/youtube_dl/extractor/rtve.py index 5b97d33ca..603d7bd00 100644 --- a/youtube_dl/extractor/rtve.py +++ b/youtube_dl/extractor/rtve.py @@ -6,11 +6,11 @@ import re import time from .common import InfoExtractor -from ..compat import compat_urllib_request, compat_urlparse from ..utils import ( ExtractorError, float_or_none, remove_end, + sanitized_Request, std_headers, struct_unpack, ) @@ -102,20 +102,14 @@ class RTVEALaCartaIE(InfoExtractor): if info['state'] == 'DESPU': raise ExtractorError('The video is no longer available', expected=True) png_url = 'http://www.rtve.es/ztnr/movil/thumbnail/%s/videos/%s.png' % (self._manager, video_id) - png_request = compat_urllib_request.Request(png_url) + png_request = sanitized_Request(png_url) png_request.add_header('Referer', url) png = self._download_webpage(png_request, video_id, 'Downloading url information') video_url = _decrypt_url(png) if not video_url.endswith('.f4m'): - auth_url = video_url.replace( + video_url = video_url.replace( 'resources/', 'auth/resources/' ).replace('.net.rtve', '.multimedia.cdn.rtve') - video_path = self._download_webpage( - auth_url, video_id, 'Getting video url') - # Use mvod1.akcdn instead of flash.akamaihd.multimedia.cdn to get - # the right Content-Length header and the mp4 format - video_url = compat_urlparse.urljoin( - 'http://mvod1.akcdn.rtve.es/', video_path) subtitles = None if info.get('sbtFile') is not None: diff --git a/youtube_dl/extractor/rutube.py b/youtube_dl/extractor/rutube.py index d94dc7399..c5c47d01e 100644 --- a/youtube_dl/extractor/rutube.py +++ b/youtube_dl/extractor/rutube.py @@ -9,7 +9,7 @@ from ..compat import ( compat_str, ) from 
..utils import ( - ExtractorError, + determine_ext, unified_strdate, ) @@ -17,9 +17,9 @@ from ..utils import ( class RutubeIE(InfoExtractor): IE_NAME = 'rutube' IE_DESC = 'Rutube videos' - _VALID_URL = r'https?://rutube\.ru/video/(?P<id>[\da-z]{32})' + _VALID_URL = r'https?://rutube\.ru/(?:video|play/embed)/(?P<id>[\da-z]{32})' - _TEST = { + _TESTS = [{ 'url': 'http://rutube.ru/video/3eac3b4561676c17df9132a9a1e62e3e/', 'info_dict': { 'id': '3eac3b4561676c17df9132a9a1e62e3e', @@ -36,7 +36,10 @@ class RutubeIE(InfoExtractor): # It requires ffmpeg (m3u8 download) 'skip_download': True, }, - } + }, { + 'url': 'http://rutube.ru/play/embed/a10e53b86e8f349080f718582ce4c661', + 'only_matching': True, + }] def _real_extract(self, url): video_id = self._match_id(url) @@ -51,10 +54,21 @@ class RutubeIE(InfoExtractor): 'http://rutube.ru/api/play/options/%s/?format=json' % video_id, video_id, 'Downloading options JSON') - m3u8_url = options['video_balancer'].get('m3u8') - if m3u8_url is None: - raise ExtractorError('Couldn\'t find m3u8 manifest url') - formats = self._extract_m3u8_formats(m3u8_url, video_id, ext='mp4') + formats = [] + for format_id, format_url in options['video_balancer'].items(): + ext = determine_ext(format_url) + if ext == 'm3u8': + formats.extend(self._extract_m3u8_formats( + format_url, video_id, 'mp4', m3u8_id=format_id, fatal=False)) + elif ext == 'f4m': + formats.extend(self._extract_f4m_formats( + format_url, video_id, f4m_id=format_id, fatal=False)) + else: + formats.append({ + 'url': format_url, + 'format_id': format_id, + }) + self._sort_formats(formats) return { 'id': video['id'], @@ -74,9 +88,9 @@ class RutubeIE(InfoExtractor): class RutubeEmbedIE(InfoExtractor): IE_NAME = 'rutube:embed' IE_DESC = 'Rutube embedded videos' - _VALID_URL = 'https?://rutube\.ru/video/embed/(?P<id>[0-9]+)' + _VALID_URL = 'https?://rutube\.ru/(?:video|play)/embed/(?P<id>[0-9]+)' - _TEST = { + _TESTS = [{ 'url': 'http://rutube.ru/video/embed/6722881?vk_puid37=&vk_puid38=', 'info_dict': { 'id': 'a10e53b86e8f349080f718582ce4c661', @@ -90,7 +104,10 @@ class RutubeEmbedIE(InfoExtractor): 'params': { 'skip_download': 'Requires ffmpeg', }, - } + }, { + 'url': 'http://rutube.ru/play/embed/8083783', + 'only_matching': True, + }] def _real_extract(self, url): embed_id = self._match_id(url) diff --git a/youtube_dl/extractor/rutv.py b/youtube_dl/extractor/rutv.py index d9df06861..f7fe1fece 100644 --- a/youtube_dl/extractor/rutv.py +++ b/youtube_dl/extractor/rutv.py @@ -131,7 +131,7 @@ class RUTVIE(InfoExtractor): is_live = video_type == 'live' json_data = self._download_json( - 'http://player.rutv.ru/iframe/%splay/id/%s' % ('live-' if is_live else '', video_id), + 'http://player.rutv.ru/iframe/data%s/id/%s' % ('live' if is_live else 'video', video_id), video_id, 'Downloading JSON') if json_data['errors']: diff --git a/youtube_dl/extractor/ruutu.py b/youtube_dl/extractor/ruutu.py index c67ad25ce..41fddc375 100644 --- a/youtube_dl/extractor/ruutu.py +++ b/youtube_dl/extractor/ruutu.py @@ -57,16 +57,17 @@ class RuutuIE(InfoExtractor): extract_formats(child) elif child.tag.endswith('File'): video_url = child.text - if not video_url or video_url in processed_urls or 'NOT_USED' in video_url: + if (not video_url or video_url in processed_urls or + any(p in video_url for p in ('NOT_USED', 'NOT-USED'))): return processed_urls.append(video_url) ext = determine_ext(video_url) if ext == 'm3u8': formats.extend(self._extract_m3u8_formats( - video_url, video_id, 'mp4', m3u8_id='hls')) + video_url, video_id, 'mp4', 
m3u8_id='hls', fatal=False)) elif ext == 'f4m': formats.extend(self._extract_f4m_formats( - video_url, video_id, f4m_id='hds')) + video_url, video_id, f4m_id='hds', fatal=False)) else: proto = compat_urllib_parse_urlparse(video_url).scheme if not child.tag.startswith('HTTP') and proto != 'rtmp': @@ -74,7 +75,7 @@ class RuutuIE(InfoExtractor): preference = -1 if proto == 'rtmp' else 1 label = child.get('label') tbr = int_or_none(child.get('bitrate')) - width, height = [int_or_none(x) for x in child.get('resolution', '').split('x')] + width, height = [int_or_none(x) for x in child.get('resolution', 'x').split('x')[:2]] formats.append({ 'format_id': '%s-%s' % (proto, label if label else tbr), 'url': video_url, diff --git a/youtube_dl/extractor/safari.py b/youtube_dl/extractor/safari.py index a602af692..7de7b7273 100644 --- a/youtube_dl/extractor/safari.py +++ b/youtube_dl/extractor/safari.py @@ -4,16 +4,14 @@ from __future__ import unicode_literals import re from .common import InfoExtractor -from .brightcove import BrightcoveIE +from .brightcove import BrightcoveLegacyIE -from ..compat import ( - compat_urllib_parse, - compat_urllib_request, -) from ..utils import ( ExtractorError, + sanitized_Request, smuggle_url, std_headers, + urlencode_postdata, ) @@ -58,8 +56,8 @@ class SafariBaseIE(InfoExtractor): 'next': '', } - request = compat_urllib_request.Request( - self._LOGIN_URL, compat_urllib_parse.urlencode(login_form), headers=headers) + request = sanitized_Request( + self._LOGIN_URL, urlencode_postdata(login_form), headers=headers) login_page = self._download_webpage( request, None, 'Logging in as %s' % username) @@ -112,11 +110,11 @@ class SafariIE(SafariBaseIE): '%s/%s/chapter-content/%s.html' % (self._API_BASE, course_id, part), part) - bc_url = BrightcoveIE._extract_brightcove_url(webpage) + bc_url = BrightcoveLegacyIE._extract_brightcove_url(webpage) if not bc_url: raise ExtractorError('Could not extract Brightcove URL from %s' % url, expected=True) - return self.url_result(smuggle_url(bc_url, {'Referer': url}), 'Brightcove') + return self.url_result(smuggle_url(bc_url, {'Referer': url}), 'BrightcoveLegacy') class SafariCourseIE(SafariBaseIE): diff --git a/youtube_dl/extractor/sandia.py b/youtube_dl/extractor/sandia.py index 9c88167f0..759898a49 100644 --- a/youtube_dl/extractor/sandia.py +++ b/youtube_dl/extractor/sandia.py @@ -6,14 +6,12 @@ import json import re from .common import InfoExtractor -from ..compat import ( - compat_urllib_request, - compat_urlparse, -) +from ..compat import compat_urlparse from ..utils import ( int_or_none, js_to_json, mimetype2ext, + sanitized_Request, unified_strdate, ) @@ -37,7 +35,7 @@ class SandiaIE(InfoExtractor): def _real_extract(self, url): video_id = self._match_id(url) - req = compat_urllib_request.Request(url) + req = sanitized_Request(url) req.add_header('Cookie', 'MediasitePlayerCaps=ClientPlugins=4') webpage = self._download_webpage(req, video_id) diff --git a/youtube_dl/extractor/senateisvp.py b/youtube_dl/extractor/senateisvp.py index 9c53704ea..474ebb49b 100644 --- a/youtube_dl/extractor/senateisvp.py +++ b/youtube_dl/extractor/senateisvp.py @@ -121,9 +121,9 @@ class SenateISVPIE(InfoExtractor): 'url': compat_urlparse.urljoin(domain, filename) + '?v=3.1.0&fp=&r=&g=', }] else: - hdcore_sign = '?hdcore=3.1.0' + hdcore_sign = 'hdcore=3.1.0' url_params = (domain, video_id, stream_num) - f4m_url = '%s/z/%s_1@%s/manifest.f4m' % url_params + hdcore_sign + f4m_url = '%s/z/%s_1@%s/manifest.f4m?' 
% url_params + hdcore_sign m3u8_url = '%s/i/%s_1@%s/master.m3u8' % url_params for entry in self._extract_f4m_formats(f4m_url, video_id, f4m_id='f4m'): # URLs without the extra param induce an 404 error diff --git a/youtube_dl/extractor/shared.py b/youtube_dl/extractor/shared.py index c5636e8e9..8eda3c864 100644 --- a/youtube_dl/extractor/shared.py +++ b/youtube_dl/extractor/shared.py @@ -3,13 +3,11 @@ from __future__ import unicode_literals import base64 from .common import InfoExtractor -from ..compat import ( - compat_urllib_parse, - compat_urllib_request, -) +from ..compat import compat_urllib_parse from ..utils import ( ExtractorError, int_or_none, + sanitized_Request, ) @@ -46,7 +44,7 @@ class SharedIE(InfoExtractor): 'Video %s does not exist' % video_id, expected=True) download_form = self._hidden_inputs(webpage) - request = compat_urllib_request.Request( + request = sanitized_Request( url, compat_urllib_parse.urlencode(download_form)) request.add_header('Content-Type', 'application/x-www-form-urlencoded') diff --git a/youtube_dl/extractor/sharesix.py b/youtube_dl/extractor/sharesix.py index ac3e3adf2..f1ea9bdb2 100644 --- a/youtube_dl/extractor/sharesix.py +++ b/youtube_dl/extractor/sharesix.py @@ -4,12 +4,10 @@ from __future__ import unicode_literals import re from .common import InfoExtractor -from ..compat import ( - compat_urllib_parse, - compat_urllib_request, -) +from ..compat import compat_urllib_parse from ..utils import ( parse_duration, + sanitized_Request, ) @@ -50,7 +48,7 @@ class ShareSixIE(InfoExtractor): 'method_free': 'Free' } post = compat_urllib_parse.urlencode(fields) - req = compat_urllib_request.Request(url, post) + req = sanitized_Request(url, post) req.add_header('Content-type', 'application/x-www-form-urlencoded') webpage = self._download_webpage(req, video_id, diff --git a/youtube_dl/extractor/sina.py b/youtube_dl/extractor/sina.py index 0891a441f..b2258a0f6 100644 --- a/youtube_dl/extractor/sina.py +++ b/youtube_dl/extractor/sina.py @@ -4,10 +4,8 @@ from __future__ import unicode_literals import re from .common import InfoExtractor -from ..compat import ( - compat_urllib_request, - compat_urllib_parse, -) +from ..compat import compat_urllib_parse +from ..utils import sanitized_Request class SinaIE(InfoExtractor): @@ -61,7 +59,7 @@ class SinaIE(InfoExtractor): if mobj.group('token') is not None: # The video id is in the redirected url self.to_screen('Getting video id') - request = compat_urllib_request.Request(url) + request = sanitized_Request(url) request.get_method = lambda: 'HEAD' (_, urlh) = self._download_webpage_handle(request, 'NA', False) return self._real_extract(urlh.geturl()) diff --git a/youtube_dl/extractor/skynewsarabia.py b/youtube_dl/extractor/skynewsarabia.py new file mode 100644 index 000000000..05e1b02ad --- /dev/null +++ b/youtube_dl/extractor/skynewsarabia.py @@ -0,0 +1,117 @@ +# coding: utf-8 +from __future__ import unicode_literals + +from .common import InfoExtractor +from ..compat import compat_str +from ..utils import ( + parse_iso8601, + parse_duration, +) + + +class SkyNewsArabiaBaseIE(InfoExtractor): + _IMAGE_BASE_URL = 'http://www.skynewsarabia.com/web/images' + + def _call_api(self, path, value): + return self._download_json('http://api.skynewsarabia.com/web/rest/v2/%s/%s.json' % (path, value), value) + + def _get_limelight_media_id(self, url): + return self._search_regex(r'/media/[^/]+/([a-z0-9]{32})', url, 'limelight media id') + + def _get_image_url(self, image_path_template, width='1600', height='1200'): + return 
self._IMAGE_BASE_URL + image_path_template.format(width=width, height=height) + + def _extract_video_info(self, video_data): + video_id = compat_str(video_data['id']) + topic = video_data.get('topicTitle') + return { + '_type': 'url_transparent', + 'url': 'limelight:media:%s' % self._get_limelight_media_id(video_data['videoUrl'][0]['url']), + 'id': video_id, + 'title': video_data['headline'], + 'description': video_data.get('summary'), + 'thumbnail': self._get_image_url(video_data['mediaAsset']['imageUrl']), + 'timestamp': parse_iso8601(video_data.get('date')), + 'duration': parse_duration(video_data.get('runTime')), + 'tags': video_data.get('tags', []), + 'categories': [topic] if topic else [], + 'webpage_url': 'http://www.skynewsarabia.com/web/video/%s' % video_id, + 'ie_key': 'LimelightMedia', + } + + +class SkyNewsArabiaIE(SkyNewsArabiaBaseIE): + IE_NAME = 'skynewsarabia:video' + _VALID_URL = r'https?://(?:www\.)?skynewsarabia\.com/web/video/(?P<id>[0-9]+)' + _TEST = { + 'url': 'http://www.skynewsarabia.com/web/video/794902/%D9%86%D8%B5%D9%81-%D9%85%D9%84%D9%8A%D9%88%D9%86-%D9%85%D8%B5%D8%A8%D8%A7%D8%AD-%D8%B4%D8%AC%D8%B1%D8%A9-%D9%83%D8%B1%D9%8A%D8%B3%D9%85%D8%A7%D8%B3', + 'info_dict': { + 'id': '794902', + 'ext': 'flv', + 'title': 'نصف مليون مصباح على شجرة كريسماس', + 'description': 'md5:22f1b27f0850eeb10c7e59b1f16eb7c6', + 'upload_date': '20151128', + 'timestamp': 1448697198, + 'duration': 2119, + }, + 'params': { + # rtmp download + 'skip_download': True, + }, + } + + def _real_extract(self, url): + video_id = self._match_id(url) + video_data = self._call_api('video', video_id) + return self._extract_video_info(video_data) + + +class SkyNewsArabiaArticleIE(SkyNewsArabiaBaseIE): + IE_NAME = 'skynewsarabia:article' + _VALID_URL = r'https?://(?:www\.)?skynewsarabia\.com/web/article/(?P<id>[0-9]+)' + _TESTS = [{ + 'url': 'http://www.skynewsarabia.com/web/article/794549/%D8%A7%D9%94%D8%AD%D8%AF%D8%A7%D8%AB-%D8%A7%D9%84%D8%B4%D8%B1%D9%82-%D8%A7%D9%84%D8%A7%D9%94%D9%88%D8%B3%D8%B7-%D8%AE%D8%B1%D9%8A%D8%B7%D8%A9-%D8%A7%D9%84%D8%A7%D9%94%D9%84%D8%B9%D8%A7%D8%A8-%D8%A7%D9%84%D8%B0%D9%83%D9%8A%D8%A9', + 'info_dict': { + 'id': '794549', + 'ext': 'flv', + 'title': 'بالفيديو.. 
ألعاب ذكية تحاكي واقع المنطقة', + 'description': 'md5:0c373d29919a851e080ee4edd0c5d97f', + 'upload_date': '20151126', + 'timestamp': 1448559336, + 'duration': 281.6, + }, + 'params': { + # rtmp download + 'skip_download': True, + }, + }, { + 'url': 'http://www.skynewsarabia.com/web/article/794844/%D8%A7%D8%B3%D8%AA%D9%87%D8%AF%D8%A7%D9%81-%D9%82%D9%88%D8%A7%D8%B1%D8%A8-%D8%A7%D9%94%D8%B3%D9%84%D8%AD%D8%A9-%D9%84%D9%85%D9%8A%D9%84%D9%8A%D8%B4%D9%8A%D8%A7%D8%AA-%D8%A7%D9%84%D8%AD%D9%88%D8%AB%D9%8A-%D9%88%D8%B5%D8%A7%D9%84%D8%AD', + 'info_dict': { + 'id': '794844', + 'title': 'إحباط تهريب أسلحة لميليشيات الحوثي وصالح بجنوب اليمن', + 'description': 'md5:5c927b8b2e805796e7f693538d96fc7e', + }, + 'playlist_mincount': 2, + }] + + def _real_extract(self, url): + article_id = self._match_id(url) + article_data = self._call_api('article', article_id) + media_asset = article_data['mediaAsset'] + if media_asset['type'] == 'VIDEO': + topic = article_data.get('topicTitle') + return { + '_type': 'url_transparent', + 'url': 'limelight:media:%s' % self._get_limelight_media_id(media_asset['videoUrl'][0]['url']), + 'id': article_id, + 'title': article_data['headline'], + 'description': article_data.get('summary'), + 'thumbnail': self._get_image_url(media_asset['imageUrl']), + 'timestamp': parse_iso8601(article_data.get('date')), + 'tags': article_data.get('tags', []), + 'categories': [topic] if topic else [], + 'webpage_url': url, + 'ie_key': 'LimelightMedia', + } + entries = [self._extract_video_info(item) for item in article_data.get('inlineItems', []) if item['type'] == 'VIDEO'] + return self.playlist_result(entries, article_id, article_data['headline'], article_data.get('summary')) diff --git a/youtube_dl/extractor/smotri.py b/youtube_dl/extractor/smotri.py index 35a81ee87..30210c8a3 100644 --- a/youtube_dl/extractor/smotri.py +++ b/youtube_dl/extractor/smotri.py @@ -7,13 +7,11 @@ import hashlib import uuid from .common import InfoExtractor -from ..compat import ( - compat_urllib_parse, - compat_urllib_request, -) +from ..compat import compat_urllib_parse from ..utils import ( ExtractorError, int_or_none, + sanitized_Request, unified_strdate, ) @@ -176,7 +174,7 @@ class SmotriIE(InfoExtractor): if video_password: video_form['pass'] = hashlib.md5(video_password.encode('utf-8')).hexdigest() - request = compat_urllib_request.Request( + request = sanitized_Request( 'http://smotri.com/video/view/url/bot/', compat_urllib_parse.urlencode(video_form)) request.add_header('Content-Type', 'application/x-www-form-urlencoded') @@ -339,7 +337,7 @@ class SmotriBroadcastIE(InfoExtractor): 'password': password, } - request = compat_urllib_request.Request( + request = sanitized_Request( broadcast_url + '/?no_redirect=1', compat_urllib_parse.urlencode(login_form)) request.add_header('Content-Type', 'application/x-www-form-urlencoded') broadcast_page = self._download_webpage( diff --git a/youtube_dl/extractor/sohu.py b/youtube_dl/extractor/sohu.py index ba2d5e19b..ea8fc258d 100644 --- a/youtube_dl/extractor/sohu.py +++ b/youtube_dl/extractor/sohu.py @@ -6,11 +6,11 @@ import re from .common import InfoExtractor from ..compat import ( compat_str, - compat_urllib_request, compat_urllib_parse, ) from ..utils import ( ExtractorError, + sanitized_Request, ) @@ -96,7 +96,7 @@ class SohuIE(InfoExtractor): else: base_data_url = 'http://hot.vrs.sohu.com/vrs_flash.action?vid=' - req = compat_urllib_request.Request(base_data_url + vid_id) + req = sanitized_Request(base_data_url + vid_id) cn_verification_proxy = 
self._downloader.params.get('cn_verification_proxy') if cn_verification_proxy: @@ -158,6 +158,7 @@ class SohuIE(InfoExtractor): 'file': clips_url[i], 'new': su[i], 'prod': 'flash', + 'rb': 1, } if cdnId is not None: diff --git a/youtube_dl/extractor/soompi.py b/youtube_dl/extractor/soompi.py deleted file mode 100644 index 5da66ca9e..000000000 --- a/youtube_dl/extractor/soompi.py +++ /dev/null @@ -1,146 +0,0 @@ -# encoding: utf-8 -from __future__ import unicode_literals - -import re - -from .crunchyroll import CrunchyrollIE - -from .common import InfoExtractor -from ..compat import compat_HTTPError -from ..utils import ( - ExtractorError, - int_or_none, - remove_start, - xpath_text, -) - - -class SoompiBaseIE(InfoExtractor): - def _get_episodes(self, webpage, episode_filter=None): - episodes = self._parse_json( - self._search_regex( - r'VIDEOS\s*=\s*(\[.+?\]);', webpage, 'episodes JSON'), - None) - return list(filter(episode_filter, episodes)) - - -class SoompiIE(SoompiBaseIE, CrunchyrollIE): - IE_NAME = 'soompi' - _VALID_URL = r'https?://tv\.soompi\.com/(?:en/)?watch/(?P<id>[0-9]+)' - _TESTS = [{ - 'url': 'http://tv.soompi.com/en/watch/29235', - 'info_dict': { - 'id': '29235', - 'ext': 'mp4', - 'title': 'Episode 1096', - 'description': '2015-05-20' - }, - 'params': { - 'skip_download': True, - }, - }] - - def _get_episode(self, webpage, video_id): - return self._get_episodes(webpage, lambda x: x['id'] == video_id)[0] - - def _get_subtitles(self, config, video_id): - sub_langs = {} - for subtitle in config.findall('./{default}preload/subtitles/subtitle'): - sub_langs[subtitle.attrib['id']] = subtitle.attrib['title'] - - subtitles = {} - for s in config.findall('./{default}preload/subtitle'): - lang_code = sub_langs.get(s.attrib['id']) - if not lang_code: - continue - sub_id = s.get('id') - data = xpath_text(s, './data', 'data') - iv = xpath_text(s, './iv', 'iv') - if not id or not iv or not data: - continue - subtitle = self._decrypt_subtitles(data, iv, sub_id).decode('utf-8') - subtitles[lang_code] = self._extract_subtitles(subtitle) - return subtitles - - def _real_extract(self, url): - video_id = self._match_id(url) - - try: - webpage = self._download_webpage( - url, video_id, 'Downloading episode page') - except ExtractorError as ee: - if isinstance(ee.cause, compat_HTTPError) and ee.cause.code == 403: - webpage = ee.cause.read() - block_message = self._html_search_regex( - r'(?s)<div class="block-message">(.+?)</div>', webpage, - 'block message', default=None) - if block_message: - raise ExtractorError(block_message, expected=True) - raise - - formats = [] - config = None - for format_id in re.findall(r'\?quality=([0-9a-zA-Z]+)', webpage): - config = self._download_xml( - 'http://tv.soompi.com/en/show/_/%s-config.xml?mode=hls&quality=%s' % (video_id, format_id), - video_id, 'Downloading %s XML' % format_id) - m3u8_url = xpath_text( - config, './{default}preload/stream_info/file', - '%s m3u8 URL' % format_id) - if not m3u8_url: - continue - formats.extend(self._extract_m3u8_formats( - m3u8_url, video_id, 'mp4', m3u8_id=format_id)) - self._sort_formats(formats) - - episode = self._get_episode(webpage, video_id) - - title = episode['name'] - description = episode.get('description') - duration = int_or_none(episode.get('duration')) - - thumbnails = [{ - 'id': thumbnail_id, - 'url': thumbnail_url, - } for thumbnail_id, thumbnail_url in episode.get('img_url', {}).items()] - - subtitles = self.extract_subtitles(config, video_id) - - return { - 'id': video_id, - 'title': title, - 
'description': description, - 'thumbnails': thumbnails, - 'duration': duration, - 'formats': formats, - 'subtitles': subtitles - } - - -class SoompiShowIE(SoompiBaseIE): - IE_NAME = 'soompi:show' - _VALID_URL = r'https?://tv\.soompi\.com/en/shows/(?P<id>[0-9a-zA-Z\-_]+)' - _TESTS = [{ - 'url': 'http://tv.soompi.com/en/shows/liar-game', - 'info_dict': { - 'id': 'liar-game', - 'title': 'Liar Game', - 'description': 'md5:52c02bce0c1a622a95823591d0589b66', - }, - 'playlist_count': 14, - }] - - def _real_extract(self, url): - show_id = self._match_id(url) - - webpage = self._download_webpage( - url, show_id, 'Downloading show page') - - title = remove_start(self._og_search_title(webpage), 'SoompiTV | ') - description = self._og_search_description(webpage) - - entries = [ - self.url_result('http://tv.soompi.com/en/watch/%s' % episode['id'], 'Soompi') - for episode in self._get_episodes(webpage)] - - return self.playlist_result(entries, show_id, title, description) diff --git a/youtube_dl/extractor/soundcloud.py b/youtube_dl/extractor/soundcloud.py index 2b60d354a..02e64e094 100644 --- a/youtube_dl/extractor/soundcloud.py +++ b/youtube_dl/extractor/soundcloud.py @@ -4,13 +4,17 @@ from __future__ import unicode_literals import re import itertools -from .common import InfoExtractor +from .common import ( + InfoExtractor, + SearchInfoExtractor +) from ..compat import ( compat_str, compat_urlparse, compat_urllib_parse, ) from ..utils import ( + encode_dict, ExtractorError, int_or_none, unified_strdate, @@ -469,3 +473,60 @@ class SoundcloudPlaylistIE(SoundcloudIE): 'description': data.get('description'), 'entries': entries, } + + +class SoundcloudSearchIE(SearchInfoExtractor, SoundcloudIE): + IE_NAME = 'soundcloud:search' + IE_DESC = 'Soundcloud search' + _MAX_RESULTS = float('inf') + _TESTS = [{ + 'url': 'scsearch15:post-avant jazzcore', + 'info_dict': { + 'title': 'post-avant jazzcore', + }, + 'playlist_count': 15, + }] + + _SEARCH_KEY = 'scsearch' + _MAX_RESULTS_PER_PAGE = 200 + _DEFAULT_RESULTS_PER_PAGE = 50 + _API_V2_BASE = 'https://api-v2.soundcloud.com' + + def _get_collection(self, endpoint, collection_id, **query): + limit = min( + query.get('limit', self._DEFAULT_RESULTS_PER_PAGE), + self._MAX_RESULTS_PER_PAGE) + query['limit'] = limit + query['client_id'] = self._CLIENT_ID + query['linked_partitioning'] = '1' + query['offset'] = 0 + data = compat_urllib_parse.urlencode(encode_dict(query)) + next_url = '{0}{1}?{2}'.format(self._API_V2_BASE, endpoint, data) + + collected_results = 0 + + for i in itertools.count(1): + response = self._download_json( + next_url, collection_id, 'Downloading page {0}'.format(i), + 'Unable to download API page') + + collection = response.get('collection', []) + if not collection: + break + + collection = list(filter(bool, collection)) + collected_results += len(collection) + + for item in collection: + yield self.url_result(item['uri'], SoundcloudIE.ie_key()) + + if not collection or collected_results >= limit: + break + + next_url = response.get('next_href') + if not next_url: + break + + def _get_n_results(self, query, n): + tracks = self._get_collection('/search/tracks', query, limit=n, q=query) + return self.playlist_result(tracks, playlist_title=query) diff --git a/youtube_dl/extractor/space.py b/youtube_dl/extractor/space.py index c2d0d36a6..ebb5d6ec0 100644 --- a/youtube_dl/extractor/space.py +++ b/youtube_dl/extractor/space.py @@ -3,14 +3,14 @@ from __future__ import unicode_literals import re from .common import InfoExtractor -from .brightcove import 
BrightcoveIE +from .brightcove import BrightcoveLegacyIE from ..utils import RegexNotFoundError, ExtractorError class SpaceIE(InfoExtractor): _VALID_URL = r'https?://(?:(?:www|m)\.)?space\.com/\d+-(?P<title>[^/\.\?]*?)-video\.html' _TEST = { - 'add_ie': ['Brightcove'], + 'add_ie': ['BrightcoveLegacy'], 'url': 'http://www.space.com/23373-huge-martian-landforms-detail-revealed-by-european-probe-video.html', 'info_dict': { 'id': '2780937028001', @@ -31,8 +31,8 @@ class SpaceIE(InfoExtractor): brightcove_url = self._og_search_video_url(webpage) except RegexNotFoundError: # Other videos works fine with the info from the object - brightcove_url = BrightcoveIE._extract_brightcove_url(webpage) + brightcove_url = BrightcoveLegacyIE._extract_brightcove_url(webpage) if brightcove_url is None: raise ExtractorError( 'The webpage does not contain a video', expected=True) - return self.url_result(brightcove_url, BrightcoveIE.ie_key()) + return self.url_result(brightcove_url, BrightcoveLegacyIE.ie_key()) diff --git a/youtube_dl/extractor/spankwire.py b/youtube_dl/extractor/spankwire.py index 9e8fb35b2..692fd78e8 100644 --- a/youtube_dl/extractor/spankwire.py +++ b/youtube_dl/extractor/spankwire.py @@ -6,9 +6,9 @@ from .common import InfoExtractor from ..compat import ( compat_urllib_parse_unquote, compat_urllib_parse_urlparse, - compat_urllib_request, ) from ..utils import ( + sanitized_Request, str_to_int, unified_strdate, ) @@ -51,7 +51,7 @@ class SpankwireIE(InfoExtractor): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') - req = compat_urllib_request.Request('http://www.' + mobj.group('url')) + req = sanitized_Request('http://www.' + mobj.group('url')) req.add_header('Cookie', 'age_verified=1') webpage = self._download_webpage(req, video_id) diff --git a/youtube_dl/extractor/spiegel.py b/youtube_dl/extractor/spiegel.py index 5bd3c0087..39a7aaf9d 100644 --- a/youtube_dl/extractor/spiegel.py +++ b/youtube_dl/extractor/spiegel.py @@ -58,7 +58,8 @@ class SpiegelIE(InfoExtractor): description = self._html_search_meta('description', webpage, 'description') base_url = self._search_regex( - r'var\s+server\s*=\s*"([^"]+)\"', webpage, 'server URL') + [r'server\s*:\s*(["\'])(?P<url>.+?)\1', r'var\s+server\s*=\s*"(?P<url>[^"]+)\"'], + webpage, 'server URL', group='url') xml_url = base_url + video_id + '.xml' idoc = self._download_xml(xml_url, video_id) diff --git a/youtube_dl/extractor/spiegeltv.py b/youtube_dl/extractor/spiegeltv.py index 27f4033c5..034bd47ff 100644 --- a/youtube_dl/extractor/spiegeltv.py +++ b/youtube_dl/extractor/spiegeltv.py @@ -77,17 +77,21 @@ class SpiegeltvIE(InfoExtractor): 'rtmp_live': True, }) elif determine_ext(endpoint) == 'm3u8': - m3u8_formats = self._extract_m3u8_formats( - endpoint.replace('[video]', play_path), - video_id, 'm4v', - preference=1, # Prefer hls since it allows to workaround georestriction - m3u8_id='hls', fatal=False) - if m3u8_formats is not False: - formats.extend(m3u8_formats) + formats.append({ + 'url': endpoint.replace('[video]', play_path), + 'ext': 'm4v', + 'format_id': 'hls', # Prefer hls since it allows to workaround georestriction + 'protocol': 'm3u8', + 'preference': 1, + 'http_headers': { + 'Accept-Encoding': 'deflate', # gzip causes trouble on the server side + }, + }) else: formats.append({ 'url': endpoint, }) + self._check_formats(formats, video_id) thumbnails = [] for image in media_json['images']: diff --git a/youtube_dl/extractor/sportdeutschland.py b/youtube_dl/extractor/sportdeutschland.py index 7ec6c613f..a9927f6e2 100644 --- 
a/youtube_dl/extractor/sportdeutschland.py +++ b/youtube_dl/extractor/sportdeutschland.py @@ -4,11 +4,9 @@ from __future__ import unicode_literals import re from .common import InfoExtractor -from ..compat import ( - compat_urllib_request, -) from ..utils import ( parse_iso8601, + sanitized_Request, ) @@ -54,7 +52,7 @@ class SportDeutschlandIE(InfoExtractor): api_url = 'http://proxy.vidibusdynamic.net/sportdeutschland.tv/api/permalinks/%s/%s?access_token=true' % ( sport_id, video_id) - req = compat_urllib_request.Request(api_url, headers={ + req = sanitized_Request(api_url, headers={ 'Accept': 'application/vnd.vidibus.v2.html+json', 'Referer': url, }) @@ -72,10 +70,12 @@ class SportDeutschlandIE(InfoExtractor): smil_doc = self._download_xml( smil_url, video_id, note='Downloading SMIL metadata') - base_url = smil_doc.find('./head/meta').attrib['base'] + base_url_el = smil_doc.find('./head/meta') + if base_url_el: + base_url = base_url_el.attrib['base'] formats.extend([{ 'format_id': 'rmtp', - 'url': base_url, + 'url': base_url if base_url_el else n.attrib['src'], 'play_path': n.attrib['src'], 'ext': 'flv', 'preference': -100, diff --git a/youtube_dl/extractor/srf.py b/youtube_dl/extractor/srf.py deleted file mode 100644 index 77eec0bc7..000000000 --- a/youtube_dl/extractor/srf.py +++ /dev/null @@ -1,104 +0,0 @@ -# coding: utf-8 -from __future__ import unicode_literals - -import re -from .common import InfoExtractor -from ..utils import ( - determine_ext, - parse_iso8601, - xpath_text, -) - - -class SrfIE(InfoExtractor): - _VALID_URL = r'https?://(?:www\.srf\.ch/play(?:er)?/tv/[^/]+/video/(?P<display_id>[^?]+)\?id=|tp\.srgssr\.ch/p/flash\?urn=urn:srf:ais:video:)(?P<id>[0-9a-f\-]{36})' - _TESTS = [{ - 'url': 'http://www.srf.ch/play/tv/10vor10/video/snowden-beantragt-asyl-in-russland?id=28e1a57d-5b76-4399-8ab3-9097f071e6c5', - 'md5': '4cd93523723beff51bb4bee974ee238d', - 'info_dict': { - 'id': '28e1a57d-5b76-4399-8ab3-9097f071e6c5', - 'display_id': 'snowden-beantragt-asyl-in-russland', - 'ext': 'm4v', - 'upload_date': '20130701', - 'title': 'Snowden beantragt Asyl in Russland', - 'timestamp': 1372713995, - } - }, { - # No Speichern (Save) button - 'url': 'http://www.srf.ch/play/tv/top-gear/video/jaguar-xk120-shadow-und-tornado-dampflokomotive?id=677f5829-e473-4823-ac83-a1087fe97faa', - 'md5': 'd97e236e80d1d24729e5d0953d276a4f', - 'info_dict': { - 'id': '677f5829-e473-4823-ac83-a1087fe97faa', - 'display_id': 'jaguar-xk120-shadow-und-tornado-dampflokomotive', - 'ext': 'flv', - 'upload_date': '20130710', - 'title': 'Jaguar XK120, Shadow und Tornado-Dampflokomotive', - 'timestamp': 1373493600, - }, - }, { - 'url': 'http://www.srf.ch/player/tv/10vor10/video/snowden-beantragt-asyl-in-russland?id=28e1a57d-5b76-4399-8ab3-9097f071e6c5', - 'only_matching': True, - }, { - 'url': 'https://tp.srgssr.ch/p/flash?urn=urn:srf:ais:video:28e1a57d-5b76-4399-8ab3-9097f071e6c5', - 'only_matching': True, - }] - - def _real_extract(self, url): - video_id = self._match_id(url) - display_id = re.match(self._VALID_URL, url).group('display_id') or video_id - - video_data = self._download_xml( - 'http://il.srgssr.ch/integrationlayer/1.0/ue/srf/video/play/%s.xml' % video_id, - display_id) - - title = xpath_text( - video_data, './AssetMetadatas/AssetMetadata/title', fatal=True) - thumbnails = [{ - 'url': s.text - } for s in video_data.findall('.//ImageRepresentation/url')] - timestamp = parse_iso8601(xpath_text(video_data, './createdDate')) - # The <duration> field in XML is different from the exact duration, skipping - - 
formats = [] - for item in video_data.findall('./Playlists/Playlist') + video_data.findall('./Downloads/Download'): - for url_node in item.findall('url'): - quality = url_node.attrib['quality'] - full_url = url_node.text - original_ext = determine_ext(full_url) - format_id = '%s-%s' % (quality, item.attrib['protocol']) - if original_ext == 'f4m': - formats.extend(self._extract_f4m_formats( - full_url + '?hdcore=3.4.0', display_id, f4m_id=format_id)) - elif original_ext == 'm3u8': - formats.extend(self._extract_m3u8_formats( - full_url, display_id, 'mp4', m3u8_id=format_id)) - else: - formats.append({ - 'url': full_url, - 'ext': original_ext, - 'format_id': format_id, - 'quality': 0 if 'HD' in quality else -1, - 'preference': 1, - }) - - self._sort_formats(formats) - - subtitles = {} - subtitles_data = video_data.find('Subtitles') - if subtitles_data is not None: - subtitles_list = [{ - 'url': sub.text, - 'ext': determine_ext(sub.text), - } for sub in subtitles_data] - if subtitles_list: - subtitles['de'] = subtitles_list - - return { - 'id': video_id, - 'display_id': display_id, - 'formats': formats, - 'title': title, - 'thumbnails': thumbnails, - 'timestamp': timestamp, - 'subtitles': subtitles, - } diff --git a/youtube_dl/extractor/srgssr.py b/youtube_dl/extractor/srgssr.py new file mode 100644 index 000000000..4707029ca --- /dev/null +++ b/youtube_dl/extractor/srgssr.py @@ -0,0 +1,158 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..utils import ( + ExtractorError, + parse_iso8601, + qualities, +) + + +class SRGSSRIE(InfoExtractor): + _VALID_URL = r'(?:https?://tp\.srgssr\.ch/p(?:/[^/]+)+\?urn=urn|srgssr):(?P<bu>srf|rts|rsi|rtr|swi):(?:[^:]+:)?(?P<type>video|audio):(?P<id>[0-9a-f\-]{36}|\d+)' + + _ERRORS = { + 'AGERATING12': 'To protect children under the age of 12, this video is only available between 8 p.m. and 6 a.m.', + 'AGERATING18': 'To protect children under the age of 18, this video is only available between 11 p.m. and 5 a.m.', + # 'ENDDATE': 'For legal reasons, this video was only available for a specified period of time.', + 'GEOBLOCK': 'For legal reasons, this video is only available in Switzerland.', + 'LEGAL': 'The video cannot be transmitted for legal reasons.', + 'STARTDATE': 'This video is not yet available. 
Please try again later.', + } + + def get_media_data(self, bu, media_type, media_id): + media_data = self._download_json( + 'http://il.srgssr.ch/integrationlayer/1.0/ue/%s/%s/play/%s.json' % (bu, media_type, media_id), + media_id)[media_type.capitalize()] + + if media_data.get('block') and media_data['block'] in self._ERRORS: + raise ExtractorError('%s said: %s' % ( + self.IE_NAME, self._ERRORS[media_data['block']]), expected=True) + + return media_data + + def _real_extract(self, url): + bu, media_type, media_id = re.match(self._VALID_URL, url).groups() + + if bu == 'rts': + return self.url_result('rts:%s' % media_id, 'RTS') + + media_data = self.get_media_data(bu, media_type, media_id) + + metadata = media_data['AssetMetadatas']['AssetMetadata'][0] + title = metadata['title'] + description = metadata.get('description') + created_date = media_data.get('createdDate') or metadata.get('createdDate') + timestamp = parse_iso8601(created_date) + + thumbnails = [{ + 'id': image.get('id'), + 'url': image['url'], + } for image in media_data.get('Image', {}).get('ImageRepresentations', {}).get('ImageRepresentation', [])] + + preference = qualities(['LQ', 'MQ', 'SD', 'HQ', 'HD']) + formats = [] + for source in media_data.get('Playlists', {}).get('Playlist', []) + media_data.get('Downloads', {}).get('Download', []): + protocol = source.get('@protocol') + for asset in source['url']: + asset_url = asset['text'] + quality = asset['@quality'] + format_id = '%s-%s' % (protocol, quality) + if protocol == 'HTTP-HDS': + formats.extend(self._extract_f4m_formats( + asset_url + '?hdcore=3.4.0', media_id, + f4m_id=format_id, fatal=False)) + elif protocol == 'HTTP-HLS': + formats.extend(self._extract_m3u8_formats( + asset_url, media_id, 'mp4', 'm3u8_native', + m3u8_id=format_id, fatal=False)) + else: + ext = None + if protocol == 'RTMP': + ext = self._search_regex(r'([a-z0-9]+):[^/]+', asset_url, 'ext') + formats.append({ + 'format_id': format_id, + 'url': asset_url, + 'preference': preference(quality), + 'ext': ext, + }) + self._sort_formats(formats) + + return { + 'id': media_id, + 'title': title, + 'description': description, + 'timestamp': timestamp, + 'thumbnails': thumbnails, + 'formats': formats, + } + + +class SRGSSRPlayIE(InfoExtractor): + IE_DESC = 'srf.ch, rts.ch, rsi.ch, rtr.ch and swissinfo.ch play sites' + _VALID_URL = r'https?://(?:(?:www|play)\.)?(?P<bu>srf|rts|rsi|rtr|swissinfo)\.ch/play/(?:tv|radio)/[^/]+/(?P<type>video|audio)/[^?]+\?id=(?P<id>[0-9a-f\-]{36}|\d+)' + + _TESTS = [{ + 'url': 'http://www.srf.ch/play/tv/10vor10/video/snowden-beantragt-asyl-in-russland?id=28e1a57d-5b76-4399-8ab3-9097f071e6c5', + 'md5': '4cd93523723beff51bb4bee974ee238d', + 'info_dict': { + 'id': '28e1a57d-5b76-4399-8ab3-9097f071e6c5', + 'ext': 'm4v', + 'upload_date': '20130701', + 'title': 'Snowden beantragt Asyl in Russland', + 'timestamp': 1372713995, + } + }, { + # No Speichern (Save) button + 'url': 'http://www.srf.ch/play/tv/top-gear/video/jaguar-xk120-shadow-und-tornado-dampflokomotive?id=677f5829-e473-4823-ac83-a1087fe97faa', + 'md5': '0a274ce38fda48c53c01890651985bc6', + 'info_dict': { + 'id': '677f5829-e473-4823-ac83-a1087fe97faa', + 'ext': 'flv', + 'upload_date': '20130710', + 'title': 'Jaguar XK120, Shadow und Tornado-Dampflokomotive', + 'description': 'md5:88604432b60d5a38787f152dec89cd56', + 'timestamp': 1373493600, + }, + }, { + 'url': 'http://www.rtr.ch/play/radio/actualitad/audio/saira-tujetsch-tuttina-cuntinuar-cun-sedrun-muster-turissem?id=63cb0778-27f8-49af-9284-8c7a8c6d15fc', + 'info_dict': { + 
'id': '63cb0778-27f8-49af-9284-8c7a8c6d15fc', + 'ext': 'mp3', + 'upload_date': '20151013', + 'title': 'Saira: Tujetsch - tuttina cuntinuar cun Sedrun Mustér Turissem', + 'timestamp': 1444750398, + }, + 'params': { + # rtmp download + 'skip_download': True, + }, + }, { + 'url': 'http://www.rts.ch/play/tv/-/video/le-19h30?id=6348260', + 'md5': '67a2a9ae4e8e62a68d0e9820cc9782df', + 'info_dict': { + 'id': '6348260', + 'display_id': '6348260', + 'ext': 'mp4', + 'duration': 1796, + 'title': 'Le 19h30', + 'description': '', + 'uploader': '19h30', + 'upload_date': '20141201', + 'timestamp': 1417458600, + 'thumbnail': 're:^https?://.*\.image', + 'view_count': int, + }, + 'params': { + # m3u8 download + 'skip_download': True, + } + }] + + def _real_extract(self, url): + bu, media_type, media_id = re.match(self._VALID_URL, url).groups() + # other info can be extracted from url + '&layout=json' + return self.url_result('srgssr:%s:%s:%s' % (bu[:3], media_type, media_id), 'SRGSSR') diff --git a/youtube_dl/extractor/srmediathek.py b/youtube_dl/extractor/srmediathek.py index 5d583c720..74d01183f 100644 --- a/youtube_dl/extractor/srmediathek.py +++ b/youtube_dl/extractor/srmediathek.py @@ -1,17 +1,18 @@ # encoding: utf-8 from __future__ import unicode_literals -import json +from .ard import ARDMediathekIE +from ..utils import ( + ExtractorError, + get_element_by_attribute, +) -from .common import InfoExtractor -from ..utils import js_to_json - -class SRMediathekIE(InfoExtractor): +class SRMediathekIE(ARDMediathekIE): IE_DESC = 'Saarländischer Rundfunk' _VALID_URL = r'https?://sr-mediathek\.sr-online\.de/index\.php\?.*?&id=(?P<id>[0-9]+)' - _TEST = { + _TESTS = [{ 'url': 'http://sr-mediathek.sr-online.de/index.php?seite=7&id=28455', 'info_dict': { 'id': '28455', @@ -20,24 +21,36 @@ class SRMediathekIE(InfoExtractor): 'description': 'Ringen: KSV Köllerbach gegen Aachen-Walheim; Frauen-Fußball: 1. 
FC Saarbrücken gegen Sindelfingen; Motorsport: Rallye in Losheim; dazu: Interview mit Timo Bernhard; Turnen: TG Saar; Reitsport: Deutscher Voltigier-Pokal; Badminton: Interview mit Michael Fuchs ', 'thumbnail': 're:^https?://.*\.jpg$', }, - } + 'skip': 'no longer available', + }, { + 'url': 'http://sr-mediathek.sr-online.de/index.php?seite=7&id=37682', + 'info_dict': { + 'id': '37682', + 'ext': 'mp4', + 'title': 'Love, Cakes and Rock\'n\'Roll', + 'description': 'md5:18bf9763631c7d326c22603681e1123d', + }, + 'params': { + # m3u8 download + 'skip_download': True, + }, + 'expected_warnings': ['Unable to download f4m manifest'] + }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) - murls = json.loads(js_to_json(self._search_regex( - r'var mediaURLs\s*=\s*(.*?);\n', webpage, 'video URLs'))) - formats = [{'url': murl} for murl in murls] - self._sort_formats(formats) - - title = json.loads(js_to_json(self._search_regex( - r'var mediaTitles\s*=\s*(.*?);\n', webpage, 'title')))[0] + if '>Der gewünschte Beitrag ist leider nicht mehr verfügbar.<' in webpage: + raise ExtractorError('Video %s is no longer available' % video_id, expected=True) - return { + media_collection_url = self._search_regex( + r'data-mediacollection-ardplayer="([^"]+)"', webpage, 'media collection url') + info = self._extract_media_info(media_collection_url, webpage, video_id) + info.update({ 'id': video_id, - 'title': title, - 'formats': formats, + 'title': get_element_by_attribute('class', 'ardplayer-title', webpage), 'description': self._og_search_description(webpage), 'thumbnail': self._og_search_thumbnail(webpage), - } + }) + return info diff --git a/youtube_dl/extractor/stitcher.py b/youtube_dl/extractor/stitcher.py new file mode 100644 index 000000000..d5c852f52 --- /dev/null +++ b/youtube_dl/extractor/stitcher.py @@ -0,0 +1,81 @@ +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..utils import ( + determine_ext, + int_or_none, + js_to_json, + unescapeHTML, +) + + +class StitcherIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?stitcher\.com/podcast/(?:[^/]+/)+e/(?:(?P<display_id>[^/#?&]+?)-)?(?P<id>\d+)(?:[/#?&]|$)' + _TESTS = [{ + 'url': 'http://www.stitcher.com/podcast/the-talking-machines/e/40789481?autoplay=true', + 'md5': '391dd4e021e6edeb7b8e68fbf2e9e940', + 'info_dict': { + 'id': '40789481', + 'ext': 'mp3', + 'title': 'Machine Learning Mastery and Cancer Clusters', + 'description': 'md5:55163197a44e915a14a1ac3a1de0f2d3', + 'duration': 1604, + 'thumbnail': 're:^https?://.*\.jpg', + }, + }, { + 'url': 'http://www.stitcher.com/podcast/panoply/vulture-tv/e/the-rare-hourlong-comedy-plus-40846275?autoplay=true', + 'info_dict': { + 'id': '40846275', + 'display_id': 'the-rare-hourlong-comedy-plus', + 'ext': 'mp3', + 'title': "The CW's 'Crazy Ex-Girlfriend'", + 'description': 'md5:04f1e2f98eb3f5cbb094cea0f9e19b17', + 'duration': 2235, + 'thumbnail': 're:^https?://.*\.jpg', + }, + 'params': { + 'skip_download': True, + }, + }, { + # escaped title + 'url': 'http://www.stitcher.com/podcast/marketplace-on-stitcher/e/40910226?autoplay=true', + 'only_matching': True, + }, { + 'url': 'http://www.stitcher.com/podcast/panoply/getting-in/e/episode-2a-how-many-extracurriculars-should-i-have-40876278?autoplay=true', + 'only_matching': True, + }] + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + audio_id = mobj.group('id') + display_id = mobj.group('display_id') or audio_id + + webpage = 
self._download_webpage(url, display_id) + + episode = self._parse_json( + js_to_json(self._search_regex( + r'(?s)var\s+stitcher\s*=\s*({.+?});\n', webpage, 'episode config')), + display_id)['config']['episode'] + + title = unescapeHTML(episode['title']) + formats = [{ + 'url': episode[episode_key], + 'ext': determine_ext(episode[episode_key]) or 'mp3', + 'vcodec': 'none', + } for episode_key in ('episodeURL',) if episode.get(episode_key)] + description = self._search_regex( + r'Episode Info:\s*</span>([^<]+)<', webpage, 'description', fatal=False) + duration = int_or_none(episode.get('duration')) + thumbnail = episode.get('episodeImage') + + return { + 'id': audio_id, + 'display_id': display_id, + 'title': title, + 'description': description, + 'duration': duration, + 'thumbnail': thumbnail, + 'formats': formats, + } diff --git a/youtube_dl/extractor/streamcloud.py b/youtube_dl/extractor/streamcloud.py index d4e134015..77841b946 100644 --- a/youtube_dl/extractor/streamcloud.py +++ b/youtube_dl/extractor/streamcloud.py @@ -4,10 +4,8 @@ from __future__ import unicode_literals import re from .common import InfoExtractor -from ..compat import ( - compat_urllib_parse, - compat_urllib_request, -) +from ..compat import compat_urllib_parse +from ..utils import sanitized_Request class StreamcloudIE(InfoExtractor): @@ -43,7 +41,7 @@ class StreamcloudIE(InfoExtractor): headers = { b'Content-Type': b'application/x-www-form-urlencoded', } - req = compat_urllib_request.Request(url, post, headers) + req = sanitized_Request(url, post, headers) webpage = self._download_webpage( req, video_id, note='Downloading video page ...') diff --git a/youtube_dl/extractor/streamcz.py b/youtube_dl/extractor/streamcz.py index e92b93285..d3d2b7eb7 100644 --- a/youtube_dl/extractor/streamcz.py +++ b/youtube_dl/extractor/streamcz.py @@ -5,11 +5,9 @@ import hashlib import time from .common import InfoExtractor -from ..compat import ( - compat_urllib_request, -) from ..utils import ( int_or_none, + sanitized_Request, ) @@ -54,7 +52,7 @@ class StreamCZIE(InfoExtractor): video_id = self._match_id(url) api_path = '/episode/%s' % video_id - req = compat_urllib_request.Request(self._API_URL + api_path) + req = sanitized_Request(self._API_URL + api_path) req.add_header('Api-Password', _get_api_key(api_path)) data = self._download_json(req, video_id) diff --git a/youtube_dl/extractor/tapely.py b/youtube_dl/extractor/tapely.py index f1f43d0a7..ed560bd24 100644 --- a/youtube_dl/extractor/tapely.py +++ b/youtube_dl/extractor/tapely.py @@ -4,19 +4,17 @@ from __future__ import unicode_literals import re from .common import InfoExtractor -from ..compat import ( - compat_urllib_request, -) from ..utils import ( clean_html, ExtractorError, float_or_none, parse_iso8601, + sanitized_Request, ) class TapelyIE(InfoExtractor): - _VALID_URL = r'https?://(?:www\.)?tape\.ly/(?P<id>[A-Za-z0-9\-_]+)(?:/(?P<songnr>\d+))?' + _VALID_URL = r'https?://(?:www\.)?(?:tape\.ly|tapely\.com)/(?P<id>[A-Za-z0-9\-_]+)(?:/(?P<songnr>\d+))?' 
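# ---------------------------------------------------------------------------
# Editor's aside (illustration, not part of the patch): the widened
# _VALID_URL above accepts the new tapely.com domain alongside tape.ly. A
# quick check of what the two capture groups yield; the first sample URL is
# hypothetical, the second is the only_matching test URL added further down
# in this hunk:
import re

TAPELY_VALID_URL = (r'https?://(?:www\.)?(?:tape\.ly|tapely\.com)'
                    r'/(?P<id>[A-Za-z0-9\-_]+)(?:/(?P<songnr>\d+))?')

for sample in ('http://tape.ly/my-grief-as-told-by-water/2',
               'https://tapely.com/my-grief-as-told-by-water'):
    m = re.match(TAPELY_VALID_URL, sample)
    # 'id' is the tape's slug; 'songnr', when present, selects a single song.
    print(m.group('id'), m.group('songnr'))
# prints: my-grief-as-told-by-water 2, then my-grief-as-told-by-water None
# ---------------------------------------------------------------------------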
_API_URL = 'http://tape.ly/showtape?id={0:}' _S3_SONG_URL = 'http://mytape.s3.amazonaws.com/{0:}' _SOUNDCLOUD_SONG_URL = 'http://api.soundcloud.com{0:}' @@ -42,6 +40,10 @@ class TapelyIE(InfoExtractor): 'ext': 'm4a', }, }, + { + 'url': 'https://tapely.com/my-grief-as-told-by-water', + 'only_matching': True, + }, ] def _real_extract(self, url): @@ -49,7 +51,7 @@ class TapelyIE(InfoExtractor): display_id = mobj.group('id') playlist_url = self._API_URL.format(display_id) - request = compat_urllib_request.Request(playlist_url) + request = sanitized_Request(playlist_url) request.add_header('X-Requested-With', 'XMLHttpRequest') request.add_header('Accept', 'application/json') request.add_header('Referer', url) diff --git a/youtube_dl/extractor/teachingchannel.py b/youtube_dl/extractor/teachingchannel.py index 117afa9bf..e0477382c 100644 --- a/youtube_dl/extractor/teachingchannel.py +++ b/youtube_dl/extractor/teachingchannel.py @@ -16,6 +16,7 @@ class TeachingChannelIE(InfoExtractor): 'ext': 'mp4', 'title': 'A History of Teaming', 'description': 'md5:2a9033db8da81f2edffa4c99888140b3', + 'duration': 422.255, }, 'params': { # m3u8 download diff --git a/youtube_dl/extractor/tele13.py b/youtube_dl/extractor/tele13.py new file mode 100644 index 000000000..4e860db0a --- /dev/null +++ b/youtube_dl/extractor/tele13.py @@ -0,0 +1,88 @@ +# coding: utf-8 +from __future__ import unicode_literals + +from .common import InfoExtractor +from .youtube import YoutubeIE +from ..utils import ( + js_to_json, + qualities, + determine_ext, +) + + +class Tele13IE(InfoExtractor): + _VALID_URL = r'^http://(?:www\.)?t13\.cl/videos(?:/[^/]+)+/(?P<id>[\w-]+)' + _TESTS = [ + { + 'url': 'http://www.t13.cl/videos/actualidad/el-circulo-de-hierro-de-michelle-bachelet-en-su-regreso-a-la-moneda', + 'md5': '4cb1fa38adcad8fea88487a078831755', + 'info_dict': { + 'id': 'el-circulo-de-hierro-de-michelle-bachelet-en-su-regreso-a-la-moneda', + 'ext': 'mp4', + 'title': 'El círculo de hierro de Michelle Bachelet en su regreso a La Moneda', + }, + 'params': { + # HTTP Error 404: Not Found + 'skip_download': True, + }, + }, + { + 'url': 'http://www.t13.cl/videos/mundo/tendencias/video-captan-misteriosa-bola-fuego-cielos-bangkok', + 'md5': '867adf6a3b3fef932c68a71d70b70946', + 'info_dict': { + 'id': 'rOoKv2OMpOw', + 'ext': 'mp4', + 'title': 'Shooting star seen on 7-Sep-2015', + 'description': 'md5:7292ff2a34b2f673da77da222ae77e1e', + 'uploader': 'Porjai Jaturongkhakun', + 'upload_date': '20150906', + 'uploader_id': 'UCnLY_3ezwNcDSC_Wc6suZxw', + }, + 'add_ie': ['Youtube'], + } + ] + + def _real_extract(self, url): + display_id = self._match_id(url) + webpage = self._download_webpage(url, display_id) + + setup_js = self._search_regex( + r"(?s)jwplayer\('player-vivo'\).setup\((\{.*?\})\)", + webpage, 'setup code') + sources = self._parse_json(self._search_regex( + r'sources\s*:\s*(\[[^\]]+\])', setup_js, 'sources'), + display_id, js_to_json) + + preference = qualities(['Móvil', 'SD', 'HD']) + formats = [] + urls = [] + for f in sources: + format_url = f['file'] + if format_url and format_url not in urls: + ext = determine_ext(format_url) + if ext == 'm3u8': + formats.extend(self._extract_m3u8_formats( + format_url, display_id, 'mp4', 'm3u8_native', + m3u8_id='hls', fatal=False)) + elif YoutubeIE.suitable(format_url): + return self.url_result(format_url, 'Youtube') + else: + formats.append({ + 'url': format_url, + 'format_id': f.get('label'), + 'preference': preference(f.get('label')), + 'ext': ext, + }) + urls.append(format_url) + 
self._sort_formats(formats) + + return { + 'id': display_id, + 'title': self._search_regex( + r'title\s*:\s*"([^"]+)"', setup_js, 'title'), + 'description': self._html_search_meta( + 'description', webpage, 'description'), + 'thumbnail': self._search_regex( + r'image\s*:\s*"([^"]+)"', setup_js, 'thumbnail', default=None), + 'formats': formats, + } diff --git a/youtube_dl/extractor/tf1.py b/youtube_dl/extractor/tf1.py index 3a68eaa80..6890021cf 100644 --- a/youtube_dl/extractor/tf1.py +++ b/youtube_dl/extractor/tf1.py @@ -6,7 +6,7 @@ from .common import InfoExtractor class TF1IE(InfoExtractor): """TF1 uses the wat.tv player.""" - _VALID_URL = r'http://(?:(?:videos|www|lci)\.tf1|www\.tfou)\.fr/.*?-(?P<id>\d+)(?:-\d+)?\.html' + _VALID_URL = r'http://(?:(?:videos|www|lci)\.tf1|www\.tfou)\.fr/(?:[^/]+/)*(?P<id>.+?)\.html' _TESTS = [{ 'url': 'http://videos.tf1.fr/auto-moto/citroen-grand-c4-picasso-2013-presentation-officielle-8062060.html', 'info_dict': { @@ -22,7 +22,7 @@ class TF1IE(InfoExtractor): }, { 'url': 'http://www.tfou.fr/chuggington/videos/le-grand-mysterioso-chuggington-7085291-739.html', 'info_dict': { - 'id': '12043945', + 'id': 'le-grand-mysterioso-chuggington-7085291-739', 'ext': 'mp4', 'title': 'Le grand Mystérioso - Chuggington', 'description': 'Le grand Mystérioso - Emery rêve qu\'un article lui soit consacré dans le journal.', @@ -32,22 +32,24 @@ class TF1IE(InfoExtractor): # Sometimes wat serves the whole file with the --test option 'skip_download': True, }, + 'skip': 'HTTP Error 410: Gone', }, { 'url': 'http://www.tf1.fr/tf1/koh-lanta/videos/replay-koh-lanta-22-mai-2015.html', 'only_matching': True, }, { 'url': 'http://lci.tf1.fr/sept-a-huit/videos/sept-a-huit-du-24-mai-2015-8611550.html', 'only_matching': True, + }, { + 'url': 'http://www.tf1.fr/hd1/documentaire/videos/mylene-farmer-d-une-icone.html', + 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) - embed_url = self._html_search_regex( - r'["\'](https?://www.wat.tv/embedframe/.*?)["\']', webpage, 'embed url') - embed_page = self._download_webpage(embed_url, video_id, - 'Downloading embed player page') - wat_id = self._search_regex(r'UVID=(.*?)&', embed_page, 'wat id') + wat_id = self._html_search_regex( + r'(["\'])(?:https?:)?//www\.wat\.tv/embedframe/.*?(?P<id>\d{8})\1', + webpage, 'wat id', group='id') wat_info = self._download_json( 'http://www.wat.tv/interface/contentv3/%s' % wat_id, video_id) return self.url_result(wat_info['media']['url'], 'Wat') diff --git a/youtube_dl/extractor/theintercept.py b/youtube_dl/extractor/theintercept.py new file mode 100644 index 000000000..8cb3c3669 --- /dev/null +++ b/youtube_dl/extractor/theintercept.py @@ -0,0 +1,49 @@ +# encoding: utf-8 +from __future__ import unicode_literals + +from .common import InfoExtractor +from ..compat import compat_str +from ..utils import ( + parse_iso8601, + int_or_none, + ExtractorError, +) + + +class TheInterceptIE(InfoExtractor): + _VALID_URL = r'https://theintercept.com/fieldofvision/(?P<id>[^/?#]+)' + _TESTS = [{ + 'url': 'https://theintercept.com/fieldofvision/thisisacoup-episode-four-surrender-or-die/', + 'md5': '145f28b41d44aab2f87c0a4ac8ec95bd', + 'info_dict': { + 'id': '46214', + 'ext': 'mp4', + 'title': '#ThisIsACoup – Episode Four: Surrender or Die', + 'description': 'md5:74dd27f0e2fbd50817829f97eaa33140', + 'timestamp': 1450429239, + 'upload_date': '20151218', + 'comment_count': int, + } + }] + + def _real_extract(self, url): + display_id = 
self._match_id(url) + webpage = self._download_webpage(url, display_id) + + json_data = self._parse_json(self._search_regex( + r'initialStoreTree\s*=\s*(?P<json_data>{.+})', webpage, + 'initialStoreTree'), display_id) + + for post in json_data['resources']['posts'].values(): + if post['slug'] == display_id: + return { + '_type': 'url_transparent', + 'url': 'jwplatform:%s' % post['fov_videoid'], + 'id': compat_str(post['ID']), + 'display_id': display_id, + 'title': post['title'], + 'description': post.get('excerpt'), + 'timestamp': parse_iso8601(post.get('date')), + 'comment_count': int_or_none(post.get('comments_number')), + } + raise ExtractorError('Unable to find the current post') diff --git a/youtube_dl/extractor/theplatform.py b/youtube_dl/extractor/theplatform.py index 25edc3100..0bf6726b5 100644 --- a/youtube_dl/extractor/theplatform.py +++ b/youtube_dl/extractor/theplatform.py @@ -16,11 +16,12 @@ from ..compat import ( from ..utils import ( determine_ext, ExtractorError, - xpath_with_ns, - unsmuggle_url, + float_or_none, int_or_none, + sanitized_Request, + unsmuggle_url, url_basename, - float_or_none, + xpath_with_ns, ) default_ns = 'http://www.w3.org/2005/SMIL21/Language' @@ -139,6 +140,11 @@ class ThePlatformIE(ThePlatformBaseIE): 'upload_date': '20150701', 'categories': ['Today/Shows/Orange Room', 'Today/Sections/Money', 'Today/Topics/Tech', "Today/Topics/Editor's picks"], }, + }, { + # From http://www.nbc.com/the-blacklist/video/sir-crispin-crandall/2928790?onid=137781#vc137781=1 + # geo-restricted (US), HLS encrypted with AES-128 + 'url': 'http://player.theplatform.com/p/NnzsPC/onsite_universal/select/media/guid/2410887629/2928790?fwsitesection=nbc_the_blacklist_video_library&autoPlay=true&carouselID=137781', + 'only_matching': True, }] @staticmethod @@ -182,8 +188,12 @@ class ThePlatformIE(ThePlatformBaseIE): # Seems there's no pattern for the interested script filename, so # I try one by one for script in reversed(scripts): - feed_script = self._download_webpage(script, video_id, 'Downloading feed script') - feed_id = self._search_regex(r'defaultFeedId\s*:\s*"([^"]+)"', feed_script, 'default feed id', default=None) + feed_script = self._download_webpage( + self._proto_relative_url(script, 'http:'), + video_id, 'Downloading feed script') + feed_id = self._search_regex( + r'defaultFeedId\s*:\s*"([^"]+)"', feed_script, + 'default feed id', default=None) if feed_id is not None: break if feed_id is None: @@ -193,6 +203,20 @@ class ThePlatformIE(ThePlatformBaseIE): if smuggled_data.get('force_smil_url', False): smil_url = url + # Explicitly specified SMIL (see https://github.com/rg3/youtube-dl/issues/7385) + elif '/guid/' in url: + headers = {} + source_url = smuggled_data.get('source_url') + if source_url: + headers['Referer'] = source_url + request = sanitized_Request(url, headers=headers) + webpage = self._download_webpage(request, video_id) + smil_url = self._search_regex( + r'<link[^>]+href=(["\'])(?P<url>.+?)\1[^>]+type=["\']application/smil\+xml', + webpage, 'smil url', group='url') + path = self._search_regex( + r'link\.theplatform\.com/s/((?:[^/?#&]+/)+[^/?#&]+)', smil_url, 'path') + smil_url += '?' if '?' 
not in smil_url else '&' + 'formats=m3u,mpeg4&format=SMIL' elif mobj.group('config'): config_url = url + '&form=json' config_url = config_url.replace('swf/', 'config/') diff --git a/youtube_dl/extractor/tlc.py b/youtube_dl/extractor/tlc.py index 13263614c..d6d038a8d 100644 --- a/youtube_dl/extractor/tlc.py +++ b/youtube_dl/extractor/tlc.py @@ -3,7 +3,7 @@ from __future__ import unicode_literals import re from .common import InfoExtractor -from .brightcove import BrightcoveIE +from .brightcove import BrightcoveLegacyIE from .discovery import DiscoveryIE from ..compat import compat_urlparse @@ -66,6 +66,6 @@ class TlcDeIE(InfoExtractor): return { '_type': 'url', - 'url': BrightcoveIE._extract_brightcove_url(iframe), - 'ie': BrightcoveIE.ie_key(), + 'url': BrightcoveLegacyIE._extract_brightcove_url(iframe), + 'ie': BrightcoveLegacyIE.ie_key(), } diff --git a/youtube_dl/extractor/toggle.py b/youtube_dl/extractor/toggle.py new file mode 100644 index 000000000..c54b876d3 --- /dev/null +++ b/youtube_dl/extractor/toggle.py @@ -0,0 +1,192 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import json +import re + +from .common import InfoExtractor +from ..utils import ( + determine_ext, + ExtractorError, + float_or_none, + int_or_none, + parse_iso8601, + sanitized_Request, +) + + +class ToggleIE(InfoExtractor): + IE_NAME = 'toggle' + _VALID_URL = r'https?://video\.toggle\.sg/(?:en|zh)/(?:series|clips|movies)/(?:[^/]+/)+(?P<id>[0-9]+)' + _TESTS = [{ + 'url': 'http://video.toggle.sg/en/series/lion-moms-tif/trailers/lion-moms-premier/343115', + 'info_dict': { + 'id': '343115', + 'ext': 'mp4', + 'title': 'Lion Moms Premiere', + 'description': 'md5:aea1149404bff4d7f7b6da11fafd8e6b', + 'upload_date': '20150910', + 'timestamp': 1441858274, + }, + 'params': { + 'skip_download': 'm3u8 download', + } + }, { + 'note': 'DRM-protected video', + 'url': 'http://video.toggle.sg/en/movies/dug-s-special-mission/341413', + 'info_dict': { + 'id': '341413', + 'ext': 'wvm', + 'title': 'Dug\'s Special Mission', + 'description': 'md5:e86c6f4458214905c1772398fabc93e0', + 'upload_date': '20150827', + 'timestamp': 1440644006, + }, + 'params': { + 'skip_download': 'DRM-protected wvm download', + } + }, { + # this also tests correct video id extraction + 'note': 'm3u8 links are geo-restricted, but Android/mp4 is okay', + 'url': 'http://video.toggle.sg/en/series/28th-sea-games-5-show/28th-sea-games-5-show-ep11/332861', + 'info_dict': { + 'id': '332861', + 'ext': 'mp4', + 'title': '28th SEA Games (5 Show) - Episode 11', + 'description': 'md5:3cd4f5f56c7c3b1340c50a863f896faa', + 'upload_date': '20150605', + 'timestamp': 1433480166, + }, + 'params': { + 'skip_download': 'DRM-protected wvm download', + }, + 'skip': 'm3u8 links are geo-restricted' + }, { + 'url': 'http://video.toggle.sg/en/clips/seraph-sun-aloysius-will-suddenly-sing-some-old-songs-in-high-pitch-on-set/343331', + 'only_matching': True, + }, { + 'url': 'http://video.toggle.sg/zh/series/zero-calling-s2-hd/ep13/336367', + 'only_matching': True, + }, { + 'url': 'http://video.toggle.sg/en/series/vetri-s2/webisodes/jeeva-is-an-orphan-vetri-s2-webisode-7/342302', + 'only_matching': True, + }, { + 'url': 'http://video.toggle.sg/en/movies/seven-days/321936', + 'only_matching': True, + }] + + _FORMAT_PREFERENCES = { + 'wvm-STBMain': -10, + 'wvm-iPadMain': -20, + 'wvm-iPhoneMain': -30, + 'wvm-Android': -40, + } + _API_USER = 'tvpapi_147' + _API_PASS = '11111' + + def _real_extract(self, url): + video_id = self._match_id(url) + + webpage = self._download_webpage( + 
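# [Editor's note] The Toggle page itself is only needed so the optional per-site
# apiUser/apiPass overrides can be scraped below; the actual media metadata comes from
# the tvinci JSON gateway (GetMediaInfo) requested via sanitized_Request further down.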
url, video_id, note='Downloading video page') + + api_user = self._search_regex( + r'apiUser\s*:\s*(["\'])(?P<user>.+?)\1', webpage, 'apiUser', + default=self._API_USER, group='user') + api_pass = self._search_regex( + r'apiPass\s*:\s*(["\'])(?P<pass>.+?)\1', webpage, 'apiPass', + default=self._API_PASS, group='pass') + + params = { + 'initObj': { + 'Locale': { + 'LocaleLanguage': '', + 'LocaleCountry': '', + 'LocaleDevice': '', + 'LocaleUserState': 0 + }, + 'Platform': 0, + 'SiteGuid': 0, + 'DomainID': '0', + 'UDID': '', + 'ApiUser': api_user, + 'ApiPass': api_pass + }, + 'MediaID': video_id, + 'mediaType': 0, + } + + req = sanitized_Request( + 'http://tvpapi.as.tvinci.com/v2_9/gateways/jsonpostgw.aspx?m=GetMediaInfo', + json.dumps(params).encode('utf-8')) + info = self._download_json(req, video_id, 'Downloading video info json') + + title = info['MediaName'] + + formats = [] + for video_file in info.get('Files', []): + video_url, vid_format = video_file.get('URL'), video_file.get('Format') + if not video_url or not vid_format: + continue + ext = determine_ext(video_url) + vid_format = vid_format.replace(' ', '') + # if geo-restricted, m3u8 is inaccessible, but mp4 is okay + if ext == 'm3u8': + formats.extend(self._extract_m3u8_formats( + video_url, video_id, ext='mp4', m3u8_id=vid_format, + note='Downloading %s m3u8 information' % vid_format, + errnote='Failed to download %s m3u8 information' % vid_format, + fatal=False)) + elif ext in ('mp4', 'wvm'): + # wvm are drm-protected files + formats.append({ + 'ext': ext, + 'url': video_url, + 'format_id': vid_format, + 'preference': self._FORMAT_PREFERENCES.get(ext + '-' + vid_format) or -1, + 'format_note': 'DRM-protected video' if ext == 'wvm' else None + }) + if not formats: + # Most likely because geo-blocked + raise ExtractorError('No downloadable videos found', expected=True) + self._sort_formats(formats) + + duration = int_or_none(info.get('Duration')) + description = info.get('Description') + created_at = parse_iso8601(info.get('CreationDate') or None) + + average_rating = float_or_none(info.get('Rating')) + view_count = int_or_none(info.get('ViewCounter') or info.get('view_counter')) + like_count = int_or_none(info.get('LikeCounter') or info.get('like_counter')) + + thumbnails = [] + for picture in info.get('Pictures', []): + if not isinstance(picture, dict): + continue + pic_url = picture.get('URL') + if not pic_url: + continue + thumbnail = { + 'url': pic_url, + } + pic_size = picture.get('PicSize', '') + m = re.search(r'(?P<width>\d+)[xX](?P<height>\d+)', pic_size) + if m: + thumbnail.update({ + 'width': int(m.group('width')), + 'height': int(m.group('height')), + }) + thumbnails.append(thumbnail) + + return { + 'id': video_id, + 'title': title, + 'description': description, + 'duration': duration, + 'timestamp': created_at, + 'average_rating': average_rating, + 'view_count': view_count, + 'like_count': like_count, + 'thumbnails': thumbnails, + 'formats': formats, + } diff --git a/youtube_dl/extractor/trilulilu.py b/youtube_dl/extractor/trilulilu.py index 185accc4b..a800449e9 100644 --- a/youtube_dl/extractor/trilulilu.py +++ b/youtube_dl/extractor/trilulilu.py @@ -1,80 +1,103 @@ # coding: utf-8 from __future__ import unicode_literals -import re - from .common import InfoExtractor -from ..utils import ExtractorError +from ..utils import ( + ExtractorError, + int_or_none, + parse_iso8601, +) class TriluliluIE(InfoExtractor): - _VALID_URL = r'https?://(?:www\.)?trilulilu\.ro/(?:video-[^/]+/)?(?P<id>[^/#\?]+)' - _TEST = { - 'url': 
'http://www.trilulilu.ro/video-animatie/big-buck-bunny-1', - 'md5': 'c1450a00da251e2769b74b9005601cac', + _VALID_URL = r'https?://(?:(?:www|m)\.)?trilulilu\.ro/(?:[^/]+/)?(?P<id>[^/#\?]+)' + _TESTS = [{ + 'url': 'http://www.trilulilu.ro/big-buck-bunny-1', + 'md5': '68da087b676a6196a413549212f60cc6', 'info_dict': { 'id': 'ae2899e124140b', 'ext': 'mp4', 'title': 'Big Buck Bunny', 'description': ':) pentru copilul din noi', + 'uploader_id': 'chipy', + 'upload_date': '20120304', + 'timestamp': 1330830647, + 'uploader': 'chipy', + 'view_count': int, + 'like_count': int, + 'comment_count': int, }, - } + }, { + 'url': 'http://www.trilulilu.ro/adena-ft-morreti-inocenta', + 'md5': '929dfb8729dc71750463af88bbbbf4a4', + 'info_dict': { + 'id': 'f299710e3c91c5', + 'ext': 'mp4', + 'title': 'Adena ft. Morreti - Inocenta', + 'description': 'pop music', + 'uploader_id': 'VEVOmixt', + 'upload_date': '20151204', + 'uploader': 'VEVOmixt', + 'timestamp': 1449187937, + 'view_count': int, + 'like_count': int, + 'comment_count': int, + }, + }] def _real_extract(self, url): display_id = self._match_id(url) - webpage = self._download_webpage(url, display_id) + media_info = self._download_json('http://m.trilulilu.ro/%s?format=json' % display_id, display_id) - if re.search(r'Fişierul nu este disponibil pentru vizionare în ţara dumneavoastră', webpage): - raise ExtractorError( - 'This video is not available in your country.', expected=True) - elif re.search('Fişierul poate fi accesat doar de către prietenii lui', webpage): + age_limit = 0 + errors = media_info.get('errors', {}) + if errors.get('friends'): raise ExtractorError('This video is private.', expected=True) + elif errors.get('geoblock'): + raise ExtractorError('This video is not available in your country.', expected=True) + elif errors.get('xxx_unlogged'): + age_limit = 18 - flashvars_str = self._search_regex( - r'block_flash_vars\s*=\s*(\{[^\}]+\})', webpage, 'flashvars', fatal=False, default=None) + media_class = media_info.get('class') + if media_class not in ('video', 'audio'): + raise ExtractorError('not a video or an audio') - if flashvars_str: - flashvars = self._parse_json(flashvars_str, display_id) - else: - raise ExtractorError( - 'This page does not contain videos', expected=True) + user = media_info.get('user', {}) - if flashvars['isMP3'] == 'true': - raise ExtractorError( - 'Audio downloads are currently not supported', expected=True) + thumbnail = media_info.get('cover_url') + if thumbnail: + thumbnail = thumbnail.format(width='1600', height='1200') - video_id = flashvars['hash'] - title = self._og_search_title(webpage) - thumbnail = self._og_search_thumbnail(webpage) - description = self._og_search_description(webpage, default=None) - - format_url = ('http://fs%(server)s.trilulilu.ro/%(hash)s/' - 'video-formats2' % flashvars) - format_doc = self._download_xml( - format_url, video_id, - note='Downloading formats', - errnote='Error while downloading formats') - - video_url_template = ( - 'http://fs%(server)s.trilulilu.ro/stream.php?type=video' - '&source=site&hash=%(hash)s&username=%(userid)s&' - 'key=ministhebest&format=%%s&sig=&exp=' % - flashvars) - formats = [ - { - 'format_id': fnode.text.partition('-')[2], - 'url': video_url_template % fnode.text, - 'ext': fnode.text.partition('-')[0] - } - - for fnode in format_doc.findall('./formats/format') - ] + # TODO: get correct ext for audio files + stream_type = media_info.get('stream_type') + formats = [{ + 'url': media_info['href'], + 'ext': stream_type, + }] + if media_info.get('is_hd'): + formats.append({
+ 'format_id': 'hd', + 'url': media_info['hrefhd'], + 'ext': stream_type, + }) + if media_class == 'audio': + formats[0]['vcodec'] = 'none' + else: + formats[0]['format_id'] = 'sd' return { - 'id': video_id, + 'id': media_info['identifier'].split('|')[1], 'display_id': display_id, 'formats': formats, - 'title': title, - 'description': description, + 'title': media_info['title'], + 'description': media_info.get('description'), 'thumbnail': thumbnail, + 'uploader_id': user.get('username'), + 'uploader': user.get('fullname'), + 'timestamp': parse_iso8601(media_info.get('published'), ' '), + 'duration': int_or_none(media_info.get('duration')), + 'view_count': int_or_none(media_info.get('count_views')), + 'like_count': int_or_none(media_info.get('count_likes')), + 'comment_count': int_or_none(media_info.get('count_comments')), + 'age_limit': age_limit, } diff --git a/youtube_dl/extractor/tube8.py b/youtube_dl/extractor/tube8.py index c9cb69333..46ef61ff5 100644 --- a/youtube_dl/extractor/tube8.py +++ b/youtube_dl/extractor/tube8.py @@ -4,12 +4,10 @@ import json import re from .common import InfoExtractor -from ..compat import ( - compat_urllib_parse_urlparse, - compat_urllib_request, -) +from ..compat import compat_urllib_parse_urlparse from ..utils import ( int_or_none, + sanitized_Request, str_to_int, ) from ..aes import aes_decrypt_text @@ -42,7 +40,7 @@ class Tube8IE(InfoExtractor): video_id = mobj.group('id') display_id = mobj.group('display_id') - req = compat_urllib_request.Request(url) + req = sanitized_Request(url) req.add_header('Cookie', 'age_verified=1') webpage = self._download_webpage(req, display_id) diff --git a/youtube_dl/extractor/tubitv.py b/youtube_dl/extractor/tubitv.py index 4f86b3ee9..6d78b5dfe 100644 --- a/youtube_dl/extractor/tubitv.py +++ b/youtube_dl/extractor/tubitv.py @@ -5,13 +5,11 @@ import codecs import re from .common import InfoExtractor -from ..compat import ( - compat_urllib_parse, - compat_urllib_request -) +from ..compat import compat_urllib_parse from ..utils import ( ExtractorError, int_or_none, + sanitized_Request, ) @@ -44,7 +42,7 @@ class TubiTvIE(InfoExtractor): 'password': password, } payload = compat_urllib_parse.urlencode(form_data).encode('utf-8') - request = compat_urllib_request.Request(self._LOGIN_URL, payload) + request = sanitized_Request(self._LOGIN_URL, payload) request.add_header('Content-Type', 'application/x-www-form-urlencoded') login_page = self._download_webpage( request, None, False, 'Wrong login info') diff --git a/youtube_dl/extractor/tumblr.py b/youtube_dl/extractor/tumblr.py index 3d3b635e4..4f844706d 100644 --- a/youtube_dl/extractor/tumblr.py +++ b/youtube_dl/extractor/tumblr.py @@ -4,6 +4,7 @@ from __future__ import unicode_literals import re from .common import InfoExtractor +from ..utils import int_or_none class TumblrIE(InfoExtractor): @@ -29,6 +30,19 @@ class TumblrIE(InfoExtractor): 'thumbnail': 're:http://.*\.jpg', } }, { + 'url': 'http://hdvideotest.tumblr.com/post/130323439814/test-description-for-my-hd-video', + 'md5': '7ae503065ad150122dc3089f8cf1546c', + 'info_dict': { + 'id': '130323439814', + 'ext': 'mp4', + 'title': 'HD Video Testing \u2014 Test description for my HD video', + 'description': 'md5:97cc3ab5fcd27ee4af6356701541319c', + 'thumbnail': 're:http://.*\.jpg', + }, + 'params': { + 'format': 'hd', + }, + }, { 'url': 'http://naked-yogi.tumblr.com/post/118312946248/naked-smoking-stretching', 'md5': 'de07e5211d60d4f3a2c3df757ea9f6ab', 'info_dict': { @@ -37,6 +51,9 @@ class TumblrIE(InfoExtractor): 'title': 
'naked smoking & stretching', 'upload_date': '20150506', 'timestamp': 1430931613, + 'age_limit': 18, + 'uploader_id': '1638622', + 'uploader': 'naked-yogi', }, 'add_ie': ['Vidme'], }, { @@ -66,10 +83,38 @@ class TumblrIE(InfoExtractor): if iframe_url is None: return self.url_result(urlh.geturl(), 'Generic') - iframe = self._download_webpage(iframe_url, video_id, - 'Downloading iframe page') - video_url = self._search_regex(r'<source src="([^"]+)"', - iframe, 'video url') + iframe = self._download_webpage(iframe_url, video_id, 'Downloading iframe page') + + duration = None + sources = [] + + sd_url = self._search_regex( + r'<source[^>]+src=(["\'])(?P<url>.+?)\1', iframe, + 'sd video url', default=None, group='url') + if sd_url: + sources.append((sd_url, 'sd')) + + options = self._parse_json( + self._search_regex( + r'data-crt-options=(["\'])(?P<options>.+?)\1', iframe, + 'hd video url', default='', group='options'), + video_id, fatal=False) + if options: + duration = int_or_none(options.get('duration')) + hd_url = options.get('hdUrl') + if hd_url: + sources.append((hd_url, 'hd')) + + formats = [{ + 'url': video_url, + 'ext': 'mp4', + 'format_id': format_id, + 'height': int_or_none(self._search_regex( + r'/(\d{3,4})$', video_url, 'height', default=None)), + 'quality': quality, + } for quality, (video_url, format_id) in enumerate(sources)] + + self._sort_formats(formats) # The only place where you can get a title, it's not complete, # but searching in other places doesn't work for all videos @@ -79,9 +124,9 @@ class TumblrIE(InfoExtractor): return { 'id': video_id, - 'url': video_url, - 'ext': 'mp4', 'title': video_title, 'description': self._og_search_description(webpage, default=None), 'thumbnail': self._og_search_thumbnail(webpage, default=None), + 'duration': duration, + 'formats': formats, } diff --git a/youtube_dl/extractor/tunein.py b/youtube_dl/extractor/tunein.py index b6b1f2568..8322cc14d 100644 --- a/youtube_dl/extractor/tunein.py +++ b/youtube_dl/extractor/tunein.py @@ -2,74 +2,33 @@ from __future__ import unicode_literals import json -import re from .common import InfoExtractor from ..utils import ExtractorError +from ..compat import compat_urlparse -class TuneInIE(InfoExtractor): - _VALID_URL = r'''(?x)https?://(?:www\.)? 
- (?: - tunein\.com/ - (?: - radio/.*?-s| - station/.*?StationId\= - )(?P<id>[0-9]+) - |tun\.in/(?P<redirect_id>[A-Za-z0-9]+) - ) - ''' - _API_URL_TEMPLATE = 'http://tunein.com/tuner/tune/?stationId={0:}&tuneType=Station' - - _INFO_DICT = { - 'id': '34682', - 'title': 'Jazz 24 on 88.5 Jazz24 - KPLU-HD2', - 'ext': 'aac', - 'thumbnail': 're:^https?://.*\.png$', - 'location': 'Tacoma, WA', - } - _TESTS = [ - { - 'url': 'http://tunein.com/radio/Jazz24-885-s34682/', - 'info_dict': _INFO_DICT, - 'params': { - 'skip_download': True, # live stream - }, - }, - { # test redirection - 'url': 'http://tun.in/ser7s', - 'info_dict': _INFO_DICT, - 'params': { - 'skip_download': True, # live stream - }, - }, - ] +class TuneInBaseIE(InfoExtractor): + _API_BASE_URL = 'http://tunein.com/tuner/tune/' def _real_extract(self, url): - mobj = re.match(self._VALID_URL, url) - redirect_id = mobj.group('redirect_id') - if redirect_id: - # The server doesn't support HEAD requests - urlh = self._request_webpage( - url, redirect_id, note='Downloading redirect page') - url = urlh.geturl() - self.to_screen('Following redirect: %s' % url) - mobj = re.match(self._VALID_URL, url) - station_id = mobj.group('id') - - station_info = self._download_json( - self._API_URL_TEMPLATE.format(station_id), - station_id, note='Downloading station JSON') - - title = station_info['Title'] - thumbnail = station_info.get('Logo') - location = station_info.get('Location') - streams_url = station_info.get('StreamUrl') + content_id = self._match_id(url) + + content_info = self._download_json( + self._API_BASE_URL + self._API_URL_QUERY % content_id, + content_id, note='Downloading JSON metadata') + + title = content_info['Title'] + thumbnail = content_info.get('Logo') + location = content_info.get('Location') + streams_url = content_info.get('StreamUrl') if not streams_url: - raise ExtractorError('No downloadable streams found', - expected=True) + raise ExtractorError('No downloadable streams found', expected=True) + if not streams_url.startswith('http://'): + streams_url = compat_urlparse.urljoin(url, streams_url) + stream_data = self._download_webpage( - streams_url, station_id, note='Downloading stream data') + streams_url, content_id, note='Downloading stream data') streams = json.loads(self._search_regex( r'\((.*)\);', stream_data, 'stream info'))['Streams'] @@ -97,10 +56,122 @@ class TuneInIE(InfoExtractor): self._sort_formats(formats) return { - 'id': station_id, + 'id': content_id, 'title': title, 'formats': formats, 'thumbnail': thumbnail, 'location': location, 'is_live': is_live, } + + +class TuneInClipIE(TuneInBaseIE): + IE_NAME = 'tunein:clip' + _VALID_URL = r'https?://(?:www\.)?tunein\.com/station/.*?audioClipId\=(?P<id>\d+)' + _API_URL_QUERY = '?tuneType=AudioClip&audioclipId=%s' + + _TESTS = [ + { + 'url': 'http://tunein.com/station/?stationId=246119&audioClipId=816', + 'md5': '99f00d772db70efc804385c6b47f4e77', + 'info_dict': { + 'id': '816', + 'title': '32m', + 'ext': 'mp3', + }, + }, + ] + + +class TuneInStationIE(TuneInBaseIE): + IE_NAME = 'tunein:station' + _VALID_URL = r'https?://(?:www\.)?tunein\.com/(?:radio/.*?-s|station/.*?StationId\=)(?P<id>\d+)' + _API_URL_QUERY = '?tuneType=Station&stationId=%s' + + @classmethod + def suitable(cls, url): + return False if TuneInClipIE.suitable(url) else super(TuneInStationIE, cls).suitable(url) + + _TESTS = [ + { + 'url': 'http://tunein.com/radio/Jazz24-885-s34682/', + 'info_dict': { + 'id': '34682', + 'title': 'Jazz 24 on 88.5 Jazz24 - KPLU-HD2', + 'ext': 'mp3', + 'location': 'Tacoma, 
WA', + }, + 'params': { + 'skip_download': True, # live stream + }, + }, + ] + + +class TuneInProgramIE(TuneInBaseIE): + IE_NAME = 'tunein:program' + _VALID_URL = r'https?://(?:www\.)?tunein\.com/(?:radio/.*?-p|program/.*?ProgramId\=)(?P<id>\d+)' + _API_URL_QUERY = '?tuneType=Program&programId=%s' + + _TESTS = [ + { + 'url': 'http://tunein.com/radio/Jazz-24-p2506/', + 'info_dict': { + 'id': '2506', + 'title': 'Jazz 24 on 91.3 WUKY-HD3', + 'ext': 'mp3', + 'location': 'Lexington, KY', + }, + 'params': { + 'skip_download': True, # live stream + }, + }, + ] + + +class TuneInTopicIE(TuneInBaseIE): + IE_NAME = 'tunein:topic' + _VALID_URL = r'https?://(?:www\.)?tunein\.com/topic/.*?TopicId\=(?P<id>\d+)' + _API_URL_QUERY = '?tuneType=Topic&topicId=%s' + + _TESTS = [ + { + 'url': 'http://tunein.com/topic/?TopicId=101830576', + 'md5': 'c31a39e6f988d188252eae7af0ef09c9', + 'info_dict': { + 'id': '101830576', + 'title': 'Votez pour moi du 29 octobre 2015 (29/10/15)', + 'ext': 'mp3', + 'location': 'Belgium', + }, + }, + ] + + +class TuneInShortenerIE(InfoExtractor): + IE_NAME = 'tunein:shortener' + IE_DESC = False # Do not list + _VALID_URL = r'https?://tun\.in/(?P<id>[A-Za-z0-9]+)' + + _TEST = { + # test redirection + 'url': 'http://tun.in/ser7s', + 'info_dict': { + 'id': '34682', + 'title': 'Jazz 24 on 88.5 Jazz24 - KPLU-HD2', + 'ext': 'mp3', + 'location': 'Tacoma, WA', + }, + 'params': { + 'skip_download': True, # live stream + }, + } + + def _real_extract(self, url): + redirect_id = self._match_id(url) + # The server doesn't support HEAD requests + urlh = self._request_webpage( + url, redirect_id, note='Downloading redirect page') + url = urlh.geturl() + self.to_screen('Following redirect: %s' % url) + return self.url_result(url) diff --git a/youtube_dl/extractor/tutv.py b/youtube_dl/extractor/tutv.py index fad720b68..822372ea1 100644 --- a/youtube_dl/extractor/tutv.py +++ b/youtube_dl/extractor/tutv.py @@ -10,10 +10,10 @@ class TutvIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?tu\.tv/videos/(?P<id>[^/?]+)' _TEST = { 'url': 'http://tu.tv/videos/robots-futbolistas', - 'md5': '627c7c124ac2a9b5ab6addb94e0e65f7', + 'md5': '0cd9e28ad270488911b0d2a72323395d', 'info_dict': { 'id': '2973058', - 'ext': 'flv', + 'ext': 'mp4', 'title': 'Robots futbolistas', }, } diff --git a/youtube_dl/extractor/twentyfourvideo.py b/youtube_dl/extractor/twentyfourvideo.py index c1ee1decc..e03e2dbaa 100644 --- a/youtube_dl/extractor/twentyfourvideo.py +++ b/youtube_dl/extractor/twentyfourvideo.py @@ -5,6 +5,8 @@ from .common import InfoExtractor from ..utils import ( parse_iso8601, int_or_none, + xpath_attr, + xpath_element, ) @@ -15,7 +17,7 @@ class TwentyFourVideoIE(InfoExtractor): _TESTS = [ { 'url': 'http://www.24video.net/video/view/1044982', - 'md5': 'd041af8b5b4246ea466226a0d6693345', + 'md5': 'e09fc0901d9eaeedac872f154931deeb', 'info_dict': { 'id': '1044982', 'ext': 'mp4', @@ -64,33 +66,24 @@ class TwentyFourVideoIE(InfoExtractor): r'<div class="comments-title" id="comments-count">(\d+) комментари', webpage, 'comment count', fatal=False)) - formats = [] + # Sets some cookies + self._download_xml( + r'http://www.24video.net/video/xml/%s?mode=init' % video_id, + video_id, 'Downloading init XML') - pc_video = self._download_xml( + video_xml = self._download_xml( 'http://www.24video.net/video/xml/%s?mode=play' % video_id, - video_id, 'Downloading PC video URL').find('.//video') + video_id, 'Downloading video XML') - formats.append({ - 'url': pc_video.attrib['url'], - 'format_id': 'pc', - 'quality': 1, - }) + video = 
xpath_element(video_xml, './/video', 'video', fatal=True) - like_count = int_or_none(pc_video.get('ratingPlus')) - dislike_count = int_or_none(pc_video.get('ratingMinus')) - age_limit = 18 if pc_video.get('adult') == 'true' else 0 + formats = [{ + 'url': xpath_attr(video, '', 'url', 'video URL', fatal=True), + }] - mobile_video = self._download_xml( - 'http://www.24video.net/video/xml/%s' % video_id, - video_id, 'Downloading mobile video URL').find('.//video') - - formats.append({ - 'url': mobile_video.attrib['url'], - 'format_id': 'mobile', - 'quality': 0, - }) - - self._sort_formats(formats) + like_count = int_or_none(video.get('ratingPlus')) + dislike_count = int_or_none(video.get('ratingMinus')) + age_limit = 18 if video.get('adult') == 'true' else 0 return { 'id': video_id, diff --git a/youtube_dl/extractor/twitch.py b/youtube_dl/extractor/twitch.py index 023911c41..69882da63 100644 --- a/youtube_dl/extractor/twitch.py +++ b/youtube_dl/extractor/twitch.py @@ -11,14 +11,15 @@ from ..compat import ( compat_str, compat_urllib_parse, compat_urllib_parse_urlparse, - compat_urllib_request, compat_urlparse, ) from ..utils import ( + encode_dict, ExtractorError, int_or_none, parse_duration, parse_iso8601, + sanitized_Request, ) @@ -27,8 +28,7 @@ class TwitchBaseIE(InfoExtractor): _API_BASE = 'https://api.twitch.tv' _USHER_BASE = 'http://usher.twitch.tv' - _LOGIN_URL = 'https://secure.twitch.tv/login' - _LOGIN_POST_URL = 'https://passport.twitch.tv/authentications/new' + _LOGIN_URL = 'http://www.twitch.tv/login' _NETRC_MACHINE = 'twitch' def _handle_error(self, response): @@ -48,7 +48,7 @@ class TwitchBaseIE(InfoExtractor): for cookie in self._downloader.cookiejar: if cookie.name == 'api_token': headers['Twitch-Api-Token'] = cookie.value - request = compat_urllib_request.Request(url, headers=headers) + request = sanitized_Request(url, headers=headers) response = super(TwitchBaseIE, self)._download_json(request, video_id, note) self._handle_error(response) return response @@ -61,26 +61,28 @@ class TwitchBaseIE(InfoExtractor): if username is None: return - login_page = self._download_webpage( + login_page, handle = self._download_webpage_handle( self._LOGIN_URL, None, 'Downloading login page') login_form = self._hidden_inputs(login_page) login_form.update({ - 'login': username.encode('utf-8'), - 'password': password.encode('utf-8'), + 'username': username, + 'password': password, }) + redirect_url = handle.geturl() + post_url = self._search_regex( r'<form[^>]+action=(["\'])(?P<url>.+?)\1', login_page, - 'post url', default=self._LOGIN_POST_URL, group='url') + 'post url', default=redirect_url, group='url') if not post_url.startswith('http'): - post_url = compat_urlparse.urljoin(self._LOGIN_URL, post_url) + post_url = compat_urlparse.urljoin(redirect_url, post_url) - request = compat_urllib_request.Request( - post_url, compat_urllib_parse.urlencode(login_form).encode('utf-8')) - request.add_header('Referer', self._LOGIN_URL) + request = sanitized_Request( + post_url, compat_urllib_parse.urlencode(encode_dict(login_form)).encode('utf-8')) + request.add_header('Referer', redirect_url) response = self._download_webpage( request, None, 'Logging in as %s' % username) @@ -238,14 +240,24 @@ class TwitchVodIE(TwitchItemBaseIE): def _real_extract(self, url): item_id = self._match_id(url) + info = self._download_info(self._ITEM_SHORTCUT, item_id) access_token = self._download_json( '%s/api/vods/%s/access_token' % (self._API_BASE, item_id), item_id, 'Downloading %s access token' % self._ITEM_TYPE) + formats 
= self._extract_m3u8_formats( - '%s/vod/%s?nauth=%s&nauthsig=%s&allow_source=true' - % (self._USHER_BASE, item_id, access_token['token'], access_token['sig']), + '%s/vod/%s?%s' % ( + self._USHER_BASE, item_id, + compat_urllib_parse.urlencode({ + 'allow_source': 'true', + 'allow_spectre': 'true', + 'player': 'twitchweb', + 'nauth': access_token['token'], + 'nauthsig': access_token['sig'], + })), item_id, 'mp4') + self._prefer_source(formats) info['formats'] = formats diff --git a/youtube_dl/extractor/twitter.py b/youtube_dl/extractor/twitter.py index 1aaa06305..a161f046b 100644 --- a/youtube_dl/extractor/twitter.py +++ b/youtube_dl/extractor/twitter.py @@ -1,28 +1,73 @@ +# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor -from ..compat import compat_urllib_request from ..utils import ( float_or_none, - unescapeHTML, + xpath_text, + remove_end, + int_or_none, + ExtractorError, + sanitized_Request, ) class TwitterCardIE(InfoExtractor): + IE_NAME = 'twitter:card' _VALID_URL = r'https?://(?:www\.)?twitter\.com/i/cards/tfw/v1/(?P<id>\d+)' - _TEST = { - 'url': 'https://twitter.com/i/cards/tfw/v1/560070183650213889', - 'md5': 'a74f50b310c83170319ba16de6955192', - 'info_dict': { - 'id': '560070183650213889', - 'ext': 'mp4', - 'title': 'TwitterCard', - 'thumbnail': 're:^https?://.*\.jpg$', - 'duration': 30.033, + _TESTS = [ + { + 'url': 'https://twitter.com/i/cards/tfw/v1/560070183650213889', + 'md5': '4fa26a35f9d1bf4b646590ba8e84be19', + 'info_dict': { + 'id': '560070183650213889', + 'ext': 'mp4', + 'title': 'TwitterCard', + 'thumbnail': 're:^https?://.*\.jpg$', + 'duration': 30.033, + } }, - } + { + 'url': 'https://twitter.com/i/cards/tfw/v1/623160978427936768', + 'md5': '7ee2a553b63d1bccba97fbed97d9e1c8', + 'info_dict': { + 'id': '623160978427936768', + 'ext': 'mp4', + 'title': 'TwitterCard', + 'thumbnail': 're:^https?://.*\.jpg', + 'duration': 80.155, + }, + }, + { + 'url': 'https://twitter.com/i/cards/tfw/v1/654001591733886977', + 'md5': 'b6f35e8b08a0bec6c8af77a2f4b3a814', + 'info_dict': { + 'id': 'dq4Oj5quskI', + 'ext': 'mp4', + 'title': 'Ubuntu 11.10 Overview', + 'description': 'Take a quick peek at what\'s new and improved in Ubuntu 11.10.\n\nOnce installed take a look at 10 Things to Do After Installing: http://www.omgubuntu.co.uk/2011/10/10-things-to-do-after-installing-ubuntu-11-10/', + 'upload_date': '20111013', + 'uploader': 'OMG! 
Ubuntu!', + 'uploader_id': 'omgubuntu', + }, + 'add_ie': ['Youtube'], + }, + { + 'url': 'https://twitter.com/i/cards/tfw/v1/665289828897005568', + 'md5': 'ab2745d0b0ce53319a534fccaa986439', + 'info_dict': { + 'id': 'iBb2x00UVlv', + 'ext': 'mp4', + 'upload_date': '20151113', + 'uploader_id': '1189339351084113920', + 'uploader': '@ArsenalTerje', + 'title': 'Vine by @ArsenalTerje', + }, + 'add_ie': ['Vine'], + } + ] def _real_extract(self, url): video_id = self._match_id(url) @@ -36,14 +81,28 @@ class TwitterCardIE(InfoExtractor): config = None formats = [] for user_agent in USER_AGENTS: - request = compat_urllib_request.Request(url) + request = sanitized_Request(url) request.add_header('User-Agent', user_agent) webpage = self._download_webpage(request, video_id) - config = self._parse_json( - unescapeHTML(self._search_regex( - r'data-player-config="([^"]+)"', webpage, 'data player config')), + iframe_url = self._html_search_regex( + r'<iframe[^>]+src="((?:https?:)?//(?:www.youtube.com/embed/[^"]+|(?:www\.)?vine\.co/v/\w+/card))"', + webpage, 'video iframe', default=None) + if iframe_url: + return self.url_result(iframe_url) + + config = self._parse_json(self._html_search_regex( + r'data-player-config="([^"]+)"', webpage, 'data player config'), video_id) + if 'playlist' not in config: + if 'vmapUrl' in config: + vmap_data = self._download_xml(config['vmapUrl'], video_id) + video_url = xpath_text(vmap_data, './/MediaFile').strip() + formats.append({ + 'url': video_url, + }) + break # same video regardless of UA + continue video_url = config['playlist'][0]['source'] @@ -70,3 +129,100 @@ class TwitterCardIE(InfoExtractor): 'duration': duration, 'formats': formats, } + + +class TwitterIE(InfoExtractor): + IE_NAME = 'twitter' + _VALID_URL = r'https?://(?:www\.|m\.|mobile\.)?twitter\.com/(?P<user_id>[^/]+)/status/(?P<id>\d+)' + _TEMPLATE_URL = 'https://twitter.com/%s/status/%s' + + _TESTS = [{ + 'url': 'https://twitter.com/freethenipple/status/643211948184596480', + 'md5': 'db6612ec5d03355953c3ca9250c97e5e', + 'info_dict': { + 'id': '643211948184596480', + 'ext': 'mp4', + 'title': 'FREE THE NIPPLE - FTN supporters on Hollywood Blvd today!', + 'thumbnail': 're:^https?://.*\.jpg', + 'duration': 12.922, + 'description': 'FREE THE NIPPLE on Twitter: "FTN supporters on Hollywood Blvd today! http://t.co/c7jHH749xJ"', + 'uploader': 'FREE THE NIPPLE', + 'uploader_id': 'freethenipple', + }, + }, { + 'url': 'https://twitter.com/giphz/status/657991469417025536/photo/1', + 'md5': 'f36dcd5fb92bf7057f155e7d927eeb42', + 'info_dict': { + 'id': '657991469417025536', + 'ext': 'mp4', + 'title': 'Gifs - tu vai cai tu vai cai tu nao eh capaz disso tu vai cai', + 'description': 'Gifs on Twitter: "tu vai cai tu vai cai tu nao eh capaz disso tu vai cai https://t.co/tM46VHFlO5"', + 'thumbnail': 're:^https?://.*\.png', + 'uploader': 'Gifs', + 'uploader_id': 'giphz', + }, + }, { + 'url': 'https://twitter.com/starwars/status/665052190608723968', + 'md5': '39b7199856dee6cd4432e72c74bc69d4', + 'info_dict': { + 'id': '665052190608723968', + 'ext': 'mp4', + 'title': 'Star Wars - A new beginning is coming December 18. Watch the official 60 second #TV spot for #StarWars: #TheForceAwakens.', + 'description': 'Star Wars on Twitter: "A new beginning is coming December 18. 
Watch the official 60 second #TV spot for #StarWars: #TheForceAwakens."', + 'uploader_id': 'starwars', + 'uploader': 'Star Wars', + }, + }] + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + user_id = mobj.group('user_id') + twid = mobj.group('id') + + webpage = self._download_webpage(self._TEMPLATE_URL % (user_id, twid), twid) + + username = remove_end(self._og_search_title(webpage), ' on Twitter') + + title = description = self._og_search_description(webpage).replace('\n', ' ').strip('“”') + + # strip 'https -_t.co_BJYgOjSeGA' junk from filenames + title = re.sub(r'\s+(https?://[^ ]+)', '', title) + + info = { + 'uploader_id': user_id, + 'uploader': username, + 'webpage_url': url, + 'description': '%s on Twitter: "%s"' % (username, description), + 'title': username + ' - ' + title, + } + + card_id = self._search_regex( + r'["\']/i/cards/tfw/v1/(\d+)', webpage, 'twitter card url', default=None) + if card_id: + card_url = 'https://twitter.com/i/cards/tfw/v1/' + card_id + info.update({ + '_type': 'url_transparent', + 'ie_key': 'TwitterCard', + 'url': card_url, + }) + return info + + mobj = re.search(r'''(?x) + <video[^>]+class="animated-gif"[^>]+ + (?:data-height="(?P<height>\d+)")?[^>]+ + (?:data-width="(?P<width>\d+)")?[^>]+ + (?:poster="(?P<poster>[^"]+)")?[^>]*>\s* + <source[^>]+video-src="(?P<url>[^"]+)" + ''', webpage) + + if mobj: + info.update({ + 'id': twid, + 'url': mobj.group('url'), + 'height': int_or_none(mobj.group('height')), + 'width': int_or_none(mobj.group('width')), + 'thumbnail': mobj.group('poster'), + }) + return info + + raise ExtractorError('There\'s no video in this tweet.') diff --git a/youtube_dl/extractor/udemy.py b/youtube_dl/extractor/udemy.py index 365d8b4bf..59832b1ec 100644 --- a/youtube_dl/extractor/udemy.py +++ b/youtube_dl/extractor/udemy.py @@ -1,14 +1,16 @@ from __future__ import unicode_literals -import re - from .common import InfoExtractor from ..compat import ( + compat_HTTPError, compat_urllib_parse, compat_urllib_request, ) from ..utils import ( ExtractorError, + float_or_none, + int_or_none, + sanitized_Request, ) @@ -17,6 +19,8 @@ class UdemyIE(InfoExtractor): _VALID_URL = r'https?://www\.udemy\.com/(?:[^#]+#/lecture/|lecture/view/?\?lectureId=)(?P<id>\d+)' _LOGIN_URL = 'https://www.udemy.com/join/login-popup/?displayType=ajax&showSkipButton=1' _ORIGIN_URL = 'https://www.udemy.com' + _SUCCESSFULLY_ENROLLED = '>You have enrolled in this course!<' + _ALREADY_ENROLLED = '>You are already taking this course.<' _NETRC_MACHINE = 'udemy' _TESTS = [{ @@ -32,6 +36,29 @@ class UdemyIE(InfoExtractor): 'skip': 'Requires udemy account credentials', }] + def _enroll_course(self, webpage, course_id): + enroll_url = self._search_regex( + r'href=(["\'])(?P<url>https?://(?:www\.)?udemy\.com/course/subscribe/.+?)\1', + webpage, 'enroll url', group='url', + default='https://www.udemy.com/course/subscribe/?courseId=%s' % course_id) + webpage = self._download_webpage(enroll_url, course_id, 'Enrolling in the course') + if self._SUCCESSFULLY_ENROLLED in webpage: + self.to_screen('%s: Successfully enrolled in the course' % course_id) + elif self._ALREADY_ENROLLED in webpage: + self.to_screen('%s: Already enrolled in the course' % course_id) + + def _download_lecture(self, course_id, lecture_id): + return self._download_json( + 'https://www.udemy.com/api-2.0/users/me/subscribed-courses/%s/lectures/%s?%s' % ( + course_id, lecture_id, compat_urllib_parse.urlencode({ + 'video_only': '', + 'auto_play': '', + 'fields[lecture]': 'title,description,asset', +
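# [Editor's note] The 'fields[...]' entries appear to be sparse-field selectors for the
# Udemy API: each names the attributes to return for that object type, keeping the lecture
# JSON response small. With hypothetical ids 123/456, urlencode() would percent-encode them
# into a request URL like:
#   https://www.udemy.com/api-2.0/users/me/subscribed-courses/123/lectures/456?video_only=&auto_play=&fields%5Blecture%5D=title%2Cdescription%2Casset&...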
'fields[asset]': 'asset_type,stream_url,thumbnail_url,download_urls,data', + 'instructorPreviewMode': 'False', + })), + lecture_id, 'Downloading lecture JSON') + def _handle_error(self, response): if not isinstance(response, dict): return @@ -53,12 +80,13 @@ class UdemyIE(InfoExtractor): headers['X-Udemy-Client-Id'] = cookie.value elif cookie.name == 'access_token': headers['X-Udemy-Bearer-Token'] = cookie.value + headers['X-Udemy-Authorization'] = 'Bearer %s' % cookie.value if isinstance(url_or_request, compat_urllib_request.Request): for header, value in headers.items(): url_or_request.add_header(header, value) else: - url_or_request = compat_urllib_request.Request(url_or_request, headers=headers) + url_or_request = sanitized_Request(url_or_request, headers=headers) response = super(UdemyIE, self)._download_json(url_or_request, video_id, note) self._handle_error(response) @@ -70,7 +98,7 @@ class UdemyIE(InfoExtractor): def _login(self): (username, password) = self._get_login_info() if username is None: - self.raise_login_required('Udemy account is required') + return login_popup = self._download_webpage( self._LOGIN_URL, None, 'Downloading login popup') @@ -89,7 +117,7 @@ class UdemyIE(InfoExtractor): 'password': password.encode('utf-8'), }) - request = compat_urllib_request.Request( + request = sanitized_Request( self._LOGIN_URL, compat_urllib_parse.urlencode(login_form).encode('utf-8')) request.add_header('Referer', self._ORIGIN_URL) request.add_header('Origin', self._ORIGIN_URL) @@ -108,44 +136,76 @@ class UdemyIE(InfoExtractor): def _real_extract(self, url): lecture_id = self._match_id(url) - lecture = self._download_json( - 'https://www.udemy.com/api-1.1/lectures/%s' % lecture_id, - lecture_id, 'Downloading lecture JSON') + webpage = self._download_webpage(url, lecture_id) + + course_id = self._search_regex( + r'data-course-id=["\'](\d+)', webpage, 'course id') + + try: + lecture = self._download_lecture(course_id, lecture_id) + except ExtractorError as e: + # Error could possibly mean we are not enrolled in the course + if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403: + self._enroll_course(webpage, course_id) + lecture = self._download_lecture(course_id, lecture_id)  # retry now that we are enrolled + else: + raise + + title = lecture['title'] + description = lecture.get('description') - asset_type = lecture.get('assetType') or lecture.get('asset_type') + asset = lecture['asset'] + + asset_type = asset.get('assetType') or asset.get('asset_type') if asset_type != 'Video': raise ExtractorError( 'Lecture %s is not a video' % lecture_id, expected=True) - asset = lecture['asset'] - stream_url = asset.get('streamUrl') or asset.get('stream_url') - mobj = re.search(r'(https?://www\.youtube\.com/watch\?v=.*)', stream_url) - if mobj: - return self.url_result(mobj.group(1), 'Youtube') + if stream_url: + youtube_url = self._search_regex( + r'(https?://www\.youtube\.com/watch\?v=.*)', stream_url, 'youtube URL', default=None) + if youtube_url: + return self.url_result(youtube_url, 'Youtube') video_id = asset['id'] thumbnail = asset.get('thumbnailUrl') or asset.get('thumbnail_url') - duration = asset['data']['duration'] - - download_url = asset.get('downloadUrl') or asset.get('download_url') - - video = download_url.get('Video') or download_url.get('video') - video_480p = download_url.get('Video480p') or download_url.get('video_480p') - - formats = [ - { - 'url': video_480p[0], - 'format_id': '360p', - }, - { - 'url': video[0], - 'format_id': '720p', - }, - ] - - title = lecture['title'] - description =
lecture['description'] + duration = float_or_none(asset.get('data', {}).get('duration')) + outputs = asset.get('data', {}).get('outputs', {}) + + formats = [] + for format_ in asset.get('download_urls', {}).get('Video', []): + video_url = format_.get('file') + if not video_url: + continue + format_id = format_.get('label') + f = { + 'url': format_['file'], + 'height': int_or_none(format_id), + } + if format_id: + # Some videos contain additional metadata (e.g. + # https://www.udemy.com/ios9-swift/learn/#/lecture/3383208) + output = outputs.get(format_id) + if isinstance(output, dict): + f.update({ + 'format_id': '%sp' % (output.get('label') or format_id), + 'width': int_or_none(output.get('width')), + 'height': int_or_none(output.get('height')), + 'vbr': int_or_none(output.get('video_bitrate_in_kbps')), + 'vcodec': output.get('video_codec'), + 'fps': int_or_none(output.get('frame_rate')), + 'abr': int_or_none(output.get('audio_bitrate_in_kbps')), + 'acodec': output.get('audio_codec'), + 'asr': int_or_none(output.get('audio_sample_rate')), + 'tbr': int_or_none(output.get('total_bitrate_in_kbps')), + 'filesize': int_or_none(output.get('file_size_in_bytes')), + }) + else: + f['format_id'] = '%sp' % format_id + formats.append(f) + + self._sort_formats(formats) return { 'id': video_id, @@ -159,9 +219,7 @@ class UdemyIE(InfoExtractor): class UdemyCourseIE(UdemyIE): IE_NAME = 'udemy:course' - _VALID_URL = r'https?://www\.udemy\.com/(?P<coursepath>[\da-z-]+)' - _SUCCESSFULLY_ENROLLED = '>You have enrolled in this course!<' - _ALREADY_ENROLLED = '>You are already taking this course.<' + _VALID_URL = r'https?://www\.udemy\.com/(?P<id>[\da-z-]+)' _TESTS = [] @classmethod @@ -169,24 +227,18 @@ class UdemyCourseIE(UdemyIE): return False if UdemyIE.suitable(url) else super(UdemyCourseIE, cls).suitable(url) def _real_extract(self, url): - mobj = re.match(self._VALID_URL, url) - course_path = mobj.group('coursepath') + course_path = self._match_id(url) + + webpage = self._download_webpage(url, course_path) response = self._download_json( 'https://www.udemy.com/api-1.1/courses/%s' % course_path, course_path, 'Downloading course JSON') - course_id = int(response['id']) - course_title = response['title'] + course_id = response['id'] + course_title = response.get('title') - webpage = self._download_webpage( - 'https://www.udemy.com/course/subscribe/?courseId=%s' % course_id, - course_id, 'Enrolling in the course') - - if self._SUCCESSFULLY_ENROLLED in webpage: - self.to_screen('%s: Successfully enrolled in' % course_id) - elif self._ALREADY_ENROLLED in webpage: - self.to_screen('%s: Already enrolled in' % course_id) + self._enroll_course(webpage, course_id) response = self._download_json( 'https://www.udemy.com/api-1.1/courses/%s/curriculum' % course_id, diff --git a/youtube_dl/extractor/udn.py b/youtube_dl/extractor/udn.py index 2151f8338..ee35b7227 100644 --- a/youtube_dl/extractor/udn.py +++ b/youtube_dl/extractor/udn.py @@ -12,7 +12,8 @@ from ..compat import compat_urlparse class UDNEmbedIE(InfoExtractor): IE_DESC = '聯合影音' - _VALID_URL = r'https?://video\.udn\.com/(?:embed|play)/news/(?P<id>\d+)' + _PROTOCOL_RELATIVE_VALID_URL = r'//video\.udn\.com/(?:embed|play)/news/(?P<id>\d+)' + _VALID_URL = r'https?:' + _PROTOCOL_RELATIVE_VALID_URL _TESTS = [{ 'url': 'http://video.udn.com/embed/news/300040', 'md5': 'de06b4c90b042c128395a88f0384817e', diff --git a/youtube_dl/extractor/ustream.py b/youtube_dl/extractor/ustream.py index c39c278ab..73b05ecab 100644 --- a/youtube_dl/extractor/ustream.py +++ 
b/youtube_dl/extractor/ustream.py @@ -1,17 +1,20 @@ from __future__ import unicode_literals -import json import re from .common import InfoExtractor from ..compat import ( compat_urlparse, ) -from ..utils import ExtractorError +from ..utils import ( + ExtractorError, + int_or_none, + float_or_none, +) class UstreamIE(InfoExtractor): - _VALID_URL = r'https?://www\.ustream\.tv/(?P<type>recorded|embed|embed/recorded)/(?P<videoID>\d+)' + _VALID_URL = r'https?://www\.ustream\.tv/(?P<type>recorded|embed|embed/recorded)/(?P<id>\d+)' IE_NAME = 'ustream' _TESTS = [{ 'url': 'http://www.ustream.tv/recorded/20274954', @@ -19,8 +22,12 @@ class UstreamIE(InfoExtractor): 'info_dict': { 'id': '20274954', 'ext': 'flv', - 'uploader': 'Young Americans for Liberty', 'title': 'Young Americans for Liberty February 7, 2012 2:28 AM', + 'description': 'Young Americans for Liberty February 7, 2012 2:28 AM', + 'timestamp': 1328577035, + 'upload_date': '20120207', + 'uploader': 'yaliberty', + 'uploader_id': '6780869', }, }, { # From http://sportscanada.tv/canadagames/index.php/week2/figure-skating/444 @@ -32,20 +39,21 @@ class UstreamIE(InfoExtractor): 'ext': 'flv', 'title': '-CG11- Canada Games Figure Skating', 'uploader': 'sportscanadatv', - } + }, + 'skip': 'This Pro Broadcaster has chosen to remove this video from the ustream.tv site.', }] def _real_extract(self, url): m = re.match(self._VALID_URL, url) - video_id = m.group('videoID') + video_id = m.group('id') # some sites use this embed format (see: http://github.com/rg3/youtube-dl/issues/2990) if m.group('type') == 'embed/recorded': - video_id = m.group('videoID') + video_id = m.group('id') desktop_url = 'http://www.ustream.tv/recorded/' + video_id return self.url_result(desktop_url, 'Ustream') if m.group('type') == 'embed': - video_id = m.group('videoID') + video_id = m.group('id') webpage = self._download_webpage(url, video_id) desktop_video_id = self._html_search_regex( r'ContentVideoIds=\["([^"]*?)"\]', webpage, 'desktop_video_id') @@ -53,52 +61,50 @@ class UstreamIE(InfoExtractor): return self.url_result(desktop_url, 'Ustream') params = self._download_json( - 'http://cdngw.ustream.tv/rgwjson/Viewer.getVideo/' + json.dumps({ - 'brandId': 1, - 'videoId': int(video_id), - 'autoplay': False, - }), video_id) - - if 'error' in params: - raise ExtractorError(params['error']['message'], expected=True) - - video_url = params['flv'] + 'https://api.ustream.tv/videos/%s.json' % video_id, video_id) - webpage = self._download_webpage(url, video_id) + error = params.get('error') + if error: + raise ExtractorError( + '%s returned error: %s' % (self.IE_NAME, error), expected=True) - self.report_extraction(video_id) + video = params['video'] - video_title = self._html_search_regex(r'data-title="(?P<title>.+)"', - webpage, 'title', default=None) + title = video['title'] + filesize = float_or_none(video.get('file_size')) - if not video_title: - try: - video_title = params['moduleConfig']['meta']['title'] - except KeyError: - pass - - if not video_title: - video_title = 'Ustream video ' + video_id + formats = [{ + 'id': video_id, + 'url': video_url, + 'ext': format_id, + 'filesize': filesize, + } for format_id, video_url in video['media_urls'].items()] + self._sort_formats(formats) - uploader = self._html_search_regex(r'data-content-type="channel".*?>(?P<uploader>.*?)</a>', - webpage, 'uploader', fatal=False, flags=re.DOTALL, default=None) + description = video.get('description') + timestamp = int_or_none(video.get('created_at')) + duration = 
float_or_none(video.get('length')) + view_count = int_or_none(video.get('views')) - if not uploader: - try: - uploader = params['moduleConfig']['meta']['userName'] - except KeyError: - uploader = None + uploader = video.get('owner', {}).get('username') + uploader_id = video.get('owner', {}).get('id') - thumbnail = self._html_search_regex(r'<link rel="image_src" href="(?P<thumb>.*?)"', - webpage, 'thumbnail', fatal=False) + thumbnails = [{ + 'id': thumbnail_id, + 'url': thumbnail_url, + } for thumbnail_id, thumbnail_url in video.get('thumbnail', {}).items()] return { 'id': video_id, - 'url': video_url, - 'ext': 'flv', - 'title': video_title, + 'title': title, + 'description': description, + 'thumbnails': thumbnails, + 'timestamp': timestamp, + 'duration': duration, + 'view_count': view_count, 'uploader': uploader, - 'thumbnail': thumbnail, + 'uploader_id': uploader_id, + 'formats': formats, } diff --git a/youtube_dl/extractor/vbox7.py b/youtube_dl/extractor/vbox7.py index 722eb5236..1e740fbe6 100644 --- a/youtube_dl/extractor/vbox7.py +++ b/youtube_dl/extractor/vbox7.py @@ -4,11 +4,11 @@ from __future__ import unicode_literals from .common import InfoExtractor from ..compat import ( compat_urllib_parse, - compat_urllib_request, compat_urlparse, ) from ..utils import ( ExtractorError, + sanitized_Request, ) @@ -49,7 +49,7 @@ class Vbox7IE(InfoExtractor): info_url = "http://vbox7.com/play/magare.do" data = compat_urllib_parse.urlencode({'as3': '1', 'vid': video_id}) - info_request = compat_urllib_request.Request(info_url, data) + info_request = sanitized_Request(info_url, data) info_request.add_header('Content-Type', 'application/x-www-form-urlencoded') info_response = self._download_webpage(info_request, video_id, 'Downloading info webpage') if info_response is None: diff --git a/youtube_dl/extractor/veoh.py b/youtube_dl/extractor/veoh.py index 01e258e32..9633f7ffe 100644 --- a/youtube_dl/extractor/veoh.py +++ b/youtube_dl/extractor/veoh.py @@ -4,12 +4,10 @@ import re import json from .common import InfoExtractor -from ..compat import ( - compat_urllib_request, -) from ..utils import ( int_or_none, ExtractorError, + sanitized_Request, ) @@ -110,7 +108,7 @@ class VeohIE(InfoExtractor): if 'class="adultwarning-container"' in webpage: self.report_age_confirmation() age_limit = 18 - request = compat_urllib_request.Request(url) + request = sanitized_Request(url) request.add_header('Cookie', 'confirmedAdult=true') webpage = self._download_webpage(request, video_id) diff --git a/youtube_dl/extractor/vessel.py b/youtube_dl/extractor/vessel.py index 3c8d2a943..1a0ff3395 100644 --- a/youtube_dl/extractor/vessel.py +++ b/youtube_dl/extractor/vessel.py @@ -4,10 +4,10 @@ from __future__ import unicode_literals import json from .common import InfoExtractor -from ..compat import compat_urllib_request from ..utils import ( ExtractorError, parse_iso8601, + sanitized_Request, ) @@ -33,7 +33,7 @@ class VesselIE(InfoExtractor): @staticmethod def make_json_request(url, data): payload = json.dumps(data).encode('utf-8') - req = compat_urllib_request.Request(url, payload) + req = sanitized_Request(url, payload) req.add_header('Content-Type', 'application/json; charset=utf-8') return req diff --git a/youtube_dl/extractor/vevo.py b/youtube_dl/extractor/vevo.py index c17094f81..02dfd36f4 100644 --- a/youtube_dl/extractor/vevo.py +++ b/youtube_dl/extractor/vevo.py @@ -1,15 +1,16 @@ from __future__ import unicode_literals import re -import xml.etree.ElementTree from .common import InfoExtractor from ..compat import ( - 
compat_urllib_request, + compat_etree_fromstring, + compat_urlparse, ) from ..utils import ( ExtractorError, int_or_none, + sanitized_Request, ) @@ -69,11 +70,22 @@ class VevoIE(InfoExtractor): 'params': { 'skip_download': 'true', } + }, { + 'note': 'No video_info', + 'url': 'http://www.vevo.com/watch/k-camp-1/Till-I-Die/USUV71503000', + 'md5': '8b83cc492d72fc9cf74a02acee7dc1b0', + 'info_dict': { + 'id': 'USUV71503000', + 'ext': 'mp4', + 'title': 'Till I Die - K Camp ft. T.I.', + 'duration': 193, + }, + 'expected_warnings': ['Unable to download SMIL file'], }] _SMIL_BASE_URL = 'http://smil.lvl3.vevo.com/' def _real_initialize(self): - req = compat_urllib_request.Request( + req = sanitized_Request( 'http://www.vevo.com/auth', data=b'') webpage = self._download_webpage( req, None, @@ -83,11 +95,17 @@ class VevoIE(InfoExtractor): if webpage is False: self._oauth_token = None else: + if 'THIS PAGE IS CURRENTLY UNAVAILABLE IN YOUR REGION' in webpage: + raise ExtractorError('%s said: This page is currently unavailable in your region.' % self.IE_NAME, expected=True) + self._oauth_token = self._search_regex( r'access_token":\s*"([^"]+)"', webpage, 'access token', fatal=False) def _formats_from_json(self, video_info): + if not video_info: + return [] + last_version = {'version': -1} for version in video_info['videoVersions']: # These are the HTTP downloads, other types are for different manifests @@ -97,7 +115,7 @@ class VevoIE(InfoExtractor): if last_version['version'] == -1: raise ExtractorError('Unable to extract last version of the video') - renditions = xml.etree.ElementTree.fromstring(last_version['data']) + renditions = compat_etree_fromstring(last_version['data']) formats = [] # Already sorted from worst to best quality for rend in renditions.findall('rendition'): @@ -112,9 +130,8 @@ class VevoIE(InfoExtractor): }) return formats - def _formats_from_smil(self, smil_xml): + def _formats_from_smil(self, smil_doc): formats = [] - smil_doc = xml.etree.ElementTree.fromstring(smil_xml.encode('utf-8')) els = smil_doc.findall('.//{http://www.w3.org/2001/SMIL20/Language}video') for el in els: src = el.attrib['src'] @@ -147,14 +164,14 @@ class VevoIE(InfoExtractor): }) return formats - def _download_api_formats(self, video_id): + def _download_api_formats(self, video_id, video_url): if not self._oauth_token: self._downloader.report_warning( 'No oauth token available, skipping API HLS download') return [] - api_url = 'https://apiv2.vevo.com/video/%s/streams/hls?token=%s' % ( - video_id, self._oauth_token) + api_url = compat_urlparse.urljoin(video_url, '//apiv2.vevo.com/video/%s/streams/hls?token=%s' % ( + video_id, self._oauth_token)) api_data = self._download_json( api_url, video_id, note='Downloading HLS formats', @@ -168,18 +185,26 @@ class VevoIE(InfoExtractor): preference=0) def _real_extract(self, url): - mobj = re.match(self._VALID_URL, url) - video_id = mobj.group('id') + video_id = self._match_id(url) + + webpage = None json_url = 'http://videoplayer.vevo.com/VideoService/AuthenticateVideo?isrc=%s' % video_id response = self._download_json(json_url, video_id) - video_info = response['video'] + video_info = response['video'] or {} - if not video_info: + if not video_info and response.get('statusCode') != 909: if 'statusMessage' in response: raise ExtractorError('%s said: %s' % (self.IE_NAME, response['statusMessage']), expected=True) raise ExtractorError('Unable to extract videos') + if not video_info: + if url.startswith('vevo:'): + raise ExtractorError('Please specify full Vevo URL for 
downloading', expected=True) + webpage = self._download_webpage(url, video_id) + + title = video_info.get('title') or self._og_search_title(webpage) + formats = self._formats_from_json(video_info) is_explicit = video_info.get('isExplicit') @@ -191,11 +216,11 @@ class VevoIE(InfoExtractor): age_limit = None # Download via HLS API - formats.extend(self._download_api_formats(video_id)) + formats.extend(self._download_api_formats(video_id, url)) # Download SMIL smil_blocks = sorted(( - f for f in video_info['videoVersions'] + f for f in video_info.get('videoVersions', []) if f['sourceType'] == 13), key=lambda f: f['version']) smil_url = '%s/Video/V2/VFILE/%s/%sr.smil' % ( @@ -207,23 +232,26 @@ class VevoIE(InfoExtractor): if smil_url_m is not None: smil_url = smil_url_m if smil_url: - smil_xml = self._download_webpage( - smil_url, video_id, 'Downloading SMIL info', fatal=False) - if smil_xml: - formats.extend(self._formats_from_smil(smil_xml)) + smil_doc = self._download_smil(smil_url, video_id, fatal=False) + if smil_doc: + formats.extend(self._formats_from_smil(smil_doc)) self._sort_formats(formats) - timestamp_ms = int_or_none(self._search_regex( + timestamp = int_or_none(self._search_regex( r'/Date\((\d+)\)/', - video_info['launchDate'], 'launch date', fatal=False)) + video_info['launchDate'], 'launch date', fatal=False), + scale=1000) if video_info else None + + duration = video_info.get('duration') or int_or_none( + self._html_search_meta('video:duration', webpage)) return { 'id': video_id, - 'title': video_info['title'], + 'title': title, 'formats': formats, - 'thumbnail': video_info['imageUrl'], - 'timestamp': timestamp_ms // 1000, - 'uploader': video_info['mainArtists'][0]['artistName'], - 'duration': video_info['duration'], + 'thumbnail': video_info.get('imageUrl'), + 'timestamp': timestamp, + 'uploader': video_info['mainArtists'][0]['artistName'] if video_info else None, + 'duration': duration, 'age_limit': age_limit, } diff --git a/youtube_dl/extractor/vgtv.py b/youtube_dl/extractor/vgtv.py index f38a72fde..86ba70ed9 100644 --- a/youtube_dl/extractor/vgtv.py +++ b/youtube_dl/extractor/vgtv.py @@ -4,26 +4,48 @@ from __future__ import unicode_literals import re from .common import InfoExtractor +from .xstream import XstreamIE from ..utils import ( ExtractorError, float_or_none, ) -class VGTVIE(InfoExtractor): - IE_DESC = 'VGTV and BTTV' +class VGTVIE(XstreamIE): + IE_DESC = 'VGTV, BTTV, FTV, Aftenposten and Aftonbladet' + + _HOST_TO_APPNAME = { + 'vgtv.no': 'vgtv', + 'bt.no/tv': 'bttv', + 'aftenbladet.no/tv': 'satv', + 'fvn.no/fvntv': 'fvntv', + 'aftenposten.no/webtv': 'aptv', + } + + _APP_NAME_TO_VENDOR = { + 'vgtv': 'vgtv', + 'bttv': 'bt', + 'satv': 'sa', + 'fvntv': 'fvn', + 'aptv': 'ap', + } + _VALID_URL = r'''(?x) - (?: - vgtv:| - http://(?:www\.)? + (?:https?://(?:www\.)? 
+ (?P<host> + %s ) - (?P<host>vgtv|bt) + / (?: - :| - \.no/(?:tv/)?\#!/(?:video|live)/ - ) - (?P<id>[0-9]+) - ''' + \#!/(?:video|live)/| + embed?.*id= + )| + (?P<appname> + %s + ):) + (?P<id>\d+) + ''' % ('|'.join(_HOST_TO_APPNAME.keys()), '|'.join(_APP_NAME_TO_VENDOR.keys())) + _TESTS = [ { # streamType: vod @@ -59,25 +81,37 @@ class VGTVIE(InfoExtractor): # m3u8 download 'skip_download': True, }, + 'skip': 'Video is no longer available', }, { - # streamType: live + # streamType: wasLive 'url': 'http://www.vgtv.no/#!/live/113063/direkte-v75-fra-solvalla', + 'md5': '458f4841239dab414343b50e5af8869c', 'info_dict': { 'id': '113063', 'ext': 'flv', - 'title': 're:^DIREKTE: V75 fra Solvalla [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$', + 'title': 'V75 fra Solvalla 30.05.15', 'description': 'md5:b3743425765355855f88e096acc93231', 'thumbnail': 're:^https?://.*\.jpg', - 'duration': 0, + 'duration': 25966, 'timestamp': 1432975582, 'upload_date': '20150530', 'view_count': int, }, - 'params': { - # m3u8 download - 'skip_download': True, - }, + }, + { + 'url': 'http://www.aftenposten.no/webtv/#!/video/21039/trailer-sweatshop-i-can-t-take-any-more', + 'md5': 'fd828cd29774a729bf4d4425fe192972', + 'info_dict': { + 'id': '21039', + 'ext': 'mov', + 'title': 'TRAILER: «SWEATSHOP» - I can´t take any more', + 'description': 'md5:21891f2b0dd7ec2f78d84a50e54f8238', + 'duration': 66, + 'timestamp': 1417002452, + 'upload_date': '20141126', + 'view_count': int, + } }, { 'url': 'http://www.bt.no/tv/#!/video/100250/norling-dette-er-forskjellen-paa-1-divisjon-og-eliteserien', @@ -89,21 +123,27 @@ class VGTVIE(InfoExtractor): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') host = mobj.group('host') - - HOST_WEBSITES = { - 'vgtv': 'vgtv', - 'bt': 'bttv', - } + appname = self._HOST_TO_APPNAME[host] if host else mobj.group('appname') + vendor = self._APP_NAME_TO_VENDOR[appname] data = self._download_json( 'http://svp.vg.no/svp/api/v1/%s/assets/%s?appName=%s-website' - % (host, video_id, HOST_WEBSITES[host]), + % (vendor, video_id, appname), video_id, 'Downloading media JSON') if data.get('status') == 'inactive': raise ExtractorError( 'Video %s is no longer available' % video_id, expected=True) + info = { + 'formats': [], + } + if len(video_id) == 5: + if appname == 'bttv': + info = self._extract_video_info('btno', video_id) + elif appname == 'aptv': + info = self._extract_video_info('ap', video_id) + streams = data['streamUrls'] stream_type = data.get('streamType') @@ -112,47 +152,53 @@ class VGTVIE(InfoExtractor): hls_url = streams.get('hls') if hls_url: formats.extend(self._extract_m3u8_formats( - hls_url, video_id, 'mp4', m3u8_id='hls')) + hls_url, video_id, 'mp4', m3u8_id='hls', fatal=False)) hds_url = streams.get('hds') - # wasLive hds are always 404 - if hds_url and stream_type != 'wasLive': - formats.extend(self._extract_f4m_formats( - hds_url + '?hdcore=3.2.0&plugin=aasp-3.2.0.77.18', - video_id, f4m_id='hds')) + if hds_url: + hdcore_sign = 'hdcore=3.7.0' + f4m_formats = self._extract_f4m_formats( + hds_url + '?%s' % hdcore_sign, video_id, f4m_id='hds', fatal=False) + if f4m_formats: + for entry in f4m_formats: + # URLs without the extra param induce an 404 error + entry.update({'extra_param_to_segment_url': hdcore_sign}) + formats.append(entry) + mp4_urls = streams.get('pseudostreaming') or [] mp4_url = streams.get('mp4') if mp4_url: - _url = hls_url or hds_url - MP4_URL_TEMPLATE = '%s/%%s.%s' % (mp4_url.rpartition('/')[0], mp4_url.rpartition('.')[-1]) - for mp4_format in _url.split(','): - m 
= re.search('(?P<width>\d+)_(?P<height>\d+)_(?P<vbr>\d+)', mp4_format) - if not m: - continue - width = int(m.group('width')) - height = int(m.group('height')) - vbr = int(m.group('vbr')) - formats.append({ - 'url': MP4_URL_TEMPLATE % mp4_format, - 'format_id': 'mp4-%s' % vbr, - 'width': width, - 'height': height, - 'vbr': vbr, - 'preference': 1, + mp4_urls.append(mp4_url) + for mp4_url in mp4_urls: + format_info = { + 'url': mp4_url, + } + mobj = re.search('(\d+)_(\d+)_(\d+)', mp4_url) + if mobj: + tbr = int(mobj.group(3)) + format_info.update({ + 'width': int(mobj.group(1)), + 'height': int(mobj.group(2)), + 'tbr': tbr, + 'format_id': 'mp4-%s' % tbr, }) - self._sort_formats(formats) + formats.append(format_info) + + info['formats'].extend(formats) - return { + self._sort_formats(info['formats']) + + info.update({ 'id': video_id, - 'title': self._live_title(data['title']), + 'title': self._live_title(data['title']) if stream_type == 'live' else data['title'], 'description': data['description'], 'thumbnail': data['images']['main'] + '?t[]=900x506q80', 'timestamp': data['published'], 'duration': float_or_none(data['duration'], 1000), 'view_count': data['displays'], - 'formats': formats, 'is_live': True if stream_type == 'live' else False, - } + }) + return info class BTArticleIE(InfoExtractor): @@ -161,7 +207,7 @@ class BTArticleIE(InfoExtractor): _VALID_URL = 'http://(?:www\.)?bt\.no/(?:[^/]+/)+(?P<id>[^/]+)-\d+\.html' _TEST = { 'url': 'http://www.bt.no/nyheter/lokalt/Kjemper-for-internatet-1788214.html', - 'md5': 'd055e8ee918ef2844745fcfd1a4175fb', + 'md5': '2acbe8ad129b3469d5ae51b1158878df', 'info_dict': { 'id': '23199', 'ext': 'mp4', @@ -178,15 +224,15 @@ class BTArticleIE(InfoExtractor): def _real_extract(self, url): webpage = self._download_webpage(url, self._match_id(url)) video_id = self._search_regex( - r'SVP\.Player\.load\(\s*(\d+)', webpage, 'video id') - return self.url_result('vgtv:bt:%s' % video_id, 'VGTV') + r'<video[^>]+data-id="(\d+)"', webpage, 'video id') + return self.url_result('bttv:%s' % video_id, 'VGTV') class BTVestlendingenIE(InfoExtractor): IE_NAME = 'bt:vestlendingen' IE_DESC = 'Bergens Tidende - Vestlendingen' _VALID_URL = 'http://(?:www\.)?bt\.no/spesial/vestlendingen/#!/(?P<id>\d+)' - _TEST = { + _TESTS = [{ 'url': 'http://www.bt.no/spesial/vestlendingen/#!/86588', 'md5': 'd7d17e3337dc80de6d3a540aefbe441b', 'info_dict': { @@ -197,7 +243,19 @@ class BTVestlendingenIE(InfoExtractor): 'timestamp': 1430473209, 'upload_date': '20150501', }, - } + 'skip': '404 Error', + }, { + 'url': 'http://www.bt.no/spesial/vestlendingen/#!/86255', + 'md5': 'a2893f8632e96389f4bdf36aa9463ceb', + 'info_dict': { + 'id': '86255', + 'ext': 'mov', + 'title': 'Du må tåle å fryse og være sulten', + 'description': 'md5:b8046f4d022d5830ddab04865791d063', + 'upload_date': '20150321', + 'timestamp': 1426942023, + }, + }] def _real_extract(self, url): - return self.url_result('xstream:btno:%s' % self._match_id(url), 'Xstream') + return self.url_result('bttv:%s' % self._match_id(url), 'VGTV') diff --git a/youtube_dl/extractor/vice.py b/youtube_dl/extractor/vice.py index 01af7a995..3db6286e4 100644 --- a/youtube_dl/extractor/vice.py +++ b/youtube_dl/extractor/vice.py @@ -15,6 +15,7 @@ class ViceIE(InfoExtractor): 'id': '43cW1mYzpia9IlestBjVpd23Yu3afAfp', 'ext': 'mp4', 'title': 'VICE_COWBOYCAPITALISTS_PART01_v1_VICE_WM_1080p.mov', + 'duration': 725.983, }, 'params': { # Requires ffmpeg (m3u8 manifest) diff --git a/youtube_dl/extractor/viddler.py b/youtube_dl/extractor/viddler.py index 
8516a2940..40ffbad2a 100644 --- a/youtube_dl/extractor/viddler.py +++ b/youtube_dl/extractor/viddler.py @@ -4,9 +4,7 @@ from .common import InfoExtractor from ..utils import ( float_or_none, int_or_none, -) -from ..compat import ( - compat_urllib_request + sanitized_Request, ) @@ -65,7 +63,7 @@ class ViddlerIE(InfoExtractor): 'http://api.viddler.com/api/v2/viddler.videos.getPlaybackDetails.json?video_id=%s&key=v0vhrt7bg2xq1vyxhkct' % video_id) headers = {'Referer': 'http://static.cdn-ec.viddler.com/js/arpeggio/v2/embed.html'} - request = compat_urllib_request.Request(json_url, None, headers) + request = sanitized_Request(json_url, None, headers) data = self._download_json(request, video_id)['video'] formats = [] diff --git a/youtube_dl/extractor/videofyme.py b/youtube_dl/extractor/videofyme.py index 94f9e9be9..cd3f50a63 100644 --- a/youtube_dl/extractor/videofyme.py +++ b/youtube_dl/extractor/videofyme.py @@ -2,8 +2,8 @@ from __future__ import unicode_literals from .common import InfoExtractor from ..utils import ( - find_xpath_attr, int_or_none, + parse_iso8601, ) @@ -18,33 +18,35 @@ class VideofyMeIE(InfoExtractor): 'id': '1100701', 'ext': 'mp4', 'title': 'This is VideofyMe', - 'description': None, + 'description': '', + 'upload_date': '20130326', + 'timestamp': 1364288959, 'uploader': 'VideofyMe', 'uploader_id': 'thisisvideofyme', 'view_count': int, + 'likes': int, + 'comment_count': int, }, - } def _real_extract(self, url): video_id = self._match_id(url) - config = self._download_xml('http://sunshine.videofy.me/?videoId=%s' % video_id, - video_id) - video = config.find('video') - sources = video.find('sources') - url_node = next(node for node in [find_xpath_attr(sources, 'source', 'id', 'HQ %s' % key) - for key in ['on', 'av', 'off']] if node is not None) - video_url = url_node.find('url').text - view_count = int_or_none(self._search_regex( - r'([0-9]+)', video.find('views').text, 'view count', fatal=False)) + + config = self._download_json('http://vf-player-info-loader.herokuapp.com/%s.json' % video_id, video_id)['videoinfo'] + + video = config.get('video') + blog = config.get('blog', {}) return { 'id': video_id, - 'title': video.find('title').text, - 'url': video_url, - 'thumbnail': video.find('thumb').text, - 'description': video.find('description').text, - 'uploader': config.find('blog/name').text, - 'uploader_id': video.find('identifier').text, - 'view_count': view_count, + 'title': video['title'], + 'url': video['sources']['source']['url'], + 'thumbnail': video.get('thumb'), + 'description': video.get('description'), + 'timestamp': parse_iso8601(video.get('date')), + 'uploader': blog.get('name'), + 'uploader_id': blog.get('identifier'), + 'view_count': int_or_none(self._search_regex(r'([0-9]+)', video.get('views'), 'view count', fatal=False)), + 'likes': int_or_none(video.get('likes')), + 'comment_count': int_or_none(video.get('nrOfComments')), } diff --git a/youtube_dl/extractor/videolecturesnet.py b/youtube_dl/extractor/videolecturesnet.py deleted file mode 100644 index ef2da5632..000000000 --- a/youtube_dl/extractor/videolecturesnet.py +++ /dev/null @@ -1,86 +0,0 @@ -from __future__ import unicode_literals - -import re - -from .common import InfoExtractor -from ..utils import ( - find_xpath_attr, - int_or_none, - parse_duration, - unified_strdate, -) - - -class VideoLecturesNetIE(InfoExtractor): - _VALID_URL = r'http://(?:www\.)?videolectures\.net/(?P<id>[^/#?]+)/*(?:[#?].*)?$' - IE_NAME = 'videolectures.net' - - _TEST = { - 'url': 
'http://videolectures.net/promogram_igor_mekjavic_eng/', - 'info_dict': { - 'id': 'promogram_igor_mekjavic_eng', - 'ext': 'mp4', - 'title': 'Automatics, robotics and biocybernetics', - 'description': 'md5:815fc1deb6b3a2bff99de2d5325be482', - 'upload_date': '20130627', - 'duration': 565, - 'thumbnail': 're:http://.*\.jpg', - }, - } - - def _real_extract(self, url): - mobj = re.match(self._VALID_URL, url) - video_id = mobj.group('id') - - smil_url = 'http://videolectures.net/%s/video/1/smil.xml' % video_id - smil = self._download_xml(smil_url, video_id) - - title = find_xpath_attr(smil, './/meta', 'name', 'title').attrib['content'] - description_el = find_xpath_attr(smil, './/meta', 'name', 'abstract') - description = ( - None if description_el is None - else description_el.attrib['content']) - upload_date = unified_strdate( - find_xpath_attr(smil, './/meta', 'name', 'date').attrib['content']) - - switch = smil.find('.//switch') - duration = parse_duration(switch.attrib.get('dur')) - thumbnail_el = find_xpath_attr(switch, './image', 'type', 'thumbnail') - thumbnail = ( - None if thumbnail_el is None else thumbnail_el.attrib.get('src')) - - formats = [] - for v in switch.findall('./video'): - proto = v.attrib.get('proto') - if proto not in ['http', 'rtmp']: - continue - f = { - 'width': int_or_none(v.attrib.get('width')), - 'height': int_or_none(v.attrib.get('height')), - 'filesize': int_or_none(v.attrib.get('size')), - 'tbr': int_or_none(v.attrib.get('systemBitrate')) / 1000.0, - 'ext': v.attrib.get('ext'), - } - src = v.attrib['src'] - if proto == 'http': - if self._is_valid_url(src, video_id): - f['url'] = src - formats.append(f) - elif proto == 'rtmp': - f.update({ - 'url': v.attrib['streamer'], - 'play_path': src, - 'rtmp_real_time': True, - }) - formats.append(f) - self._sort_formats(formats) - - return { - 'id': video_id, - 'title': title, - 'description': description, - 'upload_date': upload_date, - 'duration': duration, - 'thumbnail': thumbnail, - 'formats': formats, - } diff --git a/youtube_dl/extractor/videomega.py b/youtube_dl/extractor/videomega.py index 78ff6310a..87aca327b 100644 --- a/youtube_dl/extractor/videomega.py +++ b/youtube_dl/extractor/videomega.py @@ -4,7 +4,7 @@ from __future__ import unicode_literals import re from .common import InfoExtractor -from ..compat import compat_urllib_request +from ..utils import sanitized_Request class VideoMegaIE(InfoExtractor): @@ -30,7 +30,7 @@ class VideoMegaIE(InfoExtractor): video_id = self._match_id(url) iframe_url = 'http://videomega.tv/cdn.php?ref=%s' % video_id - req = compat_urllib_request.Request(iframe_url) + req = sanitized_Request(iframe_url) req.add_header('Referer', url) req.add_header('Cookie', 'noadvtday=0') webpage = self._download_webpage(req, video_id) diff --git a/youtube_dl/extractor/videoweed.py b/youtube_dl/extractor/videoweed.py deleted file mode 100644 index ca2e50935..000000000 --- a/youtube_dl/extractor/videoweed.py +++ /dev/null @@ -1,26 +0,0 @@ -from __future__ import unicode_literals - -from .novamov import NovaMovIE - - -class VideoWeedIE(NovaMovIE): - IE_NAME = 'videoweed' - IE_DESC = 'VideoWeed' - - _VALID_URL = NovaMovIE._VALID_URL_TEMPLATE % {'host': 'videoweed\.(?:es|com)'} - - _HOST = 'www.videoweed.es' - - _FILE_DELETED_REGEX = r'>This file no longer exists on our servers.<' - _TITLE_REGEX = r'<h1 class="text_shadow">([^<]+)</h1>' - - _TEST = { - 'url': 'http://www.videoweed.es/file/b42178afbea14', - 'md5': 'abd31a2132947262c50429e1d16c1bfd', - 'info_dict': { - 'id': 'b42178afbea14', - 'ext': 
'flv', - 'title': 'optical illusion dissapeared image magic illusion', - 'description': '' - }, - } diff --git a/youtube_dl/extractor/vidme.py b/youtube_dl/extractor/vidme.py index 078d283b2..3d63ed4f0 100644 --- a/youtube_dl/extractor/vidme.py +++ b/youtube_dl/extractor/vidme.py @@ -14,7 +14,7 @@ class VidmeIE(InfoExtractor): _VALID_URL = r'https?://vid\.me/(?:e/)?(?P<id>[\da-zA-Z]+)' _TESTS = [{ 'url': 'https://vid.me/QNB', - 'md5': 'c62f1156138dc3323902188c5b5a8bd6', + 'md5': 'f42d05e7149aeaec5c037b17e5d3dc82', 'info_dict': { 'id': 'QNB', 'ext': 'mp4', @@ -93,6 +93,39 @@ class VidmeIE(InfoExtractor): 'params': { 'skip_download': True, }, + }, { + # nsfw, user-disabled + 'url': 'https://vid.me/dzGJ', + 'only_matching': True, + }, { + # suspended + 'url': 'https://vid.me/Ox3G', + 'only_matching': True, + }, { + # deleted + 'url': 'https://vid.me/KTPm', + 'only_matching': True, + }, { + # no formats in the API response + 'url': 'https://vid.me/e5g', + 'info_dict': { + 'id': 'e5g', + 'ext': 'mp4', + 'title': 'Video upload (e5g)', + 'thumbnail': 're:^https?://.*\.jpg', + 'timestamp': 1401480195, + 'upload_date': '20140530', + 'uploader': None, + 'uploader_id': None, + 'age_limit': 0, + 'duration': 483, + 'view_count': int, + 'like_count': int, + 'comment_count': int, + }, + 'params': { + 'skip_download': True, + }, }] def _real_extract(self, url): @@ -114,6 +147,17 @@ class VidmeIE(InfoExtractor): video = response['video'] + if video.get('state') == 'deleted': + raise ExtractorError( + 'Vidme said: Sorry, this video has been deleted.', + expected=True) + + if video.get('state') in ('user-disabled', 'suspended'): + raise ExtractorError( + 'Vidme said: This video has been suspended either due to a copyright claim, ' + 'or for violating the terms of use.', + expected=True) + formats = [{ 'format_id': f.get('type'), 'url': f['uri'], @@ -121,6 +165,14 @@ class VidmeIE(InfoExtractor): 'height': int_or_none(f.get('height')), 'preference': 0 if f.get('type', '').endswith('clip') else 1, } for f in video.get('formats', []) if f.get('uri')] + + if not formats and video.get('complete_url'): + formats.append({ + 'url': video.get('complete_url'), + 'width': int_or_none(video.get('width')), + 'height': int_or_none(video.get('height')), + }) + self._sort_formats(formats) title = video['title'] @@ -137,7 +189,7 @@ class VidmeIE(InfoExtractor): return { 'id': video_id, - 'title': title, + 'title': title or 'Video upload (%s)' % video_id, 'description': description, 'thumbnail': thumbnail, 'uploader': uploader, diff --git a/youtube_dl/extractor/vidzi.py b/youtube_dl/extractor/vidzi.py index 08a5a7b8d..2ba9f31df 100644 --- a/youtube_dl/extractor/vidzi.py +++ b/youtube_dl/extractor/vidzi.py @@ -20,8 +20,14 @@ class VidziIE(InfoExtractor): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) - video_url = self._html_search_regex( - r'{\s*file\s*:\s*"([^"]+)"\s*}', webpage, 'video url') + video_host = self._html_search_regex( + r'id=\'vplayer\'><img src="http://(.*?)/i', webpage, + 'video host') + video_hash = self._html_search_regex( + r'\|([a-z0-9]+)\|hls\|type', webpage, 'video_hash') + ext = self._html_search_regex( + r'\|tracks\|([a-z0-9]+)\|', webpage, 'video ext') + video_url = 'http://' + video_host + '/' + video_hash + '/v.' 
+ ext title = self._html_search_regex( r'(?s)<h2 class="video-title">(.*?)</h2>', webpage, 'title') diff --git a/youtube_dl/extractor/viewster.py b/youtube_dl/extractor/viewster.py index 632e57fb4..185b1c119 100644 --- a/youtube_dl/extractor/viewster.py +++ b/youtube_dl/extractor/viewster.py @@ -4,7 +4,6 @@ from __future__ import unicode_literals from .common import InfoExtractor from ..compat import ( compat_HTTPError, - compat_urllib_request, compat_urllib_parse, compat_urllib_parse_unquote, ) @@ -13,6 +12,7 @@ from ..utils import ( ExtractorError, int_or_none, parse_iso8601, + sanitized_Request, HEADRequest, ) @@ -76,7 +76,7 @@ class ViewsterIE(InfoExtractor): _ACCEPT_HEADER = 'application/json, text/javascript, */*; q=0.01' def _download_json(self, url, video_id, note='Downloading JSON metadata', fatal=True): - request = compat_urllib_request.Request(url) + request = sanitized_Request(url) request.add_header('Accept', self._ACCEPT_HEADER) request.add_header('Auth-token', self._AUTH_TOKEN) return super(ViewsterIE, self)._download_json(request, video_id, note, fatal=fatal) @@ -131,10 +131,11 @@ class ViewsterIE(InfoExtractor): formats.extend(self._extract_f4m_formats( video_url, video_id, f4m_id='hds')) elif ext == 'm3u8': - formats.extend(self._extract_m3u8_formats( + m3u8_formats = self._extract_m3u8_formats( video_url, video_id, 'mp4', m3u8_id='hls', - fatal=False # m3u8 sometimes fail - )) + fatal=False) # m3u8 sometimes fail + if m3u8_formats: + formats.extend(m3u8_formats) else: format_id = media.get('Bitrate') f = { diff --git a/youtube_dl/extractor/viidea.py b/youtube_dl/extractor/viidea.py new file mode 100644 index 000000000..525e303d4 --- /dev/null +++ b/youtube_dl/extractor/viidea.py @@ -0,0 +1,188 @@ +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..compat import ( + compat_urlparse, + compat_str, +) +from ..utils import ( + parse_duration, + js_to_json, + parse_iso8601, +) + + +class ViideaIE(InfoExtractor): + _VALID_URL = r'''(?x)http://(?:www\.)?(?: + videolectures\.net| + flexilearn\.viidea\.net| + presentations\.ocwconsortium\.org| + video\.travel-zoom\.si| + video\.pomp-forum\.si| + tv\.nil\.si| + video\.hekovnik.com| + video\.szko\.si| + kpk\.viidea\.com| + inside\.viidea\.net| + video\.kiberpipa\.org| + bvvideo\.si| + kongres\.viidea\.net| + edemokracija\.viidea\.com + )(?:/lecture)?/(?P<id>[^/]+)(?:/video/(?P<part>\d+))?/*(?:[#?].*)?$''' + + _TESTS = [{ + 'url': 'http://videolectures.net/promogram_igor_mekjavic_eng/', + 'info_dict': { + 'id': '20171', + 'display_id': 'promogram_igor_mekjavic_eng', + 'ext': 'mp4', + 'title': 'Automatics, robotics and biocybernetics', + 'description': 'md5:815fc1deb6b3a2bff99de2d5325be482', + 'thumbnail': 're:http://.*\.jpg', + 'timestamp': 1372349289, + 'upload_date': '20130627', + 'duration': 565, + }, + }, { + # video with invalid direct format links (HTTP 403) + 'url': 'http://videolectures.net/russir2010_filippova_nlp/', + 'info_dict': { + 'id': '14891', + 'display_id': 'russir2010_filippova_nlp', + 'ext': 'flv', + 'title': 'NLP at Google', + 'description': 'md5:fc7a6d9bf0302d7cc0e53f7ca23747b3', + 'thumbnail': 're:http://.*\.jpg', + 'timestamp': 1284375600, + 'upload_date': '20100913', + 'duration': 5352, + }, + 'params': { + # rtmp download + 'skip_download': True, + }, + }, { + # event playlist + 'url': 'http://videolectures.net/deeplearning2015_montreal/', + 'info_dict': { + 'id': '23181', + 'title': 'Deep Learning Summer School, Montreal 2015', + 'description': 
'md5:0533a85e4bd918df52a01f0e1ebe87b7', + 'thumbnail': 're:http://.*\.jpg', + 'timestamp': 1438560000, + }, + 'playlist_count': 30, + }, { + # multi part lecture + 'url': 'http://videolectures.net/mlss09uk_bishop_ibi/', + 'info_dict': { + 'id': '9737', + 'display_id': 'mlss09uk_bishop_ibi', + 'title': 'Introduction To Bayesian Inference', + 'thumbnail': 're:http://.*\.jpg', + 'timestamp': 1251622800, + }, + 'playlist': [{ + 'info_dict': { + 'id': '9737_part1', + 'display_id': 'mlss09uk_bishop_ibi_part1', + 'ext': 'wmv', + 'title': 'Introduction To Bayesian Inference (Part 1)', + 'thumbnail': 're:http://.*\.jpg', + 'duration': 4622, + 'timestamp': 1251622800, + 'upload_date': '20090830', + }, + }, { + 'info_dict': { + 'id': '9737_part2', + 'display_id': 'mlss09uk_bishop_ibi_part2', + 'ext': 'wmv', + 'title': 'Introduction To Bayesian Inference (Part 2)', + 'thumbnail': 're:http://.*\.jpg', + 'duration': 5641, + 'timestamp': 1251622800, + 'upload_date': '20090830', + }, + }], + 'playlist_count': 2, + }] + + def _real_extract(self, url): + lecture_slug, explicit_part_id = re.match(self._VALID_URL, url).groups() + + webpage = self._download_webpage(url, lecture_slug) + + cfg = self._parse_json(self._search_regex( + [r'cfg\s*:\s*({.+?})\s*,\s*[\da-zA-Z_]+\s*:\s*\(?\s*function', + r'cfg\s*:\s*({[^}]+})'], + webpage, 'cfg'), lecture_slug, js_to_json) + + lecture_id = compat_str(cfg['obj_id']) + + base_url = self._proto_relative_url(cfg['livepipe'], 'http:') + + lecture_data = self._download_json( + '%s/site/api/lecture/%s?format=json' % (base_url, lecture_id), + lecture_id)['lecture'][0] + + lecture_info = { + 'id': lecture_id, + 'display_id': lecture_slug, + 'title': lecture_data['title'], + 'timestamp': parse_iso8601(lecture_data.get('time')), + 'description': lecture_data.get('description_wiki'), + 'thumbnail': lecture_data.get('thumb'), + } + + playlist_entries = [] + lecture_type = lecture_data.get('type') + parts = [compat_str(video) for video in cfg.get('videos', [])] + if parts: + multipart = len(parts) > 1 + + def extract_part(part_id): + smil_url = '%s/%s/video/%s/smil.xml' % (base_url, lecture_slug, part_id) + smil = self._download_smil(smil_url, lecture_id) + info = self._parse_smil(smil, smil_url, lecture_id) + info['id'] = lecture_id if not multipart else '%s_part%s' % (lecture_id, part_id) + info['display_id'] = lecture_slug if not multipart else '%s_part%s' % (lecture_slug, part_id) + if multipart: + info['title'] += ' (Part %s)' % part_id + switch = smil.find('.//switch') + if switch is not None: + info['duration'] = parse_duration(switch.attrib.get('dur')) + item_info = lecture_info.copy() + item_info.update(info) + return item_info + + if explicit_part_id or not multipart: + result = extract_part(explicit_part_id or parts[0]) + else: + result = { + '_type': 'multi_video', + 'entries': [extract_part(part) for part in parts], + } + result.update(lecture_info) + + # Immediately return explicitly requested part or non event item + if explicit_part_id or lecture_type != 'evt': + return result + + playlist_entries.append(result) + + # It's probably a playlist + if not parts or lecture_type == 'evt': + playlist_webpage = self._download_webpage( + '%s/site/ajax/drilldown/?id=%s' % (base_url, lecture_id), lecture_id) + entries = [ + self.url_result(compat_urlparse.urljoin(url, video_url), 'Viidea') + for _, video_url in re.findall( + r'<a[^>]+href=(["\'])(.+?)\1[^>]+id=["\']lec=\d+', playlist_webpage)] + playlist_entries.extend(entries) + + playlist = 
self.playlist_result(playlist_entries, lecture_id) + playlist.update(lecture_info) + return playlist diff --git a/youtube_dl/extractor/viki.py b/youtube_dl/extractor/viki.py index ddbd395c8..433fc9914 100644 --- a/youtube_dl/extractor/viki.py +++ b/youtube_dl/extractor/viki.py @@ -7,14 +7,14 @@ import hmac import hashlib import itertools +from .common import InfoExtractor from ..utils import ( ExtractorError, int_or_none, parse_age_limit, parse_iso8601, + sanitized_Request, ) -from ..compat import compat_urllib_request -from .common import InfoExtractor class VikiBaseIE(InfoExtractor): @@ -30,6 +30,12 @@ class VikiBaseIE(InfoExtractor): _token = None + _ERRORS = { + 'geo': 'Sorry, this content is not available in your region.', + 'upcoming': 'Sorry, this content is not yet available.', + # 'paywall': 'paywall', + } + def _prepare_call(self, path, timestamp=None, post_data=None): path += '?' if '?' not in path else '&' if not timestamp: @@ -43,7 +49,7 @@ class VikiBaseIE(InfoExtractor): hashlib.sha1 ).hexdigest() url = self._API_URL_TEMPLATE % (query, sig) - return compat_urllib_request.Request( + return sanitized_Request( url, json.dumps(post_data).encode('utf-8')) if post_data else url def _call_api(self, path, video_id, note, timestamp=None, post_data=None): @@ -67,6 +73,12 @@ class VikiBaseIE(InfoExtractor): '%s returned error: %s' % (self.IE_NAME, error), expected=True) + def _check_errors(self, data): + for reason, status in data.get('blocking', {}).items(): + if status and reason in self._ERRORS: + raise ExtractorError('%s said: %s' % ( + self.IE_NAME, self._ERRORS[reason]), expected=True) + def _real_initialize(self): self._login() @@ -193,6 +205,7 @@ class VikiIE(VikiBaseIE): 'timestamp': 1321985454, 'description': 'md5:44b1e46619df3a072294645c770cef36', 'title': 'Love In Magic', + 'age_limit': 13, }, }] @@ -202,6 +215,8 @@ class VikiIE(VikiBaseIE): video = self._call_api( 'videos/%s.json' % video_id, video_id, 'Downloading video JSON') + self._check_errors(video) + title = self.dict_selection(video.get('titles', {}), 'en') if not title: title = 'Episode %d' % video.get('number') if video.get('type') == 'episode' else video.get('id') or video_id @@ -262,8 +277,9 @@ class VikiIE(VikiBaseIE): r'^(\d+)[pP]$', format_id, 'height', default=None)) for protocol, format_dict in stream_dict.items(): if format_id == 'm3u8': - formats = self._extract_m3u8_formats( - format_dict['url'], video_id, 'mp4', m3u8_id='m3u8-%s' % protocol) + formats.extend(self._extract_m3u8_formats( + format_dict['url'], video_id, 'mp4', 'm3u8_native', + m3u8_id='m3u8-%s' % protocol, fatal=False)) else: formats.append({ 'url': format_dict['url'], @@ -315,6 +331,8 @@ class VikiChannelIE(VikiBaseIE): 'containers/%s.json' % channel_id, channel_id, 'Downloading channel JSON') + self._check_errors(channel) + title = self.dict_selection(channel['titles'], 'en') description = self.dict_selection(channel['descriptions'], 'en') diff --git a/youtube_dl/extractor/vimeo.py b/youtube_dl/extractor/vimeo.py index 50df79ca1..7af699982 100644 --- a/youtube_dl/extractor/vimeo.py +++ b/youtube_dl/extractor/vimeo.py @@ -8,21 +8,22 @@ import itertools from .common import InfoExtractor from ..compat import ( compat_HTTPError, - compat_urllib_parse, - compat_urllib_request, compat_urlparse, ) from ..utils import ( + encode_dict, ExtractorError, InAdvancePagedList, int_or_none, RegexNotFoundError, + sanitized_Request, smuggle_url, std_headers, unified_strdate, unsmuggle_url, urlencode_postdata, unescapeHTML, + parse_filesize, ) @@ -39,23 
+40,31 @@ class VimeoBaseInfoExtractor(InfoExtractor): return self.report_login() webpage = self._download_webpage(self._LOGIN_URL, None, False) - token = self._extract_xsrft(webpage) - data = urlencode_postdata({ + token, vuid = self._extract_xsrft_and_vuid(webpage) + data = urlencode_postdata(encode_dict({ 'action': 'login', 'email': username, 'password': password, 'service': 'vimeo', 'token': token, - }) - login_request = compat_urllib_request.Request(self._LOGIN_URL, data) + })) + login_request = sanitized_Request(self._LOGIN_URL, data) login_request.add_header('Content-Type', 'application/x-www-form-urlencoded') login_request.add_header('Referer', self._LOGIN_URL) + self._set_vimeo_cookie('vuid', vuid) self._download_webpage(login_request, None, False, 'Wrong login info') - def _extract_xsrft(self, webpage): - return self._search_regex( + def _extract_xsrft_and_vuid(self, webpage): + xsrft = self._search_regex( r'xsrft\s*[=:]\s*(?P<q>["\'])(?P<xsrft>.+?)(?P=q)', webpage, 'login token', group='xsrft') + vuid = self._search_regex( + r'["\']vuid["\']\s*:\s*(["\'])(?P<vuid>.+?)\1', + webpage, 'vuid', group='vuid') + return xsrft, vuid + + def _set_vimeo_cookie(self, name, value): + self._set_cookie('vimeo.com', name, value) class VimeoIE(VimeoBaseInfoExtractor): @@ -80,12 +89,12 @@ class VimeoIE(VimeoBaseInfoExtractor): 'info_dict': { 'id': '56015672', 'ext': 'mp4', - "upload_date": "20121220", - "description": "This is a test case for youtube-dl.\nFor more information, see github.com/rg3/youtube-dl\nTest chars: \u2605 \" ' \u5e78 / \\ \u00e4 \u21ad \U0001d550", - "uploader_id": "user7108434", - "uploader": "Filippo Valsorda", - "title": "youtube-dl test video - \u2605 \" ' \u5e78 / \\ \u00e4 \u21ad \U0001d550", - "duration": 10, + 'title': "youtube-dl test video - \u2605 \" ' \u5e78 / \\ \u00e4 \u21ad \U0001d550", + 'description': 'md5:2d3305bad981a06ff79f027f19865021', + 'upload_date': '20121220', + 'uploader_id': 'user7108434', + 'uploader': 'Filippo Valsorda', + 'duration': 10, }, }, { @@ -98,7 +107,7 @@ class VimeoIE(VimeoBaseInfoExtractor): 'uploader_id': 'openstreetmapus', 'uploader': 'OpenStreetMap US', 'title': 'Andy Allan - Putting the Carto into OpenStreetMap Cartography', - 'description': 'md5:380943ec71b89736ff4bf27183233d09', + 'description': 'md5:fd69a7b8d8c34a4e1d2ec2e4afd6ec30', 'duration': 1595, }, }, @@ -128,7 +137,7 @@ class VimeoIE(VimeoBaseInfoExtractor): 'uploader_id': 'user18948128', 'uploader': 'Jaime Marquínez Ferrándiz', 'duration': 10, - 'description': 'This is "youtube-dl password protected test video" by Jaime Marquínez Ferrándiz on Vimeo, the home for high quality videos and the people who love them.', + 'description': 'This is "youtube-dl password protected test video" by Jaime Marquínez Ferrándiz on Vimeo, the home for high quality videos and the people\u2026', }, 'params': { 'videopassword': 'youtube-dl', @@ -152,7 +161,6 @@ class VimeoIE(VimeoBaseInfoExtractor): }, { 'url': 'http://vimeo.com/76979871', - 'md5': '3363dd6ffebe3784d56f4132317fd446', 'note': 'Video with subtitles', 'info_dict': { 'id': '76979871', @@ -177,6 +185,29 @@ class VimeoIE(VimeoBaseInfoExtractor): 'uploader_id': 'user28849593', }, }, + { + # contains original format + 'url': 'https://vimeo.com/33951933', + 'md5': '53c688fa95a55bf4b7293d37a89c5c53', + 'info_dict': { + 'id': '33951933', + 'ext': 'mp4', + 'title': 'FOX CLASSICS - Forever Classic ID - A Full Minute', + 'uploader': 'The DMCI', + 'uploader_id': 'dmci', + 'upload_date': '20111220', + 'description': 
'md5:ae23671e82d05415868f7ad1aec21147', + }, + }, + { + 'url': 'https://vimeo.com/109815029', + 'note': 'Video not completely processed, "failed" seed status', + 'only_matching': True, + }, + { + 'url': 'https://vimeo.com/groups/travelhd/videos/22439234', + 'only_matching': True, + }, ] @staticmethod @@ -198,17 +229,18 @@ class VimeoIE(VimeoBaseInfoExtractor): password = self._downloader.params.get('videopassword', None) if password is None: raise ExtractorError('This video is protected by a password, use the --video-password option', expected=True) - token = self._extract_xsrft(webpage) - data = urlencode_postdata({ + token, vuid = self._extract_xsrft_and_vuid(webpage) + data = urlencode_postdata(encode_dict({ 'password': password, 'token': token, - }) + })) if url.startswith('http://'): # vimeo only supports https now, but the user can give an http url url = url.replace('http://', 'https://') - password_request = compat_urllib_request.Request(url + '/password', data) + password_request = sanitized_Request(url + '/password', data) password_request.add_header('Content-Type', 'application/x-www-form-urlencoded') password_request.add_header('Referer', url) + self._set_vimeo_cookie('vuid', vuid) return self._download_webpage( password_request, video_id, 'Verifying the password', 'Wrong password') @@ -217,9 +249,9 @@ class VimeoIE(VimeoBaseInfoExtractor): password = self._downloader.params.get('videopassword', None) if password is None: raise ExtractorError('This video is protected by a password, use the --video-password option') - data = compat_urllib_parse.urlencode({'password': password}) + data = urlencode_postdata(encode_dict({'password': password})) pass_url = url + '/check-password' - password_request = compat_urllib_request.Request(pass_url, data) + password_request = sanitized_Request(pass_url, data) password_request.add_header('Content-Type', 'application/x-www-form-urlencoded') return self._download_json( password_request, video_id, @@ -248,7 +280,7 @@ class VimeoIE(VimeoBaseInfoExtractor): url = 'https://vimeo.com/' + video_id # Retrieve video webpage to extract further information - request = compat_urllib_request.Request(url, None, headers) + request = sanitized_Request(url, None, headers) try: webpage = self._download_webpage(request, video_id) except ExtractorError as ee: @@ -268,20 +300,30 @@ class VimeoIE(VimeoBaseInfoExtractor): self.report_extraction(video_id) vimeo_config = self._search_regex( - r'vimeo\.config\s*=\s*({.+?});', webpage, + r'vimeo\.config\s*=\s*(?:({.+?})|_extend\([^,]+,\s+({.+?})\));', webpage, 'vimeo config', default=None) if vimeo_config: seed_status = self._parse_json(vimeo_config, video_id).get('seed_status', {}) if seed_status.get('state') == 'failed': raise ExtractorError( - '%s returned error: %s' % (self.IE_NAME, seed_status['title']), + '%s said: %s' % (self.IE_NAME, seed_status['title']), expected=True) # Extract the config JSON try: try: config_url = self._html_search_regex( - r' data-config-url="(.+?)"', webpage, 'config URL') + r' data-config-url="(.+?)"', webpage, + 'config URL', default=None) + if not config_url: + # Sometimes new react-based page is served instead of old one that require + # different config URL extraction approach (see + # https://github.com/rg3/youtube-dl/pull/7209) + vimeo_clip_page_config = self._search_regex( + r'vimeo\.clip_page_config\s*=\s*({.+?});', webpage, + 'vimeo clip page config') + config_url = self._parse_json( + vimeo_clip_page_config, video_id)['player']['config_url'] config_json = 
self._download_webpage(config_url, video_id) config = json.loads(config_json) except RegexNotFoundError: @@ -364,41 +406,42 @@ class VimeoIE(VimeoBaseInfoExtractor): like_count = None comment_count = None - # Vimeo specific: extract request signature and timestamp - sig = config['request']['signature'] - timestamp = config['request']['timestamp'] - - # Vimeo specific: extract video codec and quality information - # First consider quality, then codecs, then take everything - codecs = [('vp6', 'flv'), ('vp8', 'flv'), ('h264', 'mp4')] - files = {'hd': [], 'sd': [], 'other': []} - config_files = config["video"].get("files") or config["request"].get("files") - for codec_name, codec_extension in codecs: - for quality in config_files.get(codec_name, []): - format_id = '-'.join((codec_name, quality)).lower() - key = quality if quality in files else 'other' - video_url = None - if isinstance(config_files[codec_name], dict): - file_info = config_files[codec_name][quality] - video_url = file_info.get('url') - else: - file_info = {} - if video_url is None: - video_url = "http://player.vimeo.com/play_redirect?clip_id=%s&sig=%s&time=%s&quality=%s&codecs=%s&type=moogaloop_local&embed_location=" \ - % (video_id, sig, timestamp, quality, codec_name.upper()) - - files[key].append({ - 'ext': codec_extension, - 'url': video_url, - 'format_id': format_id, - 'width': file_info.get('width'), - 'height': file_info.get('height'), - }) formats = [] - for key in ('other', 'sd', 'hd'): - formats += files[key] - if len(formats) == 0: - raise ExtractorError('No known codec found') + download_request = sanitized_Request('https://vimeo.com/%s?action=load_download_config' % video_id, headers={ + 'X-Requested-With': 'XMLHttpRequest'}) + download_data = self._download_json(download_request, video_id, fatal=False) + if download_data: + source_file = download_data.get('source_file') + if source_file and not source_file.get('is_cold') and not source_file.get('is_defrosting'): + formats.append({ + 'url': source_file['download_url'], + 'ext': source_file['extension'].lower(), + 'width': int_or_none(source_file.get('width')), + 'height': int_or_none(source_file.get('height')), + 'filesize': parse_filesize(source_file.get('size')), + 'format_id': source_file.get('public_name', 'Original'), + 'preference': 1, + }) + config_files = config['video'].get('files') or config['request'].get('files', {}) + for f in config_files.get('progressive', []): + video_url = f.get('url') + if not video_url: + continue + formats.append({ + 'url': video_url, + 'format_id': 'http-%s' % f.get('quality'), + 'width': int_or_none(f.get('width')), + 'height': int_or_none(f.get('height')), + 'fps': int_or_none(f.get('fps')), + 'tbr': int_or_none(f.get('bitrate')), + }) + m3u8_url = config_files.get('hls', {}).get('url') + if m3u8_url: + formats.extend(self._extract_m3u8_formats( + m3u8_url, video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)) + # Bitrates are completely broken. Single m3u8 may contain entries in kbps and bps + # at the same time without actual units specified. This lead to wrong sorting. 
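# [editor's note] A minimal sketch of what the explicit field_preference below buys;
# this is NOT youtube-dl's actual _sort_formats, and the sample format dicts are made
# up. The idea: rank formats by the named fields in order, so the broken m3u8 bitrate
# values noted above never drive the ordering. (The real call also tie-breaks on
# format_id; it is omitted here to keep the sort key homogeneous.)
def sort_by_field_preference(formats, field_preference=('preference', 'height', 'width', 'fps')):
    def key(f):
        # A missing field sorts as worst; ordering is worst-first, best-last.
        return tuple(f.get(field) if f.get(field) is not None else -1
                     for field in field_preference)
    formats.sort(key=key)

fmts = [
    {'format_id': 'hls-2510', 'height': 1080},
    {'format_id': 'http-720p', 'height': 720},
    {'format_id': 'Original', 'height': 1080, 'preference': 1},
]
sort_by_field_preference(fmts)
print([f['format_id'] for f in fmts])  # ['http-720p', 'hls-2510', 'Original'] - best last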
+ self._sort_formats(formats, field_preference=('preference', 'height', 'width', 'fps', 'format_id')) subtitles = {} text_tracks = config['request'].get('text_tracks') @@ -459,23 +502,23 @@ class VimeoChannelIE(VimeoBaseInfoExtractor): if password is None: raise ExtractorError('This album is protected by a password, use the --video-password option', expected=True) fields = self._hidden_inputs(login_form) - token = self._extract_xsrft(webpage) + token, vuid = self._extract_xsrft_and_vuid(webpage) fields['token'] = token fields['password'] = password - post = urlencode_postdata(fields) + post = urlencode_postdata(encode_dict(fields)) password_path = self._search_regex( r'action="([^"]+)"', login_form, 'password URL') password_url = compat_urlparse.urljoin(page_url, password_path) - password_request = compat_urllib_request.Request(password_url, post) + password_request = sanitized_Request(password_url, post) password_request.add_header('Content-type', 'application/x-www-form-urlencoded') - self._set_cookie('vimeo.com', 'xsrft', token) + self._set_vimeo_cookie('vuid', vuid) + self._set_vimeo_cookie('xsrft', token) return self._download_webpage( password_request, list_id, 'Verifying the password', 'Wrong password') - def _extract_videos(self, list_id, base_url): - video_ids = [] + def _title_and_entries(self, list_id, base_url): for pagenum in itertools.count(1): page_url = self._page_url(base_url, pagenum) webpage = self._download_webpage( @@ -484,18 +527,18 @@ class VimeoChannelIE(VimeoBaseInfoExtractor): if pagenum == 1: webpage = self._login_list_password(page_url, list_id, webpage) + yield self._extract_list_title(webpage) + + for video_id in re.findall(r'id="clip_(\d+?)"', webpage): + yield self.url_result('https://vimeo.com/%s' % video_id, 'Vimeo') - video_ids.extend(re.findall(r'id="clip_(\d+?)"', webpage)) if re.search(self._MORE_PAGES_INDICATOR, webpage, re.DOTALL) is None: break - entries = [self.url_result('https://vimeo.com/%s' % video_id, 'Vimeo') - for video_id in video_ids] - return {'_type': 'playlist', - 'id': list_id, - 'title': self._extract_list_title(webpage), - 'entries': entries, - } + def _extract_videos(self, list_id, base_url): + title_and_entries = self._title_and_entries(list_id, base_url) + list_title = next(title_and_entries) + return self.playlist_result(title_and_entries, list_id, list_title) def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) @@ -556,7 +599,7 @@ class VimeoAlbumIE(VimeoChannelIE): class VimeoGroupsIE(VimeoAlbumIE): IE_NAME = 'vimeo:group' - _VALID_URL = r'https://vimeo\.com/groups/(?P<name>[^/]+)' + _VALID_URL = r'https://vimeo\.com/groups/(?P<name>[^/]+)(?:/(?!videos?/\d+)|$)' _TESTS = [{ 'url': 'https://vimeo.com/groups/rolexawards', 'info_dict': { @@ -625,7 +668,7 @@ class VimeoWatchLaterIE(VimeoChannelIE): def _page_url(self, base_url, pagenum): url = '%s/page:%d/' % (base_url, pagenum) - request = compat_urllib_request.Request(url) + request = sanitized_Request(url) # Set the header to get a partial html page with the ids, # the normal page doesn't contain them. 
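# [editor's note] sanitized_Request replaces compat_urllib_request.Request throughout
# this commit. A rough sketch of the idea, assuming the helper percent-escapes URL
# characters that urllib would otherwise mishandle; the real implementation lives in
# youtube_dl/utils.py and may differ. Python 3 urllib is used here for brevity.
import urllib.parse
import urllib.request

def sanitized_request(url, data=None, headers=None):
    parts = urllib.parse.urlsplit(url)
    # Re-quote path and query, leaving existing %-escapes and separators intact.
    path = urllib.parse.quote(parts.path, safe='/%')
    query = urllib.parse.quote(parts.query, safe='=&%')
    safe_url = urllib.parse.urlunsplit(
        (parts.scheme, parts.netloc, path, query, parts.fragment))
    return urllib.request.Request(safe_url, data, headers or {})

# Hypothetical URL with a non-ASCII character that a plain Request would choke on:
req = sanitized_request('http://example.com/watch/ü-video')
req.add_header('X-Requested-With', 'XMLHttpRequest')  # ask for the partial HTML page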
request.add_header('X-Requested-With', 'XMLHttpRequest') diff --git a/youtube_dl/extractor/vine.py b/youtube_dl/extractor/vine.py index c733a48fa..cb2a4b0b5 100644 --- a/youtube_dl/extractor/vine.py +++ b/youtube_dl/extractor/vine.py @@ -1,10 +1,14 @@ +# coding: utf-8 from __future__ import unicode_literals import re import itertools from .common import InfoExtractor -from ..utils import unified_strdate +from ..utils import ( + int_or_none, + unified_strdate, +) class VineIE(InfoExtractor): @@ -17,10 +21,12 @@ class VineIE(InfoExtractor): 'ext': 'mp4', 'title': 'Chicken.', 'alt_title': 'Vine by Jack Dorsey', - 'description': 'Chicken.', 'upload_date': '20130519', 'uploader': 'Jack Dorsey', 'uploader_id': '76', + 'like_count': int, + 'comment_count': int, + 'repost_count': int, }, }, { 'url': 'https://vine.co/v/MYxVapFvz2z', @@ -29,11 +35,13 @@ class VineIE(InfoExtractor): 'id': 'MYxVapFvz2z', 'ext': 'mp4', 'title': 'Fuck Da Police #Mikebrown #justice #ferguson #prayforferguson #protesting #NMOS14', - 'alt_title': 'Vine by Luna', - 'description': 'Fuck Da Police #Mikebrown #justice #ferguson #prayforferguson #protesting #NMOS14', + 'alt_title': 'Vine by Mars Ruiz', 'upload_date': '20140815', - 'uploader': 'Luna', + 'uploader': 'Mars Ruiz', 'uploader_id': '1102363502380728320', + 'like_count': int, + 'comment_count': int, + 'repost_count': int, }, }, { 'url': 'https://vine.co/v/bxVjBbZlPUH', @@ -43,14 +51,33 @@ class VineIE(InfoExtractor): 'ext': 'mp4', 'title': '#mw3 #ac130 #killcam #angelofdeath', 'alt_title': 'Vine by Z3k3', - 'description': '#mw3 #ac130 #killcam #angelofdeath', 'upload_date': '20130430', 'uploader': 'Z3k3', 'uploader_id': '936470460173008896', + 'like_count': int, + 'comment_count': int, + 'repost_count': int, }, }, { 'url': 'https://vine.co/oembed/MYxVapFvz2z.json', 'only_matching': True, + }, { + 'url': 'https://vine.co/v/e192BnZnZ9V', + 'info_dict': { + 'id': 'e192BnZnZ9V', + 'ext': 'mp4', + 'title': 'ยิ้ม~ เขิน~ อาย~ น่าร้ากอ้ะ >//< @n_whitewo @orlameena #lovesicktheseries #lovesickseason2', + 'alt_title': 'Vine by Pimry_zaa', + 'upload_date': '20150705', + 'uploader': 'Pimry_zaa', + 'uploader_id': '1135760698325307392', + 'like_count': int, + 'comment_count': int, + 'repost_count': int, + }, + 'params': { + 'skip_download': True, + }, }] def _real_extract(self, url): @@ -58,32 +85,33 @@ class VineIE(InfoExtractor): webpage = self._download_webpage('https://vine.co/v/' + video_id, video_id) data = self._parse_json( - self._html_search_regex( - r'window\.POST_DATA = { %s: ({.+?}) };\s*</script>' % video_id, + self._search_regex( + r'window\.POST_DATA\s*=\s*{\s*%s\s*:\s*({.+?})\s*};\s*</script>' % video_id, webpage, 'vine data'), video_id) formats = [{ 'format_id': '%(format)s-%(rate)s' % f, - 'vcodec': f['format'], - 'quality': f['rate'], + 'vcodec': f.get('format'), + 'quality': f.get('rate'), 'url': f['videoUrl'], - } for f in data['videoUrls']] + } for f in data['videoUrls'] if f.get('videoUrl')] self._sort_formats(formats) + username = data.get('username') + return { 'id': video_id, - 'title': self._og_search_title(webpage), - 'alt_title': self._og_search_description(webpage, default=None), - 'description': data['description'], - 'thumbnail': data['thumbnailUrl'], - 'upload_date': unified_strdate(data['created']), - 'uploader': data['username'], - 'uploader_id': data['userIdStr'], - 'like_count': data['likes']['count'], - 'comment_count': data['comments']['count'], - 'repost_count': data['reposts']['count'], + 'title': data.get('description') or 
self._og_search_title(webpage), + 'alt_title': 'Vine by %s' % username if username else self._og_search_description(webpage, default=None), + 'thumbnail': data.get('thumbnailUrl'), + 'upload_date': unified_strdate(data.get('created')), + 'uploader': username, + 'uploader_id': data.get('userIdStr'), + 'like_count': int_or_none(data.get('likes', {}).get('count')), + 'comment_count': int_or_none(data.get('comments', {}).get('count')), + 'repost_count': int_or_none(data.get('reposts', {}).get('count')), 'formats': formats, } diff --git a/youtube_dl/extractor/vk.py b/youtube_dl/extractor/vk.py index c30c5a8e5..90557fa61 100644 --- a/youtube_dl/extractor/vk.py +++ b/youtube_dl/extractor/vk.py @@ -8,15 +8,17 @@ from .common import InfoExtractor from ..compat import ( compat_str, compat_urllib_parse, - compat_urllib_request, ) from ..utils import ( ExtractorError, orderedSet, + sanitized_Request, str_to_int, unescapeHTML, unified_strdate, ) +from .vimeo import VimeoIE +from .pladform import PladformIE class VKIE(InfoExtractor): @@ -163,6 +165,11 @@ class VKIE(InfoExtractor): # vk wrapper 'url': 'http://www.biqle.ru/watch/847655_160197695', 'only_matching': True, + }, + { + # pladform embed + 'url': 'https://vk.com/video-76116461_171554880', + 'only_matching': True, } ] @@ -181,7 +188,7 @@ class VKIE(InfoExtractor): 'pass': password.encode('cp1251'), }) - request = compat_urllib_request.Request( + request = sanitized_Request( 'https://login.vk.com/?act=login', compat_urllib_parse.urlencode(login_form).encode('utf-8')) login_page = self._download_webpage( @@ -249,10 +256,17 @@ class VKIE(InfoExtractor): if youtube_url: return self.url_result(youtube_url, 'Youtube') + vimeo_url = VimeoIE._extract_vimeo_url(url, info_page) + if vimeo_url is not None: + return self.url_result(vimeo_url) + + pladform_url = PladformIE._extract_url(info_page) + if pladform_url: + return self.url_result(pladform_url) + m_rutube = re.search( r'\ssrc="((?:https?:)?//rutube\.ru\\?/video\\?/embed(?:.*?))\\?"', info_page) if m_rutube is not None: - self.to_screen('rutube video detected') rutube_url = self._proto_relative_url( m_rutube.group(1).replace('\\', '')) return self.url_result(rutube_url) @@ -276,9 +290,13 @@ class VKIE(InfoExtractor): mobj.group(1) + ' ' + mobj.group(2) upload_date = unified_strdate(mobj.group(1) + ' ' + mobj.group(2)) - view_count = str_to_int(self._search_regex( - r'"mv_views_count_number"[^>]*>([\d,.]+) views<', - info_page, 'view count', fatal=False)) + view_count = None + views = self._html_search_regex( + r'"mv_views_count_number"[^>]*>(.+?\bviews?)<', + info_page, 'view count', fatal=False) + if views: + view_count = str_to_int(self._search_regex( + r'([\d,.]+)', views, 'view count', fatal=False)) formats = [{ 'format_id': k, diff --git a/youtube_dl/extractor/vodlocker.py b/youtube_dl/extractor/vodlocker.py index ccf1928b5..357594a11 100644 --- a/youtube_dl/extractor/vodlocker.py +++ b/youtube_dl/extractor/vodlocker.py @@ -2,14 +2,15 @@ from __future__ import unicode_literals from .common import InfoExtractor -from ..compat import ( - compat_urllib_parse, - compat_urllib_request, +from ..compat import compat_urllib_parse +from ..utils import ( + ExtractorError, + sanitized_Request, ) class VodlockerIE(InfoExtractor): - _VALID_URL = r'https?://(?:www\.)?vodlocker\.com/(?P<id>[0-9a-zA-Z]+)(?:\..*?)?' + _VALID_URL = r'https?://(?:www\.)?vodlocker\.com/(?:embed-)?(?P<id>[0-9a-zA-Z]+)(?:\..*?)?' 
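# [editor's note] Quick sanity check of the broadened vodlocker pattern above: the new
# optional (?:embed-)? group lets one extractor claim both the watch page and the embed
# page. The first URL comes from the test case below; the embed variant is hypothetical.
import re

VALID_URL = r'https?://(?:www\.)?vodlocker\.com/(?:embed-)?(?P<id>[0-9a-zA-Z]+)(?:\..*?)?'

for candidate in ('http://vodlocker.com/e8wvyzz4sl42',
                  'http://vodlocker.com/embed-e8wvyzz4sl42.html'):
    print(re.match(VALID_URL, candidate).group('id'))  # -> e8wvyzz4sl42 (both times)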
_TESTS = [{ 'url': 'http://vodlocker.com/e8wvyzz4sl42', @@ -26,12 +27,18 @@ class VodlockerIE(InfoExtractor): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) + if any(p in webpage for p in ( + '>THIS FILE WAS DELETED<', + '>File Not Found<', + 'The file you were looking for could not be found, sorry for any inconvenience.<')): + raise ExtractorError('Video %s does not exist' % video_id, expected=True) + fields = self._hidden_inputs(webpage) if fields['op'] == 'download1': self._sleep(3, video_id) # they do detect when requests happen too fast! post = compat_urllib_parse.urlencode(fields) - req = compat_urllib_request.Request(url, post) + req = sanitized_Request(url, post) req.add_header('Content-type', 'application/x-www-form-urlencoded') webpage = self._download_webpage( req, video_id, 'Downloading video page') diff --git a/youtube_dl/extractor/voicerepublic.py b/youtube_dl/extractor/voicerepublic.py index 254383d6c..93d15a556 100644 --- a/youtube_dl/extractor/voicerepublic.py +++ b/youtube_dl/extractor/voicerepublic.py @@ -3,14 +3,12 @@ from __future__ import unicode_literals import re from .common import InfoExtractor -from ..compat import ( - compat_urllib_request, - compat_urlparse, -) +from ..compat import compat_urlparse from ..utils import ( ExtractorError, determine_ext, int_or_none, + sanitized_Request, ) @@ -37,7 +35,7 @@ class VoiceRepublicIE(InfoExtractor): def _real_extract(self, url): display_id = self._match_id(url) - req = compat_urllib_request.Request( + req = sanitized_Request( compat_urlparse.urljoin(url, '/talks/%s' % display_id)) # Older versions of Firefox get redirected to an "upgrade browser" page req.add_header('User-Agent', 'youtube-dl') diff --git a/youtube_dl/extractor/wdr.py b/youtube_dl/extractor/wdr.py index b46802306..a851578e0 100644 --- a/youtube_dl/extractor/wdr.py +++ b/youtube_dl/extractor/wdr.py @@ -10,8 +10,8 @@ from ..compat import ( compat_urlparse, ) from ..utils import ( - determine_ext, unified_strdate, + qualities, ) @@ -33,6 +33,7 @@ class WDRIE(InfoExtractor): 'params': { 'skip_download': True, }, + 'skip': 'Page Not Found', }, { 'url': 'http://www1.wdr.de/themen/av/videomargaspiegelisttot101-videoplayer.html', @@ -47,6 +48,7 @@ class WDRIE(InfoExtractor): 'params': { 'skip_download': True, }, + 'skip': 'Page Not Found', }, { 'url': 'http://www1.wdr.de/themen/kultur/audioerlebtegeschichtenmargaspiegel100-audioplayer.html', @@ -71,6 +73,7 @@ class WDRIE(InfoExtractor): 'upload_date': '20140717', 'is_live': False }, + 'skip': 'Page Not Found', }, { 'url': 'http://www1.wdr.de/mediathek/video/sendungen/quarks_und_co/filterseite-quarks-und-co100.html', @@ -83,10 +86,10 @@ class WDRIE(InfoExtractor): 'url': 'http://www1.wdr.de/mediathek/video/livestream/index.html', 'info_dict': { 'id': 'mdb-103364', - 'title': 're:^WDR Fernsehen [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$', + 'title': 're:^WDR Fernsehen Live [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$', 'description': 'md5:ae2ff888510623bf8d4b115f95a9b7c9', 'ext': 'flv', - 'upload_date': '20150212', + 'upload_date': '20150101', 'is_live': True }, 'params': { @@ -105,7 +108,9 @@ class WDRIE(InfoExtractor): if mobj.group('player') is None: entries = [ self.url_result(page_url + href, 'WDR') - for href in re.findall(r'<a href="/?(.+?%s\.html)" rel="nofollow"' % self._PLAYER_REGEX, webpage) + for href in re.findall( + r'<a href="/?(.+?%s\.html)" rel="nofollow"' % self._PLAYER_REGEX, + webpage) ] if entries: # Playlist page @@ -130,8 +135,8 @@ class 
WDRIE(InfoExtractor): note='Downloading playlist page %d' % page_num) return self.playlist_result(entries, page_id) - flashvars = compat_parse_qs( - self._html_search_regex(r'<param name="flashvars" value="([^"]+)"', webpage, 'flashvars')) + flashvars = compat_parse_qs(self._html_search_regex( + r'<param name="flashvars" value="([^"]+)"', webpage, 'flashvars')) page_id = flashvars['trackerClipId'][0] video_url = flashvars['dslSrc'][0] @@ -145,30 +150,60 @@ class WDRIE(InfoExtractor): if 'trackerClipAirTime' in flashvars: upload_date = flashvars['trackerClipAirTime'][0] else: - upload_date = self._html_search_meta('DC.Date', webpage, 'upload date') + upload_date = self._html_search_meta( + 'DC.Date', webpage, 'upload date') if upload_date: upload_date = unified_strdate(upload_date) + formats = [] + preference = qualities(['S', 'M', 'L', 'XL']) + if video_url.endswith('.f4m'): - video_url += '?hdcore=3.2.0&plugin=aasp-3.2.0.77.18' - ext = 'flv' + formats.extend(self._extract_f4m_formats( + video_url + '?hdcore=3.2.0&plugin=aasp-3.2.0.77.18', page_id, + f4m_id='hds', fatal=False)) elif video_url.endswith('.smil'): - fmt = self._extract_smil_formats(video_url, page_id)[0] - video_url = fmt['url'] - sep = '&' if '?' in video_url else '?' - video_url += sep - video_url += 'hdcore=3.3.0&plugin=aasp-3.3.0.99.43' - ext = fmt['ext'] + formats.extend(self._extract_smil_formats( + video_url, page_id, False, { + 'hdcore': '3.3.0', + 'plugin': 'aasp-3.3.0.99.43', + })) else: - ext = determine_ext(video_url) + formats.append({ + 'url': video_url, + 'http_headers': { + 'User-Agent': 'mobile', + }, + }) + + m3u8_url = self._search_regex( + r'rel="adaptiv"[^>]+href="([^"]+)"', + webpage, 'm3u8 url', default=None) + if m3u8_url: + formats.extend(self._extract_m3u8_formats( + m3u8_url, page_id, 'mp4', 'm3u8_native', + m3u8_id='hls', fatal=False)) + + direct_urls = re.findall( + r'rel="web(S|M|L|XL)"[^>]+href="([^"]+)"', webpage) + if direct_urls: + for quality, video_url in direct_urls: + formats.append({ + 'url': video_url, + 'preference': preference(quality), + 'http_headers': { + 'User-Agent': 'mobile', + }, + }) + + self._sort_formats(formats) description = self._html_search_meta('Description', webpage, 'description') return { 'id': page_id, - 'url': video_url, - 'ext': ext, + 'formats': formats, 'title': title, 'description': description, 'thumbnail': thumbnail, diff --git a/youtube_dl/extractor/wimp.py b/youtube_dl/extractor/wimp.py index e4f50e64c..041ff6c55 100644 --- a/youtube_dl/extractor/wimp.py +++ b/youtube_dl/extractor/wimp.py @@ -5,7 +5,7 @@ from .youtube import YoutubeIE class WimpIE(InfoExtractor): - _VALID_URL = r'http://(?:www\.)?wimp\.com/(?P<id>[^/]+)/' + _VALID_URL = r'http://(?:www\.)?wimp\.com/(?P<id>[^/]+)' _TESTS = [{ 'url': 'http://www.wimp.com/maruexhausted/', 'md5': 'ee21217ffd66d058e8b16be340b74883', @@ -28,18 +28,23 @@ class WimpIE(InfoExtractor): def _real_extract(self, url): video_id = self._match_id(url) + webpage = self._download_webpage(url, video_id) - video_url = self._search_regex( - [r"[\"']file[\"']\s*[:,]\s*[\"'](.+?)[\"']", r"videoId\s*:\s*[\"']([^\"']+)[\"']"], - webpage, 'video URL') - if YoutubeIE.suitable(video_url): - self.to_screen('Found YouTube video') + + youtube_id = self._search_regex( + r"videoId\s*:\s*[\"']([0-9A-Za-z_-]{11})[\"']", + webpage, 'video URL', default=None) + if youtube_id: return { '_type': 'url', - 'url': video_url, + 'url': youtube_id, 'ie_key': YoutubeIE.ie_key(), } + video_url = self._search_regex( + 
r'<video[^>]+>\s*<source[^>]+src=(["\'])(?P<url>.+?)\1', + webpage, 'video URL', group='url') + return { 'id': video_id, 'url': video_url, diff --git a/youtube_dl/extractor/wistia.py b/youtube_dl/extractor/wistia.py index 13a079151..fdb16d91c 100644 --- a/youtube_dl/extractor/wistia.py +++ b/youtube_dl/extractor/wistia.py @@ -1,8 +1,10 @@ from __future__ import unicode_literals from .common import InfoExtractor -from ..compat import compat_urllib_request -from ..utils import ExtractorError +from ..utils import ( + ExtractorError, + sanitized_Request, +) class WistiaIE(InfoExtractor): @@ -23,7 +25,7 @@ class WistiaIE(InfoExtractor): def _real_extract(self, url): video_id = self._match_id(url) - request = compat_urllib_request.Request(self._API_URL.format(video_id)) + request = sanitized_Request(self._API_URL.format(video_id)) request.add_header('Referer', url) # Some videos require this. data_json = self._download_json(request, video_id) if data_json.get('error'): diff --git a/youtube_dl/extractor/wsj.py b/youtube_dl/extractor/wsj.py index 2ddf29a69..5a897371d 100644 --- a/youtube_dl/extractor/wsj.py +++ b/youtube_dl/extractor/wsj.py @@ -84,6 +84,5 @@ class WSJIE(InfoExtractor): 'duration': duration, 'upload_date': upload_date, 'title': title, - 'formats': formats, 'categories': categories, } diff --git a/youtube_dl/extractor/gorillavid.py b/youtube_dl/extractor/xfileshare.py index d23e3eac1..a3236e66c 100644 --- a/youtube_dl/extractor/gorillavid.py +++ b/youtube_dl/extractor/xfileshare.py @@ -1,25 +1,23 @@ -# -*- coding: utf-8 -*- +# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor -from ..compat import ( - compat_urllib_parse, - compat_urllib_request, -) +from ..compat import compat_urllib_parse from ..utils import ( ExtractorError, encode_dict, int_or_none, + sanitized_Request, ) -class GorillaVidIE(InfoExtractor): - IE_DESC = 'GorillaVid.in, daclips.in, movpod.in, fastvideo.in, realvid.net and filehoot.com' +class XFileShareIE(InfoExtractor): + IE_DESC = 'XFileShare based sites: GorillaVid.in, daclips.in, movpod.in, fastvideo.in, realvid.net, filehoot.com and vidto.me' _VALID_URL = r'''(?x) https?://(?P<host>(?:www\.)? - (?:daclips\.in|gorillavid\.in|movpod\.in|fastvideo\.in|realvid\.net|filehoot\.com))/ + (?:daclips\.in|gorillavid\.in|movpod\.in|fastvideo\.in|realvid\.net|filehoot\.com|vidto\.me))/ (?:embed-)?(?P<id>[0-9a-zA-Z]+)(?:-[0-9]+x[0-9]+\.html)? 
''' @@ -76,6 +74,13 @@ class GorillaVidIE(InfoExtractor): 'title': 'youtube-dl test video \'äBaW_jenozKc.mp4.mp4', 'thumbnail': 're:http://.*\.jpg', } + }, { + 'url': 'http://vidto.me/ku5glz52nqe1.html', + 'info_dict': { + 'id': 'ku5glz52nqe1', + 'ext': 'mp4', + 'title': 'test' + } }] def _real_extract(self, url): @@ -99,18 +104,23 @@ class GorillaVidIE(InfoExtractor): post = compat_urllib_parse.urlencode(encode_dict(fields)) - req = compat_urllib_request.Request(url, post) + req = sanitized_Request(url, post) req.add_header('Content-type', 'application/x-www-form-urlencoded') webpage = self._download_webpage(req, video_id, 'Downloading video page') - title = self._search_regex( - [r'style="z-index: [0-9]+;">([^<]+)</span>', r'<td nowrap>([^<]+)</td>', r'>Watch (.+) '], - webpage, 'title', default=None) or self._og_search_title(webpage) + title = (self._search_regex( + [r'style="z-index: [0-9]+;">([^<]+)</span>', + r'<td nowrap>([^<]+)</td>', + r'>Watch (.+) ', + r'<h2 class="video-page-head">([^<]+)</h2>'], + webpage, 'title', default=None) or self._og_search_title(webpage)).strip() video_url = self._search_regex( - r'file\s*:\s*["\'](http[^"\']+)["\'],', webpage, 'file url') + [r'file\s*:\s*["\'](http[^"\']+)["\'],', + r'file_link\s*=\s*\'(https?:\/\/[0-9a-zA-z.\/\-_]+)'], + webpage, 'file url') thumbnail = self._search_regex( - r'image\s*:\s*["\'](http[^"\']+)["\'],', webpage, 'thumbnail', fatal=False) + r'image\s*:\s*["\'](http[^"\']+)["\'],', webpage, 'thumbnail', default=None) formats = [{ 'format_id': 'sd', diff --git a/youtube_dl/extractor/xstream.py b/youtube_dl/extractor/xstream.py index 71584c291..76c91bd92 100644 --- a/youtube_dl/extractor/xstream.py +++ b/youtube_dl/extractor/xstream.py @@ -42,11 +42,7 @@ class XstreamIE(InfoExtractor): 'only_matching': True, }] - def _real_extract(self, url): - mobj = re.match(self._VALID_URL, url) - partner_id = mobj.group('partner_id') - video_id = mobj.group('id') - + def _extract_video_info(self, partner_id, video_id): data = self._download_xml( 'http://frontend.xstream.dk/%s/feed/video/?platform=web&id=%s' % (partner_id, video_id), @@ -97,6 +93,7 @@ class XstreamIE(InfoExtractor): formats.append({ 'url': link.get('href'), 'format_id': link.get('rel'), + 'preference': 1, }) thumbnails = [{ @@ -113,3 +110,10 @@ class XstreamIE(InfoExtractor): 'formats': formats, 'thumbnails': thumbnails, } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + partner_id = mobj.group('partner_id') + video_id = mobj.group('id') + + return self._extract_video_info(partner_id, video_id) diff --git a/youtube_dl/extractor/xtube.py b/youtube_dl/extractor/xtube.py index 779e4f46a..a1fe24050 100644 --- a/youtube_dl/extractor/xtube.py +++ b/youtube_dl/extractor/xtube.py @@ -3,12 +3,10 @@ from __future__ import unicode_literals import re from .common import InfoExtractor -from ..compat import ( - compat_urllib_request, - compat_urllib_parse_unquote, -) +from ..compat import compat_urllib_parse_unquote from ..utils import ( parse_duration, + sanitized_Request, str_to_int, ) @@ -32,7 +30,7 @@ class XTubeIE(InfoExtractor): def _real_extract(self, url): video_id = self._match_id(url) - req = compat_urllib_request.Request(url) + req = sanitized_Request(url) req.add_header('Cookie', 'age_verified=1') webpage = self._download_webpage(req, video_id) diff --git a/youtube_dl/extractor/xvideos.py b/youtube_dl/extractor/xvideos.py index 5dcf2fdd1..710ad5041 100644 --- a/youtube_dl/extractor/xvideos.py +++ b/youtube_dl/extractor/xvideos.py @@ -3,14 +3,12 @@ 
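The xvideos hunk below is one more instance of the mechanical compat_urllib_request.Request -> sanitized_Request migration that runs through this whole range (vodlocker, voicerepublic, wistia, xtube, yandexmusic, youku, youporn, youtube). The helper lives in youtube_dl/utils.py; at this point in the series it amounts to roughly the sketch below (hedged: the exact sanitize_url body may differ), its job being to repair scheme-relative '//host/path' URLs that urllib refuses to open. Separately, note that the new vidto.me file_link pattern a few hunks up uses the character class [0-9a-zA-z.\/\-_]; the A-z range also admits the six punctuation characters between 'Z' and 'a', so [0-9a-zA-Z.\/\-_] was presumably intended.

from youtube_dl.compat import compat_urllib_request

def sanitize_url(url):
    # Scheme-relative URLs ('//host/path') break urllib; default them to http.
    return 'http:%s' % url if url.startswith('//') else url

def sanitized_Request(url, *args, **kwargs):
    return compat_urllib_request.Request(sanitize_url(url), *args, **kwargs)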
from __future__ import unicode_literals import re from .common import InfoExtractor -from ..compat import ( - compat_urllib_parse_unquote, - compat_urllib_request, -) +from ..compat import compat_urllib_parse_unquote from ..utils import ( clean_html, ExtractorError, determine_ext, + sanitized_Request, ) @@ -48,7 +46,7 @@ class XVideosIE(InfoExtractor): 'url': video_url, }] - android_req = compat_urllib_request.Request(url) + android_req = sanitized_Request(url) android_req.add_header('User-Agent', self._ANDROID_USER_AGENT) android_webpage = self._download_webpage(android_req, video_id, fatal=False) diff --git a/youtube_dl/extractor/yandexmusic.py b/youtube_dl/extractor/yandexmusic.py index 4098e4629..d3cc1a29f 100644 --- a/youtube_dl/extractor/yandexmusic.py +++ b/youtube_dl/extractor/yandexmusic.py @@ -8,11 +8,11 @@ from .common import InfoExtractor from ..compat import ( compat_str, compat_urllib_parse, - compat_urllib_request, ) from ..utils import ( int_or_none, float_or_none, + sanitized_Request, ) @@ -46,6 +46,12 @@ class YandexMusicTrackIE(InfoExtractor): % (data['host'], key, data['ts'] + data['path'], storage[1])) def _get_track_info(self, track): + thumbnail = None + cover_uri = track.get('albums', [{}])[0].get('coverUri') + if cover_uri: + thumbnail = cover_uri.replace('%%', 'orig') + if not thumbnail.startswith('http'): + thumbnail = 'http://' + thumbnail return { 'id': track['id'], 'ext': 'mp3', @@ -53,6 +59,7 @@ class YandexMusicTrackIE(InfoExtractor): 'title': '%s - %s' % (track['artists'][0]['name'], track['title']), 'filesize': int_or_none(track.get('fileSize')), 'duration': float_or_none(track.get('durationMs'), 1000), + 'thumbnail': thumbnail, } def _real_extract(self, url): @@ -147,7 +154,7 @@ class YandexMusicPlaylistIE(YandexMusicPlaylistBaseIE): if len(tracks) < len(track_ids): present_track_ids = set([compat_str(track['id']) for track in tracks if track.get('id')]) missing_track_ids = set(map(compat_str, track_ids)) - set(present_track_ids) - request = compat_urllib_request.Request( + request = sanitized_Request( 'https://music.yandex.ru/handlers/track-entries.jsx', compat_urllib_parse.urlencode({ 'entries': ','.join(missing_track_ids), diff --git a/youtube_dl/extractor/youku.py b/youtube_dl/extractor/youku.py index 2e81d9223..3a3432be8 100644 --- a/youtube_dl/extractor/youku.py +++ b/youtube_dl/extractor/youku.py @@ -4,12 +4,13 @@ from __future__ import unicode_literals import base64 from .common import InfoExtractor -from ..utils import ExtractorError - from ..compat import ( compat_urllib_parse, compat_ord, - compat_urllib_request, +) +from ..utils import ( + ExtractorError, + sanitized_Request, ) @@ -24,8 +25,8 @@ class YoukuIE(InfoExtractor): ''' _TESTS = [{ + # MD5 is unstable 'url': 'http://v.youku.com/v_show/id_XMTc1ODE5Njcy.html', - 'md5': '5f3af4192eabacc4501508d54a8cabd7', 'info_dict': { 'id': 'XMTc1ODE5Njcy_part1', 'title': '★Smile﹗♡ Git Fresh -Booty Music舞蹈.', @@ -41,6 +42,7 @@ class YoukuIE(InfoExtractor): 'title': '武媚娘传奇 85', }, 'playlist_count': 11, + 'skip': 'Available in China only', }, { 'url': 'http://v.youku.com/v_show/id_XMTI1OTczNDM5Mg==.html', 'info_dict': { @@ -48,7 +50,6 @@ class YoukuIE(InfoExtractor): 'title': '花千骨 04', }, 'playlist_count': 13, - 'skip': 'Available in China only', }, { 'url': 'http://v.youku.com/v_show/id_XNjA1NzA2Njgw.html', 'note': 'Video protected with password', @@ -62,7 +63,7 @@ class YoukuIE(InfoExtractor): }, }] - def construct_video_urls(self, data1, data2): + def construct_video_urls(self, data): # get sid, token 
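# yk_t below is plain RC4: the first loop is the key-scheduling algorithm
# (KSA), permuting 0..255 under the key s1; the second loop generates the
# keystream and XORs it into s2 (PRGA). With the fixed key b'becaf9be' it
# decrypts the base64-decoded security['encrypt_string'] into 'sid_token'.
# A minimal standalone sketch of the same cipher (assumes Python 3 bytes):
def rc4(key, data):
    S = list(range(256))
    j = 0
    for i in range(256):               # KSA: permute S under the key
        j = (j + S[i] + key[i % len(key)]) % 256
        S[i], S[j] = S[j], S[i]
    out, i, j = bytearray(), 0, 0
    for c in data:                     # PRGA: XOR keystream into the data
        i = (i + 1) % 256
        j = (j + S[i]) % 256
        S[i], S[j] = S[j], S[i]
        out.append(c ^ S[(S[i] + S[j]) % 256])
    return bytes(out)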
def yk_t(s1, s2): ls = list(range(256)) @@ -80,34 +81,24 @@ class YoukuIE(InfoExtractor): return bytes(s) sid, token = yk_t( - b'becaf9be', base64.b64decode(data2['ep'].encode('ascii')) + b'becaf9be', base64.b64decode(data['security']['encrypt_string'].encode('ascii')) ).decode('ascii').split('_') # get oip - oip = data2['ip'] - - # get fileid - string_ls = list( - 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ/\:._-1234567890') - shuffled_string_ls = [] - seed = data1['seed'] - N = len(string_ls) - for ii in range(N): - seed = (seed * 0xd3 + 0x754f) % 0x10000 - idx = seed * len(string_ls) // 0x10000 - shuffled_string_ls.append(string_ls[idx]) - del string_ls[idx] + oip = data['security']['ip'] fileid_dict = {} - for format in data1['streamtypes']: - streamfileid = [ - int(i) for i in data1['streamfileids'][format].strip('*').split('*')] - fileid = ''.join( - [shuffled_string_ls[i] for i in streamfileid]) - fileid_dict[format] = fileid[:8] + '%s' + fileid[10:] + for stream in data['stream']: + format = stream.get('stream_type') + fileid = stream['stream_fileid'] + fileid_dict[format] = fileid def get_fileid(format, n): - fileid = fileid_dict[format] % hex(int(n))[2:].upper().zfill(2) + number = hex(int(str(n), 10))[2:].upper() + if len(number) == 1: + number = '0' + number + streamfileids = fileid_dict[format] + fileid = streamfileids[0:8] + number + streamfileids[10:] return fileid # get ep @@ -122,15 +113,15 @@ class YoukuIE(InfoExtractor): # generate video_urls video_urls_dict = {} - for format in data1['streamtypes']: + for stream in data['stream']: + format = stream.get('stream_type') video_urls = [] - for dt in data1['segs'][format]: - n = str(int(dt['no'])) + for dt in stream['segs']: + n = str(stream['segs'].index(dt)) param = { - 'K': dt['k'], + 'K': dt['key'], 'hd': self.get_hd(format), 'myp': 0, - 'ts': dt['seconds'], 'ypp': 0, 'ctype': 12, 'ev': 1, @@ -141,7 +132,7 @@ class YoukuIE(InfoExtractor): video_url = \ 'http://k.youku.com/player/getFlvPath/' + \ 'sid/' + sid + \ - '_' + str(int(n) + 1).zfill(2) + \ + '_00' + \ '/st/' + self.parse_ext_l(format) + \ '/fileid/' + get_fileid(format, n) + '?' 
+ \ compat_urllib_parse.urlencode(param) @@ -152,23 +143,31 @@ class YoukuIE(InfoExtractor): def get_hd(self, fm): hd_id_dict = { + '3gp': '0', + '3gphd': '1', 'flv': '0', + 'flvhd': '0', 'mp4': '1', + 'mp4hd': '1', + 'mp4hd2': '1', + 'mp4hd3': '1', 'hd2': '2', 'hd3': '3', - '3gp': '0', - '3gphd': '1' } return hd_id_dict[fm] def parse_ext_l(self, fm): ext_dict = { + '3gp': 'flv', + '3gphd': 'mp4', 'flv': 'flv', + 'flvhd': 'flv', 'mp4': 'mp4', + 'mp4hd': 'mp4', + 'mp4hd2': 'flv', + 'mp4hd3': 'flv', 'hd2': 'flv', 'hd3': 'flv', - '3gp': 'flv', - '3gphd': 'mp4' } return ext_dict[fm] @@ -177,9 +176,13 @@ class YoukuIE(InfoExtractor): '3gp': 'h6', '3gphd': 'h5', 'flv': 'h4', + 'flvhd': 'h4', 'mp4': 'h3', + 'mp4hd': 'h3', + 'mp4hd2': 'h4', + 'mp4hd3': 'h4', 'hd2': 'h2', - 'hd3': 'h1' + 'hd3': 'h1', } return _dict[fm] @@ -187,45 +190,46 @@ class YoukuIE(InfoExtractor): video_id = self._match_id(url) def retrieve_data(req_url, note): - req = compat_urllib_request.Request(req_url) + headers = { + 'Referer': req_url, + } + self._set_cookie('youku.com', 'xreferrer', 'http://www.youku.com') + req = sanitized_Request(req_url, headers=headers) cn_verification_proxy = self._downloader.params.get('cn_verification_proxy') if cn_verification_proxy: req.add_header('Ytdl-request-proxy', cn_verification_proxy) raw_data = self._download_json(req, video_id, note=note) - return raw_data['data'][0] + + return raw_data['data'] video_password = self._downloader.params.get('videopassword', None) # request basic data - basic_data_url = 'http://v.youku.com/player/getPlayList/VideoIDS/%s' % video_id + basic_data_url = "http://play.youku.com/play/get.json?vid=%s&ct=12" % video_id if video_password: - basic_data_url += '?password=%s' % video_password - - data1 = retrieve_data( - basic_data_url, - 'Downloading JSON metadata 1') - data2 = retrieve_data( - 'http://v.youku.com/player/getPlayList/VideoIDS/%s/Pf/4/ctype/12/ev/1' % video_id, - 'Downloading JSON metadata 2') - - error_code = data1.get('error_code') - if error_code: - error = data1.get('error') - if error is not None and '因版权原因无法观看此视频' in error: + basic_data_url += '&pwd=%s' % video_password + + data = retrieve_data(basic_data_url, 'Downloading JSON metadata') + + error = data.get('error') + if error: + error_note = error.get('note') + if error_note is not None and '因版权原因无法观看此视频' in error_note: raise ExtractorError( 'Youku said: Sorry, this video is available in China only', expected=True) else: - msg = 'Youku server reported error %i' % error_code - if error is not None: - msg += ': ' + error + msg = 'Youku server reported error %i' % error.get('code') + if error_note is not None: + msg += ': ' + error_note raise ExtractorError(msg) - title = data1['title'] + # get video title + title = data['video']['title'] # generate video_urls_dict - video_urls_dict = self.construct_video_urls(data1, data2) + video_urls_dict = self.construct_video_urls(data) # construct info entries = [{ @@ -234,10 +238,11 @@ class YoukuIE(InfoExtractor): 'formats': [], # some formats are not available for all parts, we have to detect # which one has all - } for i in range(max(len(v) for v in data1['segs'].values()))] - for fm in data1['streamtypes']: + } for i in range(max(len(v.get('segs')) for v in data['stream']))] + for stream in data['stream']: + fm = stream.get('stream_type') video_urls = video_urls_dict[fm] - for video_url, seg, entry in zip(video_urls, data1['segs'][fm], entries): + for video_url, seg, entry in zip(video_urls, stream['segs'], entries): entry['formats'].append({ 'url': 
video_url, 'format_id': self.get_format_name(fm), diff --git a/youtube_dl/extractor/youporn.py b/youtube_dl/extractor/youporn.py index 4ba7c36db..dd724085a 100644 --- a/youtube_dl/extractor/youporn.py +++ b/youtube_dl/extractor/youporn.py @@ -1,121 +1,171 @@ from __future__ import unicode_literals - -import json import re -import sys from .common import InfoExtractor -from ..compat import ( - compat_urllib_parse_urlparse, - compat_urllib_request, -) from ..utils import ( - ExtractorError, + int_or_none, + sanitized_Request, + str_to_int, unescapeHTML, unified_strdate, ) -from ..aes import ( - aes_decrypt_text -) +from ..aes import aes_decrypt_text class YouPornIE(InfoExtractor): - _VALID_URL = r'^(?P<proto>https?://)(?:www\.)?(?P<url>youporn\.com/watch/(?P<videoid>[0-9]+)/(?P<title>[^/]+))' - _TEST = { + _VALID_URL = r'https?://(?:www\.)?youporn\.com/watch/(?P<id>\d+)/(?P<display_id>[^/?#&]+)' + _TESTS = [{ 'url': 'http://www.youporn.com/watch/505835/sex-ed-is-it-safe-to-masturbate-daily/', + 'md5': '71ec5fcfddacf80f495efa8b6a8d9a89', 'info_dict': { 'id': '505835', + 'display_id': 'sex-ed-is-it-safe-to-masturbate-daily', 'ext': 'mp4', - 'upload_date': '20101221', + 'title': 'Sex Ed: Is It Safe To Masturbate Daily?', 'description': 'Love & Sex Answers: http://bit.ly/DanAndJenn -- Is It Unhealthy To Masturbate Daily?', + 'thumbnail': 're:^https?://.*\.jpg$', 'uploader': 'Ask Dan And Jennifer', - 'title': 'Sex Ed: Is It Safe To Masturbate Daily?', + 'upload_date': '20101221', + 'average_rating': int, + 'view_count': int, + 'comment_count': int, + 'categories': list, + 'tags': list, 'age_limit': 18, - } - } + }, + }, { + # Anonymous User uploader + 'url': 'http://www.youporn.com/watch/561726/big-tits-awesome-brunette-on-amazing-webcam-show/?from=related3&al=2&from_id=561726&pos=4', + 'info_dict': { + 'id': '561726', + 'display_id': 'big-tits-awesome-brunette-on-amazing-webcam-show', + 'ext': 'mp4', + 'title': 'Big Tits Awesome Brunette On amazing webcam show', + 'description': 'http://sweetlivegirls.com Big Tits Awesome Brunette On amazing webcam show.mp4', + 'thumbnail': 're:^https?://.*\.jpg$', + 'uploader': 'Anonymous User', + 'upload_date': '20111125', + 'average_rating': int, + 'view_count': int, + 'comment_count': int, + 'categories': list, + 'tags': list, + 'age_limit': 18, + }, + 'params': { + 'skip_download': True, + }, + }] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) - video_id = mobj.group('videoid') - url = mobj.group('proto') + 'www.' 
+ mobj.group('url') + video_id = mobj.group('id') + display_id = mobj.group('display_id') - req = compat_urllib_request.Request(url) - req.add_header('Cookie', 'age_verified=1') - webpage = self._download_webpage(req, video_id) - age_limit = self._rta_search(webpage) + request = sanitized_Request(url) + request.add_header('Cookie', 'age_verified=1') + webpage = self._download_webpage(request, display_id) + + title = self._search_regex( + [r'(?:video_titles|videoTitle)\s*[:=]\s*(["\'])(?P<title>.+?)\1', + r'<h1[^>]+class=["\']heading\d?["\'][^>]*>([^<])<'], + webpage, 'title', group='title') - # Get JSON parameters - json_params = self._search_regex( - [r'videoJa?son\s*=\s*({.+})', - r'var\s+currentVideo\s*=\s*new\s+Video\((.+?)\)[,;]'], - webpage, 'JSON parameters') - try: - params = json.loads(json_params) - except ValueError: - raise ExtractorError('Invalid JSON') - - self.report_extraction(video_id) - try: - video_title = params['title'] - upload_date = unified_strdate(params['release_date_f']) - video_description = params['description'] - video_uploader = params['submitted_by'] - thumbnail = params['thumbnails'][0]['image'] - except KeyError: - raise ExtractorError('Missing JSON parameter: ' + sys.exc_info()[1]) - - # Get all of the links from the page - DOWNLOAD_LIST_RE = r'(?s)<ul class="downloadList">(?P<download_list>.*?)</ul>' - download_list_html = self._search_regex(DOWNLOAD_LIST_RE, - webpage, 'download list').strip() - LINK_RE = r'<a href="([^"]+)">' - links = re.findall(LINK_RE, download_list_html) - - # Get all encrypted links - encrypted_links = re.findall(r'var encryptedQuality[0-9]{3}URL = \'([a-zA-Z0-9+/]+={0,2})\';', webpage) - for encrypted_link in encrypted_links: - link = aes_decrypt_text(encrypted_link, video_title, 32).decode('utf-8') + links = [] + + sources = self._search_regex( + r'sources\s*:\s*({.+?})', webpage, 'sources', default=None) + if sources: + for _, link in re.findall(r'[^:]+\s*:\s*(["\'])(http.+?)\1', sources): + links.append(link) + + # Fallback #1 + for _, link in re.findall( + r'(?:videoUrl|videoSrc|videoIpadUrl|html5PlayerSrc)\s*[:=]\s*(["\'])(http.+?)\1', webpage): + links.append(link) + + # Fallback #2, this also contains extra low quality 180p format + for _, link in re.findall(r'<a[^>]+href=(["\'])(http.+?)\1[^>]+title=["\']Download [Vv]ideo', webpage): links.append(link) + # Fallback #3, encrypted links + for _, encrypted_link in re.findall( + r'encryptedQuality\d{3,4}URL\s*=\s*(["\'])([\da-zA-Z+/=]+)\1', webpage): + links.append(aes_decrypt_text(encrypted_link, title, 32).decode('utf-8')) + formats = [] - for link in links: - # A link looks like this: - # http://cdn1.download.youporn.phncdn.com/201210/31/8004515/480p_370k_8004515/YouPorn%20-%20Nubile%20Films%20The%20Pillow%20Fight.mp4?nvb=20121113051249&nva=20121114051249&ir=1200&sr=1200&hash=014b882080310e95fb6a0 - # A path looks like this: - # /201210/31/8004515/480p_370k_8004515/YouPorn%20-%20Nubile%20Films%20The%20Pillow%20Fight.mp4 - video_url = unescapeHTML(link) - path = compat_urllib_parse_urlparse(video_url).path - format_parts = path.split('/')[4].split('_')[:2] - - dn = compat_urllib_parse_urlparse(video_url).netloc.partition('.')[0] - - resolution = format_parts[0] - height = int(resolution[:-len('p')]) - bitrate = int(format_parts[1][:-len('k')]) - format = '-'.join(format_parts) + '-' + dn - - formats.append({ + for video_url in set(unescapeHTML(link) for link in links): + f = { 'url': video_url, - 'format': format, - 'format_id': format, - 'height': height, - 'tbr': bitrate, 
- 'resolution': resolution, - }) - + } + # Video URL's path looks like this: + # /201012/17/505835/720p_1500k_505835/YouPorn%20-%20Sex%20Ed%20Is%20It%20Safe%20To%20Masturbate%20Daily.mp4 + # We will benefit from it by extracting some metadata + mobj = re.search(r'/(?P<height>\d{3,4})[pP]_(?P<bitrate>\d+)[kK]_\d+/', video_url) + if mobj: + height = int(mobj.group('height')) + bitrate = int(mobj.group('bitrate')) + f.update({ + 'format_id': '%dp-%dk' % (height, bitrate), + 'height': height, + 'tbr': bitrate, + }) + formats.append(f) self._sort_formats(formats) - if not formats: - raise ExtractorError('ERROR: no known formats available for video') + description = self._html_search_regex( + r'(?s)<div[^>]+class=["\']video-description["\'][^>]*>(.+?)</div>', + webpage, 'description', default=None) + thumbnail = self._search_regex( + r'(?:imageurl\s*=|poster\s*:)\s*(["\'])(?P<thumbnail>.+?)\1', + webpage, 'thumbnail', fatal=False, group='thumbnail') + + uploader = self._html_search_regex( + r'(?s)<div[^>]+class=["\']videoInfoBy["\'][^>]*>\s*By:\s*</div>(.+?)</(?:a|div)>', + webpage, 'uploader', fatal=False) + upload_date = unified_strdate(self._html_search_regex( + r'(?s)<div[^>]+class=["\']videoInfoTime["\'][^>]*>(.+?)</div>', + webpage, 'upload date', fatal=False)) + + age_limit = self._rta_search(webpage) + + average_rating = int_or_none(self._search_regex( + r'<div[^>]+class=["\']videoInfoRating["\'][^>]*>\s*<div[^>]+class=["\']videoRatingPercentage["\'][^>]*>(\d+)%</div>', + webpage, 'average rating', fatal=False)) + + view_count = str_to_int(self._search_regex( + r'(?s)<div[^>]+class=["\']videoInfoViews["\'][^>]*>.*?([\d,.]+)\s*</div>', + webpage, 'view count', fatal=False)) + comment_count = str_to_int(self._search_regex( + r'>All [Cc]omments? \(([\d,.]+)\)', + webpage, 'comment count', fatal=False)) + + def extract_tag_box(title): + tag_box = self._search_regex( + (r'<div[^>]+class=["\']tagBoxTitle["\'][^>]*>\s*%s\b.*?</div>\s*' + '<div[^>]+class=["\']tagBoxContent["\']>(.+?)</div>') % re.escape(title), + webpage, '%s tag box' % title, default=None) + if not tag_box: + return [] + return re.findall(r'<a[^>]+href=[^>]+>([^<]+)', tag_box) + + categories = extract_tag_box('Category') + tags = extract_tag_box('Tags') return { 'id': video_id, - 'uploader': video_uploader, - 'upload_date': upload_date, - 'title': video_title, + 'display_id': display_id, + 'title': title, + 'description': description, 'thumbnail': thumbnail, - 'description': video_description, + 'uploader': uploader, + 'upload_date': upload_date, + 'average_rating': average_rating, + 'view_count': view_count, + 'comment_count': comment_count, + 'categories': categories, + 'tags': tags, 'age_limit': age_limit, 'formats': formats, } diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py index b252e36e1..4aac2cc03 100644 --- a/youtube_dl/extractor/youtube.py +++ b/youtube_dl/extractor/youtube.py @@ -20,13 +20,13 @@ from ..compat import ( compat_urllib_parse_unquote, compat_urllib_parse_unquote_plus, compat_urllib_parse_urlparse, - compat_urllib_request, compat_urlparse, compat_str, ) from ..utils import ( clean_html, encode_dict, + error_to_compat_str, ExtractorError, float_or_none, get_element_by_attribute, @@ -34,7 +34,9 @@ from ..utils import ( int_or_none, orderedSet, parse_duration, + remove_quotes, remove_start, + sanitized_Request, smuggle_url, str_to_int, unescapeHTML, @@ -114,7 +116,7 @@ class YoutubeBaseInfoExtractor(InfoExtractor): login_data = 
compat_urllib_parse.urlencode(encode_dict(login_form_strs)).encode('ascii') - req = compat_urllib_request.Request(self._LOGIN_URL, login_data) + req = sanitized_Request(self._LOGIN_URL, login_data) login_results = self._download_webpage( req, None, note='Logging in', errnote='unable to log in', fatal=False) @@ -147,7 +149,7 @@ class YoutubeBaseInfoExtractor(InfoExtractor): tfa_data = compat_urllib_parse.urlencode(encode_dict(tfa_form_strs)).encode('ascii') - tfa_req = compat_urllib_request.Request(self._TWOFACTOR_URL, tfa_data) + tfa_req = sanitized_Request(self._TWOFACTOR_URL, tfa_data) tfa_results = self._download_webpage( tfa_req, None, note='Submitting TFA code', errnote='unable to submit tfa', fatal=False) @@ -178,6 +180,69 @@ class YoutubeBaseInfoExtractor(InfoExtractor): return +class YoutubeEntryListBaseInfoExtractor(InfoExtractor): + # Extract entries from page with "Load more" button + def _entries(self, page, playlist_id): + more_widget_html = content_html = page + for page_num in itertools.count(1): + for entry in self._process_page(content_html): + yield entry + + mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html) + if not mobj: + break + + more = self._download_json( + 'https://youtube.com/%s' % mobj.group('more'), playlist_id, + 'Downloading page #%s' % page_num, + transform_source=uppercase_escape) + content_html = more['content_html'] + if not content_html.strip(): + # Some webpages show a "Load more" button but they don't + # have more videos + break + more_widget_html = more['load_more_widget_html'] + + +class YoutubePlaylistBaseInfoExtractor(YoutubeEntryListBaseInfoExtractor): + def _process_page(self, content): + for video_id, video_title in self.extract_videos_from_page(content): + yield self.url_result(video_id, 'Youtube', video_id, video_title) + + def extract_videos_from_page(self, page): + ids_in_page = [] + titles_in_page = [] + for mobj in re.finditer(self._VIDEO_RE, page): + # The link with index 0 is not the first video of the playlist (not sure if still actual) + if 'index' in mobj.groupdict() and mobj.group('id') == '0': + continue + video_id = mobj.group('id') + video_title = unescapeHTML(mobj.group('title')) + if video_title: + video_title = video_title.strip() + try: + idx = ids_in_page.index(video_id) + if video_title and not titles_in_page[idx]: + titles_in_page[idx] = video_title + except ValueError: + ids_in_page.append(video_id) + titles_in_page.append(video_title) + return zip(ids_in_page, titles_in_page) + + +class YoutubePlaylistsBaseInfoExtractor(YoutubeEntryListBaseInfoExtractor): + def _process_page(self, content): + for playlist_id in re.findall(r'href="/?playlist\?list=(.+?)"', content): + yield self.url_result( + 'https://www.youtube.com/playlist?list=%s' % playlist_id, 'YoutubePlaylist') + + def _real_extract(self, url): + playlist_id = self._match_id(url) + webpage = self._download_webpage(url, playlist_id) + title = self._og_search_title(webpage, fatal=False) + return self.playlist_result(self._entries(webpage, playlist_id), playlist_id, title) + + class YoutubeIE(YoutubeBaseInfoExtractor): IE_DESC = 'YouTube.com' _VALID_URL = r"""(?x)^ @@ -195,7 +260,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor): |(?: # or the v= param in all its forms (?:(?:watch|movie)(?:_popup)?(?:\.php)?/?)? # preceding watch(_popup|.php) or nothing (like /?v=xxxx) (?:\?|\#!?) # the params delimiter ? or # or #! - (?:.*?&)?? # any other preceding param (like /?s=tuff&v=xxxx) + (?:.*?[&;])?? 
# any other preceding param (like /?s=tuff&v=xxxx or ?s=tuff&v=V36LpHqtcDY) v= ) )) @@ -283,6 +348,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor): '247': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40}, '248': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40}, '271': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40}, + # itag 272 videos are either 3840x2160 (e.g. RtoitU2A-3E) or 7680x4320 (sLprVF6d7Ug) '272': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40}, '302': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'vp9'}, '303': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'vp9'}, @@ -331,12 +397,14 @@ class YoutubeIE(YoutubeBaseInfoExtractor): 'ext': 'mp4', 'upload_date': '20120506', 'title': 'Icona Pop - I Love It (feat. Charli XCX) [OFFICIAL VIDEO]', + 'alt_title': 'I Love It (feat. Charli XCX)', 'description': 'md5:782e8651347686cba06e58f71ab51773', 'tags': ['Icona Pop i love it', 'sweden', 'pop music', 'big beat records', 'big beat', 'charli', 'xcx', 'charli xcx', 'girls', 'hbo', 'i love it', "i don't care", 'icona', 'pop', 'iconic ep', 'iconic', 'love', 'it'], 'uploader': 'Icona Pop', 'uploader_id': 'IconaPop', + 'creator': 'Icona Pop', } }, { @@ -347,9 +415,11 @@ class YoutubeIE(YoutubeBaseInfoExtractor): 'ext': 'mp4', 'upload_date': '20130703', 'title': 'Justin Timberlake - Tunnel Vision (Explicit)', + 'alt_title': 'Tunnel Vision', 'description': 'md5:64249768eec3bc4276236606ea996373', 'uploader': 'justintimberlakeVEVO', 'uploader_id': 'justintimberlakeVEVO', + 'creator': 'Justin Timberlake', 'age_limit': 18, } }, @@ -363,7 +433,8 @@ class YoutubeIE(YoutubeBaseInfoExtractor): 'title': 'Principal Sexually Assaults A Teacher - Episode 117 - 8th June 2012', 'description': 'md5:09b78bd971f1e3e289601dfba15ca4f7', 'uploader': 'SET India', - 'uploader_id': 'setindia' + 'uploader_id': 'setindia', + 'age_limit': 18, } }, { @@ -427,10 +498,12 @@ class YoutubeIE(YoutubeBaseInfoExtractor): 'id': 'nfWlot6h_JM', 'ext': 'm4a', 'title': 'Taylor Swift - Shake It Off', + 'alt_title': 'Shake It Off', 'description': 'md5:95f66187cd7c8b2c13eb78e1223b63c3', 'uploader': 'TaylorSwiftVEVO', 'uploader_id': 'TaylorSwiftVEVO', 'upload_date': '20140818', + 'creator': 'Taylor Swift', }, 'params': { 'youtube_include_dash_manifest': True, @@ -486,9 +559,11 @@ class YoutubeIE(YoutubeBaseInfoExtractor): 'ext': 'mp4', 'upload_date': '20100430', 'uploader_id': 'deadmau5', + 'creator': 'deadmau5', 'description': 'md5:12c56784b8032162bb936a5f76d55360', 'uploader': 'deadmau5', 'title': 'Deadmau5 - Some Chords (HD)', + 'alt_title': 'Some Chords', }, 'expected_warnings': [ 'DASH manifest missing', @@ -500,7 +575,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor): 'info_dict': { 'id': 'lqQg6PlCWgI', 'ext': 'mp4', - 'upload_date': '20120724', + 'upload_date': '20150827', 'uploader_id': 'olympic', 'description': 'HO09 - Women - GER-AUS - Hockey - 31 July 2012 - London 2012 Olympic Games', 'uploader': 'Olympics', @@ -628,6 +703,49 @@ class YoutubeIE(YoutubeBaseInfoExtractor): { 'url': 'http://vid.plus/FlRa-iH7PGw', 'only_matching': True, + }, + { + # Title with JS-like syntax "};" (see https://github.com/rg3/youtube-dl/issues/7468) + 'url': 'https://www.youtube.com/watch?v=lsguqyKfVQg', + 
'info_dict': { + 'id': 'lsguqyKfVQg', + 'ext': 'mp4', + 'title': '{dark walk}; Loki/AC/Dishonored; collab w/Elflover21', + 'alt_title': 'Dark Walk', + 'description': 'md5:8085699c11dc3f597ce0410b0dcbb34a', + 'upload_date': '20151119', + 'uploader_id': 'IronSoulElf', + 'uploader': 'IronSoulElf', + 'creator': 'Todd Haberman, Daniel Law Heath & Aaron Kaplan', + }, + 'params': { + 'skip_download': True, + }, + }, + { + # Tags with '};' (see https://github.com/rg3/youtube-dl/issues/7468) + 'url': 'https://www.youtube.com/watch?v=Ms7iBXnlUO8', + 'only_matching': True, + }, + { + # Video with yt:stretch=17:0 + 'url': 'https://www.youtube.com/watch?v=Q39EVAstoRM', + 'info_dict': { + 'id': 'Q39EVAstoRM', + 'ext': 'mp4', + 'title': 'Clash Of Clans#14 Dicas De Ataque Para CV 4', + 'description': 'md5:ee18a25c350637c8faff806845bddee9', + 'upload_date': '20151107', + 'uploader_id': 'UCCr7TALkRbo3EtFzETQF1LA', + 'uploader': 'CH GAMER DROID', + }, + 'params': { + 'skip_download': True, + }, + }, + { + 'url': 'https://www.youtube.com/watch?feature=player_embedded&amp;v=V36LpHqtcDY', + 'only_matching': True, } ] @@ -657,7 +775,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor): def _extract_signature_function(self, video_id, player_url, example_sig): id_m = re.match( - r'.*?-(?P<id>[a-zA-Z0-9_-]+)(?:/watch_as3|/html5player(?:-new)?)?\.(?P<ext>[a-z]+)$', + r'.*?-(?P<id>[a-zA-Z0-9_-]+)(?:/watch_as3|/html5player(?:-new)?|/base)?\.(?P<ext>[a-z]+)$', player_url) if not id_m: raise ExtractorError('Cannot identify player %r' % player_url) @@ -786,7 +904,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor): 'https://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id, video_id, note=False) except ExtractorError as err: - self._downloader.report_warning('unable to download video subtitles: %s' % compat_str(err)) + self._downloader.report_warning('unable to download video subtitles: %s' % error_to_compat_str(err)) return {} sub_lang_list = {} @@ -812,16 +930,33 @@ class YoutubeIE(YoutubeBaseInfoExtractor): return {} return sub_lang_list + def _get_ytplayer_config(self, video_id, webpage): + patterns = ( + # User data may contain arbitrary character sequences that may affect + # JSON extraction with regex, e.g. when '};' is contained the second + # regex won't capture the whole JSON. 
Yet working around by trying more + # concrete regex first keeping in mind proper quoted string handling + # to be implemented in future that will replace this workaround (see + # https://github.com/rg3/youtube-dl/issues/7468, + # https://github.com/rg3/youtube-dl/pull/7599) + r';ytplayer\.config\s*=\s*({.+?});ytplayer', + r';ytplayer\.config\s*=\s*({.+?});', + ) + config = self._search_regex( + patterns, webpage, 'ytplayer.config', default=None) + if config: + return self._parse_json( + uppercase_escape(config), video_id, fatal=False) + def _get_automatic_captions(self, video_id, webpage): """We need the webpage for getting the captions url, pass it as an argument to speed up the process.""" self.to_screen('%s: Looking for automatic captions' % video_id) - mobj = re.search(r';ytplayer.config = ({.*?});', webpage) + player_config = self._get_ytplayer_config(video_id, webpage) err_msg = 'Couldn\'t find automatic captions for %s' % video_id - if mobj is None: + if not player_config: self._downloader.report_warning(err_msg) return {} - player_config = json.loads(mobj.group(1)) try: args = player_config['args'] caption_url = args['ttsurl'] @@ -1028,10 +1163,8 @@ class YoutubeIE(YoutubeBaseInfoExtractor): age_gate = False video_info = None # Try looking directly into the video webpage - mobj = re.search(r';ytplayer\.config\s*=\s*({.*?});', video_webpage) - if mobj: - json_code = uppercase_escape(mobj.group(1)) - ytplayer_config = json.loads(json_code) + ytplayer_config = self._get_ytplayer_config(video_id, video_webpage) + if ytplayer_config: args = ytplayer_config['args'] if args.get('url_encoded_fmt_stream_map'): # Convert to the same format returned by compat_parse_qs @@ -1061,6 +1194,17 @@ class YoutubeIE(YoutubeBaseInfoExtractor): if not video_info: video_info = get_video_info if 'token' in get_video_info: + # Different get_video_info requests may report different results, e.g. + # some may report video unavailability, but some may serve it without + # any complaint (see https://github.com/rg3/youtube-dl/issues/7362, + # the original webpage as well as el=info and el=embedded get_video_info + # requests report video unavailability due to geo restriction while + # el=detailpage succeeds and returns valid data). This is probably + # due to YouTube measures against IP ranges of hosting providers. + # Working around by preferring the first succeeded video_info containing + # the token if no such video_info yet was found. + if 'token' not in video_info: + video_info = get_video_info break if 'token' not in video_info: if 'reason' in video_info: @@ -1176,6 +1320,15 @@ class YoutubeIE(YoutubeBaseInfoExtractor): upload_date = ' '.join(re.sub(r'[/,-]', r' ', mobj.group(1)).split()) upload_date = unified_strdate(upload_date) + m_music = re.search( + r'<h4[^>]+class="title"[^>]*>\s*Music\s*</h4>\s*<ul[^>]*>\s*<li>(?P<title>.+?) 
by (?P<creator>.+?)(?:\(.+?\))?</li', + video_webpage) + if m_music: + video_alt_title = remove_quotes(unescapeHTML(m_music.group('title'))) + video_creator = clean_html(m_music.group('creator')) + else: + video_alt_title = video_creator = None + m_cat_container = self._search_regex( r'(?s)<h4[^>]*>\s*Category\s*</h4>\s*<ul[^>]*>(.*?)</ul>', video_webpage, 'categories', default=None) @@ -1286,7 +1439,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor): player_desc = 'flash player %s' % player_version else: player_version = self._search_regex( - r'html5player-([^/]+?)(?:/html5player(?:-new)?)?\.js', + [r'html5player-([^/]+?)(?:/html5player(?:-new)?)?\.js', r'(?:www|player)-([^/]+)/base\.js'], player_url, 'html5 player', fatal=False) player_desc = 'html5 player %s' % player_version @@ -1348,6 +1501,9 @@ class YoutubeIE(YoutubeBaseInfoExtractor): manifest_url = video_info['hlsvp'][0] url_map = self._extract_from_m3u8(manifest_url, video_id) formats = _map_to_format_list(url_map) + # Accept-Encoding header causes failures in live streams on Youtube and Youtube Gaming + for a_format in formats: + a_format.setdefault('http_headers', {})['Youtubedl-no-compression'] = 'True' else: raise ExtractorError('no conn, hlsvp or url_encoded_fmt_stream_map information found in video info') @@ -1385,10 +1541,15 @@ class YoutubeIE(YoutubeBaseInfoExtractor): r'<meta\s+property="og:video:tag".*?content="yt:stretch=(?P<w>[0-9]+):(?P<h>[0-9]+)">', video_webpage) if stretched_m: - ratio = float(stretched_m.group('w')) / float(stretched_m.group('h')) - for f in formats: - if f.get('vcodec') != 'none': - f['stretched_ratio'] = ratio + w = float(stretched_m.group('w')) + h = float(stretched_m.group('h')) + # yt:stretch may hold invalid ratio data (e.g. for Q39EVAstoRM ratio is 17:0). + # We will only process correct ratios. + if w > 0 and h > 0: + ratio = w / h + for f in formats: + if f.get('vcodec') != 'none': + f['stretched_ratio'] = ratio self._sort_formats(formats) @@ -1397,7 +1558,9 @@ class YoutubeIE(YoutubeBaseInfoExtractor): 'uploader': video_uploader, 'uploader_id': video_uploader_id, 'upload_date': upload_date, + 'creator': video_creator, 'title': video_title, + 'alt_title': video_alt_title, 'thumbnail': video_thumbnail, 'description': video_description, 'categories': video_categories, @@ -1419,7 +1582,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor): } -class YoutubePlaylistIE(YoutubeBaseInfoExtractor): +class YoutubePlaylistIE(YoutubeBaseInfoExtractor, YoutubePlaylistBaseInfoExtractor): IE_DESC = 'YouTube.com playlists' _VALID_URL = r"""(?x)(?: (?:https?://)? @@ -1427,7 +1590,7 @@ class YoutubePlaylistIE(YoutubeBaseInfoExtractor): youtube\.com/ (?: (?:course|view_play_list|my_playlists|artist|playlist|watch|embed/videoseries) - \? (?:.*?&)*? (?:p|a|list)= + \? (?:.*?[&;])*? (?:p|a|list)= | p/ ) ( @@ -1440,7 +1603,7 @@ class YoutubePlaylistIE(YoutubeBaseInfoExtractor): ((?:PL|LL|EC|UU|FL|RD|UL)[0-9A-Za-z-_]{10,}) )""" _TEMPLATE_URL = 'https://www.youtube.com/playlist?list=%s' - _VIDEO_RE = r'href="\s*/watch\?v=(?P<id>[0-9A-Za-z_-]{11})&[^"]*?index=(?P<index>\d+)' + _VIDEO_RE = r'href="\s*/watch\?v=(?P<id>[0-9A-Za-z_-]{11})&[^"]*?index=(?P<index>\d+)(?:[^>]+>(?P<title>[^<]+))?' 
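# The (?:[^>]+>(?P<title>[^<]+))? tail added to _VIDEO_RE lets the shared
# extract_videos_from_page() (see YoutubePlaylistBaseInfoExtractor above)
# capture titles while keeping the old id/index groups. A quick self-check
# against an illustrative anchor tag (not verbatim YouTube markup):
import re
_VIDEO_RE = r'href="\s*/watch\?v=(?P<id>[0-9A-Za-z_-]{11})&[^"]*?index=(?P<index>\d+)(?:[^>]+>(?P<title>[^<]+))?'
m = re.search(_VIDEO_RE, '<a href="/watch?v=dQw4w9WgXcQ&index=1" class="pl-video">A title</a>')
assert (m.group('id'), m.group('index'), m.group('title')) == ('dQw4w9WgXcQ', '1', 'A title')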
IE_NAME = 'youtube:playlist' _TESTS = [{ 'url': 'https://www.youtube.com/playlist?list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re', @@ -1557,37 +1720,11 @@ class YoutubePlaylistIE(YoutubeBaseInfoExtractor): else: self.report_warning('Youtube gives an alert message: ' + match) - # Extract the video ids from the playlist pages - def _entries(): - more_widget_html = content_html = page - for page_num in itertools.count(1): - matches = re.finditer(self._VIDEO_RE, content_html) - # We remove the duplicates and the link with index 0 - # (it's not the first video of the playlist) - new_ids = orderedSet(m.group('id') for m in matches if m.group('index') != '0') - for vid_id in new_ids: - yield self.url_result(vid_id, 'Youtube', video_id=vid_id) - - mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html) - if not mobj: - break - - more = self._download_json( - 'https://youtube.com/%s' % mobj.group('more'), playlist_id, - 'Downloading page #%s' % page_num, - transform_source=uppercase_escape) - content_html = more['content_html'] - if not content_html.strip(): - # Some webpages show a "Load more" button but they don't - # have more videos - break - more_widget_html = more['load_more_widget_html'] - playlist_title = self._html_search_regex( - r'(?s)<h1 class="pl-header-title[^"]*">\s*(.*?)\s*</h1>', + r'(?s)<h1 class="pl-header-title[^"]*"[^>]*>\s*(.*?)\s*</h1>', page, 'title') - return self.playlist_result(_entries(), playlist_id, playlist_title) + return self.playlist_result(self._entries(page, playlist_id), playlist_id, playlist_title) def _real_extract(self, url): # Extract playlist id @@ -1613,35 +1750,34 @@ class YoutubePlaylistIE(YoutubeBaseInfoExtractor): return self._extract_playlist(playlist_id) -class YoutubeChannelIE(InfoExtractor): +class YoutubeChannelIE(YoutubePlaylistBaseInfoExtractor): IE_DESC = 'YouTube.com channels' _VALID_URL = r'https?://(?:youtu\.be|(?:\w+\.)?youtube(?:-nocookie)?\.com)/channel/(?P<id>[0-9A-Za-z_-]+)' _TEMPLATE_URL = 'https://www.youtube.com/channel/%s/videos' + _VIDEO_RE = r'(?:title="(?P<title>[^"]+)"[^>]+)?href="/watch\?v=(?P<id>[0-9A-Za-z_-]+)&?' 
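# YoutubeChannelIE now reuses the "Load more" walker introduced above in
# YoutubeEntryListBaseInfoExtractor._entries instead of carrying its own
# copy (deleted further down). Its pagination contract, reduced to a sketch
# with a hypothetical fetch_json() standing in for _download_json():
import re

def walk_load_more(first_page_html, fetch_json):
    # Yield successive chunks of page HTML, following the JSON
    # data-uix-load-more-href endpoint until it runs dry.
    more_widget_html = content_html = first_page_html
    while True:
        yield content_html
        mobj = re.search(
            r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html)
        if not mobj:
            return
        more = fetch_json('https://youtube.com/%s' % mobj.group('more'))
        content_html = more['content_html']
        if not content_html.strip():
            return  # a "Load more" button may be shown with no more videos
        more_widget_html = more['load_more_widget_html']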
IE_NAME = 'youtube:channel' _TESTS = [{ 'note': 'paginated channel', 'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w', 'playlist_mincount': 91, 'info_dict': { - 'id': 'UCKfVa3S1e4PHvxWcwyMMg8w', + 'id': 'UUKfVa3S1e4PHvxWcwyMMg8w', + 'title': 'Uploads from lex will', } + }, { + 'note': 'Age restricted channel', + # from https://www.youtube.com/user/DeusExOfficial + 'url': 'https://www.youtube.com/channel/UCs0ifCMCm1icqRbqhUINa0w', + 'playlist_mincount': 64, + 'info_dict': { + 'id': 'UUs0ifCMCm1icqRbqhUINa0w', + 'title': 'Uploads from Deus Ex', + }, }] - @staticmethod - def extract_videos_from_page(page): - ids_in_page = [] - titles_in_page = [] - for mobj in re.finditer(r'(?:title="(?P<title>[^"]+)"[^>]+)?href="/watch\?v=(?P<id>[0-9A-Za-z_-]+)&?', page): - video_id = mobj.group('id') - video_title = unescapeHTML(mobj.group('title')) - try: - idx = ids_in_page.index(video_id) - if video_title and not titles_in_page[idx]: - titles_in_page[idx] = video_title - except ValueError: - ids_in_page.append(video_id) - titles_in_page.append(video_title) - return zip(ids_in_page, titles_in_page) + @classmethod + def suitable(cls, url): + return False if YoutubePlaylistsIE.suitable(url) else super(YoutubeChannelIE, cls).suitable(url) def _real_extract(self, url): channel_id = self._match_id(url) @@ -1661,7 +1797,7 @@ class YoutubeChannelIE(InfoExtractor): 'channelId', channel_page, 'channel id', default=None) if not channel_playlist_id: channel_playlist_id = self._search_regex( - r'data-channel-external-id="([^"]+)"', + r'data-(?:channel-external-|yt)id="([^"]+)"', channel_page, 'channel id', default=None) if channel_playlist_id and channel_playlist_id.startswith('UC'): playlist_id = 'UU' + channel_playlist_id[2:] @@ -1685,29 +1821,7 @@ class YoutubeChannelIE(InfoExtractor): for video_id, video_title in self.extract_videos_from_page(channel_page)] return self.playlist_result(entries, channel_id) - def _entries(): - more_widget_html = content_html = channel_page - for pagenum in itertools.count(1): - - for video_id, video_title in self.extract_videos_from_page(content_html): - yield self.url_result( - video_id, 'Youtube', video_id=video_id, - video_title=video_title) - - mobj = re.search( - r'data-uix-load-more-href="/?(?P<more>[^"]+)"', - more_widget_html) - if not mobj: - break - - more = self._download_json( - 'https://youtube.com/%s' % mobj.group('more'), channel_id, - 'Downloading page #%s' % (pagenum + 1), - transform_source=uppercase_escape) - content_html = more['content_html'] - more_widget_html = more['load_more_widget_html'] - - return self.playlist_result(_entries(), channel_id) + return self.playlist_result(self._entries(channel_page, channel_id), channel_id) class YoutubeUserIE(YoutubeChannelIE): @@ -1738,6 +1852,36 @@ class YoutubeUserIE(YoutubeChannelIE): return super(YoutubeUserIE, cls).suitable(url) +class YoutubePlaylistsIE(YoutubePlaylistsBaseInfoExtractor): + IE_DESC = 'YouTube.com user/channel playlists' + _VALID_URL = r'https?://(?:\w+\.)?youtube\.com/(?:user|channel)/(?P<id>[^/]+)/playlists' + IE_NAME = 'youtube:playlists' + + _TESTS = [{ + 'url': 'http://www.youtube.com/user/ThirstForScience/playlists', + 'playlist_mincount': 4, + 'info_dict': { + 'id': 'ThirstForScience', + 'title': 'Thirst for Science', + }, + }, { + # with "Load more" button + 'url': 'http://www.youtube.com/user/igorkle1/playlists?view=1&sort=dd', + 'playlist_mincount': 70, + 'info_dict': { + 'id': 'igorkle1', + 'title': 'Игорь Клейнер', + }, + }, { + 'url': 
'https://www.youtube.com/channel/UCiU1dHvZObB2iP6xkJ__Icw/playlists', + 'playlist_mincount': 17, + 'info_dict': { + 'id': 'UCiU1dHvZObB2iP6xkJ__Icw', + 'title': 'Chem Player', + }, + }] + + class YoutubeSearchIE(SearchInfoExtractor, YoutubePlaylistIE): IE_DESC = 'YouTube.com searches' # there doesn't appear to be a real limit, for example if you search for @@ -1833,7 +1977,7 @@ class YoutubeSearchURLIE(InfoExtractor): } -class YoutubeShowIE(InfoExtractor): +class YoutubeShowIE(YoutubePlaylistsBaseInfoExtractor): IE_DESC = 'YouTube.com (multi-season) shows' _VALID_URL = r'https?://www\.youtube\.com/show/(?P<id>[^?#]*)' IE_NAME = 'youtube:show' @@ -1847,26 +1991,9 @@ class YoutubeShowIE(InfoExtractor): }] def _real_extract(self, url): - mobj = re.match(self._VALID_URL, url) - playlist_id = mobj.group('id') - webpage = self._download_webpage( - 'https://www.youtube.com/show/%s/playlists' % playlist_id, playlist_id, 'Downloading show webpage') - # There's one playlist for each season of the show - m_seasons = list(re.finditer(r'href="(/playlist\?list=.*?)"', webpage)) - self.to_screen('%s: Found %s seasons' % (playlist_id, len(m_seasons))) - entries = [ - self.url_result( - 'https://www.youtube.com' + season.group(1), 'YoutubePlaylist') - for season in m_seasons - ] - title = self._og_search_title(webpage, fatal=False) - - return { - '_type': 'playlist', - 'id': playlist_id, - 'title': title, - 'entries': entries, - } + playlist_id = self._match_id(url) + return super(YoutubeShowIE, self)._real_extract( + 'https://www.youtube.com/show/%s/playlists' % playlist_id) class YoutubeFeedsInfoExtractor(YoutubeBaseInfoExtractor): diff --git a/youtube_dl/extractor/zdf.py b/youtube_dl/extractor/zdf.py index 98f15177b..2a1f2f6d1 100644 --- a/youtube_dl/extractor/zdf.py +++ b/youtube_dl/extractor/zdf.py @@ -9,89 +9,17 @@ from ..utils import ( int_or_none, unified_strdate, OnDemandPagedList, + xpath_text, + determine_ext, + qualities, + float_or_none, ) -def extract_from_xml_url(ie, video_id, xml_url): - doc = ie._download_xml( - xml_url, video_id, - note='Downloading video info', - errnote='Failed to download video info') - - title = doc.find('.//information/title').text - description = doc.find('.//information/detail').text - duration = int(doc.find('.//details/lengthSec').text) - uploader_node = doc.find('.//details/originChannelTitle') - uploader = None if uploader_node is None else uploader_node.text - uploader_id_node = doc.find('.//details/originChannelId') - uploader_id = None if uploader_id_node is None else uploader_id_node.text - upload_date = unified_strdate(doc.find('.//details/airtime').text) - - def xml_to_format(fnode): - video_url = fnode.find('url').text - is_available = 'http://www.metafilegenerator' not in video_url - - format_id = fnode.attrib['basetype'] - format_m = re.match(r'''(?x) - (?P<vcodec>[^_]+)_(?P<acodec>[^_]+)_(?P<container>[^_]+)_ - (?P<proto>[^_]+)_(?P<index>[^_]+)_(?P<indexproto>[^_]+) - ''', format_id) - - ext = format_m.group('container') - proto = format_m.group('proto').lower() - - quality = fnode.find('./quality').text - abr = int(fnode.find('./audioBitrate').text) // 1000 - vbr_node = fnode.find('./videoBitrate') - vbr = None if vbr_node is None else int(vbr_node.text) // 1000 - - width_node = fnode.find('./width') - width = None if width_node is None else int_or_none(width_node.text) - height_node = fnode.find('./height') - height = None if height_node is None else int_or_none(height_node.text) - - format_note = '' - if not format_note: - format_note = None - - 
return { - 'format_id': format_id + '-' + quality, - 'url': video_url, - 'ext': ext, - 'acodec': format_m.group('acodec'), - 'vcodec': format_m.group('vcodec'), - 'abr': abr, - 'vbr': vbr, - 'width': width, - 'height': height, - 'filesize': int_or_none(fnode.find('./filesize').text), - 'format_note': format_note, - 'protocol': proto, - '_available': is_available, - } - - format_nodes = doc.findall('.//formitaeten/formitaet') - formats = list(filter( - lambda f: f['_available'], - map(xml_to_format, format_nodes))) - ie._sort_formats(formats) - - return { - 'id': video_id, - 'title': title, - 'description': description, - 'duration': duration, - 'uploader': uploader, - 'uploader_id': uploader_id, - 'upload_date': upload_date, - 'formats': formats, - } - - class ZDFIE(InfoExtractor): _VALID_URL = r'(?:zdf:|zdf:video:|https?://www\.zdf\.de/ZDFmediathek(?:#)?/(.*beitrag/(?:video/)?))(?P<id>[0-9]+)(?:/[^/?]+)?(?:\?.*)?' - _TEST = { + _TESTS = [{ 'url': 'http://www.zdf.de/ZDFmediathek/beitrag/video/2037704/ZDFspezial---Ende-des-Machtpokers--?bc=sts;stt', 'info_dict': { 'id': '2037704', @@ -104,23 +32,177 @@ class ZDFIE(InfoExtractor): 'upload_date': '20131127', }, 'skip': 'Videos on ZDF.de are depublicised in short order', - } + }] + + def _parse_smil_formats(self, smil, smil_url, video_id, namespace=None, f4m_params=None, transform_rtmp_url=None): + param_groups = {} + for param_group in smil.findall(self._xpath_ns('./head/paramGroup', namespace)): + group_id = param_group.attrib.get(self._xpath_ns('id', 'http://www.w3.org/XML/1998/namespace')) + params = {} + for param in param_group: + params[param.get('name')] = param.get('value') + param_groups[group_id] = params + + formats = [] + for video in smil.findall(self._xpath_ns('.//video', namespace)): + src = video.get('src') + if not src: + continue + bitrate = float_or_none(video.get('system-bitrate') or video.get('systemBitrate'), 1000) + group_id = video.get('paramGroup') + param_group = param_groups[group_id] + for proto in param_group['protocols'].split(','): + formats.append({ + 'url': '%s://%s' % (proto, param_group['host']), + 'app': param_group['app'], + 'play_path': src, + 'ext': 'flv', + 'format_id': '%s-%d' % (proto, bitrate), + 'tbr': bitrate, + 'protocol': proto, + }) + self._sort_formats(formats) + return formats + + def extract_from_xml_url(self, video_id, xml_url): + doc = self._download_xml( + xml_url, video_id, + note='Downloading video info', + errnote='Failed to download video info') + + title = doc.find('.//information/title').text + description = xpath_text(doc, './/information/detail', 'description') + duration = int_or_none(xpath_text(doc, './/details/lengthSec', 'duration')) + uploader = xpath_text(doc, './/details/originChannelTitle', 'uploader') + uploader_id = xpath_text(doc, './/details/originChannelId', 'uploader id') + upload_date = unified_strdate(xpath_text(doc, './/details/airtime', 'upload date')) + + def xml_to_thumbnails(fnode): + thumbnails = [] + for node in fnode: + thumbnail_url = node.text + if not thumbnail_url: + continue + thumbnail = { + 'url': thumbnail_url, + } + if 'key' in node.attrib: + m = re.match('^([0-9]+)x([0-9]+)$', node.attrib['key']) + if m: + thumbnail['width'] = int(m.group(1)) + thumbnail['height'] = int(m.group(2)) + thumbnails.append(thumbnail) + return thumbnails + + thumbnails = xml_to_thumbnails(doc.findall('.//teaserimages/teaserimage')) + + format_nodes = doc.findall('.//formitaeten/formitaet') + quality = qualities(['veryhigh', 'high', 'med', 'low']) + + def 
get_quality(elem): + return quality(xpath_text(elem, 'quality')) + format_nodes.sort(key=get_quality) + format_ids = [] + formats = [] + for fnode in format_nodes: + video_url = fnode.find('url').text + is_available = 'http://www.metafilegenerator' not in video_url + if not is_available: + continue + format_id = fnode.attrib['basetype'] + quality = xpath_text(fnode, './quality', 'quality') + format_m = re.match(r'''(?x) + (?P<vcodec>[^_]+)_(?P<acodec>[^_]+)_(?P<container>[^_]+)_ + (?P<proto>[^_]+)_(?P<index>[^_]+)_(?P<indexproto>[^_]+) + ''', format_id) + + ext = determine_ext(video_url, None) or format_m.group('container') + if ext not in ('smil', 'f4m', 'm3u8'): + format_id = format_id + '-' + quality + if format_id in format_ids: + continue + + if ext == 'meta': + continue + elif ext == 'smil': + formats.extend(self._extract_smil_formats( + video_url, video_id, fatal=False)) + elif ext == 'm3u8': + formats.extend(self._extract_m3u8_formats( + video_url, video_id, 'mp4', m3u8_id='hls', fatal=False)) + elif ext == 'f4m': + formats.extend(self._extract_f4m_formats( + video_url, video_id, f4m_id='hds', fatal=False)) + else: + proto = format_m.group('proto').lower() + + abr = int_or_none(xpath_text(fnode, './audioBitrate', 'abr'), 1000) + vbr = int_or_none(xpath_text(fnode, './videoBitrate', 'vbr'), 1000) + + width = int_or_none(xpath_text(fnode, './width', 'width')) + height = int_or_none(xpath_text(fnode, './height', 'height')) + + filesize = int_or_none(xpath_text(fnode, './filesize', 'filesize')) + + format_note = '' + if not format_note: + format_note = None + + formats.append({ + 'format_id': format_id, + 'url': video_url, + 'ext': ext, + 'acodec': format_m.group('acodec'), + 'vcodec': format_m.group('vcodec'), + 'abr': abr, + 'vbr': vbr, + 'width': width, + 'height': height, + 'filesize': filesize, + 'format_note': format_note, + 'protocol': proto, + '_available': is_available, + }) + format_ids.append(format_id) + + self._sort_formats(formats) + + return { + 'id': video_id, + 'title': title, + 'description': description, + 'duration': duration, + 'thumbnails': thumbnails, + 'uploader': uploader, + 'uploader_id': uploader_id, + 'upload_date': upload_date, + 'formats': formats, + } def _real_extract(self, url): video_id = self._match_id(url) xml_url = 'http://www.zdf.de/ZDFmediathek/xmlservice/web/beitragsDetails?ak=web&id=%s' % video_id - return extract_from_xml_url(self, video_id, xml_url) + return self.extract_from_xml_url(video_id, xml_url) class ZDFChannelIE(InfoExtractor): - _VALID_URL = r'(?:zdf:topic:|https?://www\.zdf\.de/ZDFmediathek(?:#)?/.*kanaluebersicht/)(?P<id>[0-9]+)' - _TEST = { + _VALID_URL = r'(?:zdf:topic:|https?://www\.zdf\.de/ZDFmediathek(?:#)?/.*kanaluebersicht/(?:[^/]+/)?)(?P<id>[0-9]+)' + _TESTS = [{ 'url': 'http://www.zdf.de/ZDFmediathek#/kanaluebersicht/1586442/sendung/Titanic', 'info_dict': { 'id': '1586442', }, 'playlist_count': 3, - } + }, { + 'url': 'http://www.zdf.de/ZDFmediathek/kanaluebersicht/aktuellste/332', + 'only_matching': True, + }, { + 'url': 'http://www.zdf.de/ZDFmediathek/kanaluebersicht/meist-gesehen/332', + 'only_matching': True, + }, { + 'url': 'http://www.zdf.de/ZDFmediathek/kanaluebersicht/_/1798716?bc=nrt;nrm?flash=off', + 'only_matching': True, + }] _PAGE_SIZE = 50 def _fetch_page(self, channel_id, page): diff --git a/youtube_dl/extractor/zingmp3.py b/youtube_dl/extractor/zingmp3.py index 7dc1e2f2b..437eecb67 100644 --- a/youtube_dl/extractor/zingmp3.py +++ b/youtube_dl/extractor/zingmp3.py @@ -9,9 +9,11 @@ from ..utils import 
ExtractorError class ZingMp3BaseInfoExtractor(InfoExtractor): - def _extract_item(self, item): + def _extract_item(self, item, fatal=True): error_message = item.find('./errormessage').text if error_message: + if not fatal: + return raise ExtractorError( '%s returned error: %s' % (self.IE_NAME, error_message), expected=True) @@ -43,7 +45,9 @@ class ZingMp3BaseInfoExtractor(InfoExtractor): entries = [] for i, item in enumerate(items, 1): - entry = self._extract_item(item) + entry = self._extract_item(item, fatal=False) + if not entry: + continue entry['id'] = '%s-%d' % (id, i) entries.append(entry) @@ -85,7 +89,7 @@ class ZingMp3SongIE(ZingMp3BaseInfoExtractor): class ZingMp3AlbumIE(ZingMp3BaseInfoExtractor): - _VALID_URL = r'https?://mp3\.zing\.vn/album/(?P<slug>[^/]+)/(?P<album_id>\w+)\.html' + _VALID_URL = r'https?://mp3\.zing\.vn/(?:album|playlist)/(?P<slug>[^/]+)/(?P<album_id>\w+)\.html' _TESTS = [{ 'url': 'http://mp3.zing.vn/album/Lau-Dai-Tinh-Ai-Bang-Kieu-Minh-Tuyet/ZWZBWDAF.html', 'info_dict': { @@ -94,6 +98,9 @@ class ZingMp3AlbumIE(ZingMp3BaseInfoExtractor): 'title': 'Lâu Đài Tình Ái - Bằng Kiều ft. Minh Tuyết | Album 320 lossless', }, 'playlist_count': 10, + }, { + 'url': 'http://mp3.zing.vn/playlist/Duong-Hong-Loan-apollobee/IWCAACCB.html', + 'only_matching': True, }] IE_NAME = 'zingmp3:album' IE_DESC = 'mp3.zing.vn albums' diff --git a/youtube_dl/jsinterp.py b/youtube_dl/jsinterp.py index 0e0c7d90d..a7440c582 100644 --- a/youtube_dl/jsinterp.py +++ b/youtube_dl/jsinterp.py @@ -214,7 +214,7 @@ class JSInterpreter(object): obj = {} obj_m = re.search( (r'(?:var\s+)?%s\s*=\s*\{' % re.escape(objname)) + - r'\s*(?P<fields>([a-zA-Z$0-9]+\s*:\s*function\(.*?\)\s*\{.*?\})*)' + + r'\s*(?P<fields>([a-zA-Z$0-9]+\s*:\s*function\(.*?\)\s*\{.*?\}(?:,\s*)?)*)' + r'\}\s*;', self.code) fields = obj_m.group('fields') @@ -232,10 +232,10 @@ class JSInterpreter(object): def extract_function(self, funcname): func_m = re.search( r'''(?x) - (?:function\s+%s|[{;]%s\s*=\s*function)\s* + (?:function\s+%s|[{;,]%s\s*=\s*function|var\s+%s\s*=\s*function)\s* \((?P<args>[^)]*)\)\s* \{(?P<code>[^}]+)\}''' % ( - re.escape(funcname), re.escape(funcname)), + re.escape(funcname), re.escape(funcname), re.escape(funcname)), self.code) if func_m is None: raise ExtractorError('Could not find JS function %r' % funcname) diff --git a/youtube_dl/options.py b/youtube_dl/options.py index 5eccc0a70..c46e136bf 100644 --- a/youtube_dl/options.py +++ b/youtube_dl/options.py @@ -276,7 +276,7 @@ def parseOpts(overrideArguments=None): 'For example, to only match videos that have been liked more than ' '100 times and disliked less than 50 times (or the dislike ' 'functionality is not available at the given service), but who ' - 'also have a description, use --match-filter ' + 'also have a description, use --match-filter ' '"like_count > 100 & dislike_count <? 50 & description" .' 
diff --git a/youtube_dl/options.py b/youtube_dl/options.py
index 5eccc0a70..c46e136bf 100644
--- a/youtube_dl/options.py
+++ b/youtube_dl/options.py
@@ -276,7 +276,7 @@ def parseOpts(overrideArguments=None):
             'For example, to only match videos that have been liked more than '
             '100 times and disliked less than 50 times (or the dislike '
             'functionality is not available at the given service), but who '
-            'also have a description, use  --match-filter '
+            'also have a description, use --match-filter '
             '"like_count > 100 & dislike_count <? 50 & description" .'
         ))
     selection.add_option(
@@ -338,7 +338,7 @@ def parseOpts(overrideArguments=None):
     video_format.add_option(
         '-F', '--list-formats',
         action='store_true', dest='listformats',
-        help='List all available formats')
+        help='List all available formats of requested videos')
     video_format.add_option(
         '--youtube-include-dash-manifest',
         action='store_true', dest='youtube_include_dash_manifest', default=True,
@@ -363,7 +363,7 @@ def parseOpts(overrideArguments=None):
     subtitles.add_option(
         '--write-auto-sub', '--write-automatic-sub',
         action='store_true', dest='writeautomaticsub', default=False,
-        help='Write automatic subtitle file (YouTube only)')
+        help='Write automatically generated subtitle file (YouTube only)')
     subtitles.add_option(
         '--all-subs',
         action='store_true', dest='allsubtitles', default=False,
@@ -602,7 +602,7 @@ def parseOpts(overrideArguments=None):
     filesystem.add_option(
         '-A', '--auto-number',
         action='store_true', dest='autonumber', default=False,
-        help='[deprecated; use  -o "%(autonumber)s-%(title)s.%(ext)s" ] Number downloaded files starting from 00000')
+        help='[deprecated; use -o "%(autonumber)s-%(title)s.%(ext)s" ] Number downloaded files starting from 00000')
     filesystem.add_option(
         '-t', '--title',
         action='store_true', dest='usetitle', default=False,
diff --git a/youtube_dl/postprocessor/ffmpeg.py b/youtube_dl/postprocessor/ffmpeg.py
index 4f320e124..daca5d814 100644
--- a/youtube_dl/postprocessor/ffmpeg.py
+++ b/youtube_dl/postprocessor/ffmpeg.py
@@ -52,7 +52,7 @@ class FFmpegPostProcessor(PostProcessor):
     def _determine_executables(self):
         programs = ['avprobe', 'avconv', 'ffmpeg', 'ffprobe']
-        prefer_ffmpeg = self._downloader.params.get('prefer_ffmpeg', False)
+        prefer_ffmpeg = False
 
         self.basename = None
         self.probe_basename = None
@@ -60,6 +60,7 @@ class FFmpegPostProcessor(PostProcessor):
         self._paths = None
         self._versions = None
         if self._downloader:
+            prefer_ffmpeg = self._downloader.params.get('prefer_ffmpeg', False)
             location = self._downloader.params.get('ffmpeg_location')
             if location is not None:
                 if not os.path.exists(location):
@@ -272,7 +273,7 @@ class FFmpegExtractAudioPP(FFmpegPostProcessor):
             return [], information
 
         try:
-            self._downloader.to_screen('[' + self.basename + '] Destination: ' + new_path)
+            self._downloader.to_screen('[ffmpeg] Destination: ' + new_path)
             self.run_ffmpeg(path, new_path, acodec, more_opts)
         except AudioConversionError as e:
             raise PostProcessingError(
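Note on the ffmpeg.py hunk above: this is an ordering fix, since `self._downloader` may be None (a postprocessor can be constructed without a downloader), so `params` must only be read after that check. A stripped-down sketch of the pattern; the two stand-in classes here are invented for illustration:

    class Downloader(object):
        params = {'prefer_ffmpeg': True}

    class PostProcessor(object):
        def __init__(self, downloader=None):
            self._downloader = downloader

        def _determine_executables(self):
            # Default first; read params only once a downloader is attached
            prefer_ffmpeg = False
            if self._downloader:
                prefer_ffmpeg = self._downloader.params.get('prefer_ffmpeg', False)
            return prefer_ffmpeg

    print(PostProcessor()._determine_executables())              # False, no AttributeError
    print(PostProcessor(Downloader())._determine_executables())  # True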
diff --git a/youtube_dl/update.py b/youtube_dl/update.py
index fc7ac8305..995b8ed96 100644
--- a/youtube_dl/update.py
+++ b/youtube_dl/update.py
@@ -9,11 +9,8 @@ import subprocess
 import sys
 from zipimport import zipimporter
 
-from .compat import (
-    compat_str,
-    compat_urllib_request,
-)
-from .utils import make_HTTPS_handler
+from .utils import encode_compat_str
+
 from .version import __version__
 
@@ -47,7 +44,7 @@ def rsa_verify(message, signature, key):
     return True
 
 
-def update_self(to_screen, verbose):
+def update_self(to_screen, verbose, opener):
     """Update the program file with the latest version from the repository"""
 
     UPDATE_URL = "https://rg3.github.io/youtube-dl/update/"
@@ -59,15 +56,12 @@ def update_self(to_screen, verbose):
         to_screen('It looks like you installed youtube-dl with a package manager, pip, setup.py or a tarball. Please use that to update.')
         return
 
-    https_handler = make_HTTPS_handler({})
-    opener = compat_urllib_request.build_opener(https_handler)
-
     # Check if there is a new version
     try:
        newversion = opener.open(VERSION_URL).read().decode('utf-8').strip()
     except Exception:
         if verbose:
-            to_screen(compat_str(traceback.format_exc()))
+            to_screen(encode_compat_str(traceback.format_exc()))
         to_screen('ERROR: can\'t find the current version. Please try again later.')
         return
     if newversion == __version__:
@@ -80,7 +74,7 @@
         versions_info = json.loads(versions_info)
     except Exception:
         if verbose:
-            to_screen(compat_str(traceback.format_exc()))
+            to_screen(encode_compat_str(traceback.format_exc()))
         to_screen('ERROR: can\'t obtain versions info. Please try again later.')
         return
     if 'signature' not in versions_info:
@@ -129,7 +123,7 @@
             urlh.close()
         except (IOError, OSError):
             if verbose:
-                to_screen(compat_str(traceback.format_exc()))
+                to_screen(encode_compat_str(traceback.format_exc()))
             to_screen('ERROR: unable to download latest version')
             return
 
@@ -143,7 +137,7 @@
                 outf.write(newcontent)
         except (IOError, OSError):
             if verbose:
-                to_screen(compat_str(traceback.format_exc()))
+                to_screen(encode_compat_str(traceback.format_exc()))
             to_screen('ERROR: unable to write the new version')
             return
 
@@ -163,7 +157,7 @@ start /b "" cmd /c del "%%~f0"&exit /b"
             return  # Do not show premature success messages
         except (IOError, OSError):
             if verbose:
-                to_screen(compat_str(traceback.format_exc()))
+                to_screen(encode_compat_str(traceback.format_exc()))
             to_screen('ERROR: unable to overwrite current version')
             return
 
@@ -175,7 +169,7 @@ start /b "" cmd /c del "%%~f0"&exit /b"
             urlh.close()
         except (IOError, OSError):
             if verbose:
-                to_screen(compat_str(traceback.format_exc()))
+                to_screen(encode_compat_str(traceback.format_exc()))
             to_screen('ERROR: unable to download latest version')
             return
 
@@ -189,7 +183,7 @@ start /b "" cmd /c del "%%~f0"&exit /b"
                 outf.write(newcontent)
         except (IOError, OSError):
             if verbose:
-                to_screen(compat_str(traceback.format_exc()))
+                to_screen(encode_compat_str(traceback.format_exc()))
             to_screen('ERROR: unable to overwrite current version')
             return
diff --git a/youtube_dl/utils.py b/youtube_dl/utils.py
index 1dc3153fd..0ed6c45c8 100644
--- a/youtube_dl/utils.py
+++ b/youtube_dl/utils.py
@@ -3,6 +3,7 @@
 from __future__ import unicode_literals
 
+import base64
 import calendar
 import codecs
 import contextlib
@@ -35,6 +36,7 @@ import zlib
 from .compat import (
     compat_basestring,
     compat_chr,
+    compat_etree_fromstring,
     compat_html_entities,
     compat_http_client,
     compat_kwargs,
@@ -177,10 +179,19 @@ def xpath_with_ns(path, ns_map):
 
 
 def xpath_element(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
-    if sys.version_info < (2, 7):  # Crazy 2.6
-        xpath = xpath.encode('ascii')
+    def _find_xpath(xpath):
+        if sys.version_info < (2, 7):  # Crazy 2.6
+            xpath = xpath.encode('ascii')
+        return node.find(xpath)
+
+    if isinstance(xpath, (str, compat_str)):
+        n = _find_xpath(xpath)
+    else:
+        for xp in xpath:
+            n = _find_xpath(xp)
+            if n is not None:
+                break
 
-    n = node.find(xpath)
     if n is None:
         if default is not NO_DEFAULT:
             return default
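Note on the xpath_element hunk above: the helper now accepts either a single xpath or a sequence of them and returns the first node that exists, which lets extractors probe several document layouts in one call. A minimal sketch of that lookup order; the document and tag names below are invented:

    import xml.etree.ElementTree as ET

    def first_match(node, xpaths):
        # Single xpath or sequence, first hit wins - as in xpath_element above
        if isinstance(xpaths, str):
            xpaths = [xpaths]
        for xp in xpaths:
            n = node.find(xp)
            if n is not None:
                return n

    doc = ET.fromstring('<root><b>two</b></root>')
    print(first_match(doc, ['./a', './b']).text)  # two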
@@ -355,13 +366,20 @@ def sanitize_path(s):
     if drive_or_unc:
         norm_path.pop(0)
     sanitized_path = [
-        path_part if path_part in ['.', '..'] else re.sub('(?:[/<>:"\\|\\\\?\\*]|\.$)', '#', path_part)
+        path_part if path_part in ['.', '..'] else re.sub('(?:[/<>:"\\|\\\\?\\*]|[\s.]$)', '#', path_part)
         for path_part in norm_path]
     if drive_or_unc:
         sanitized_path.insert(0, drive_or_unc + os.path.sep)
     return os.path.join(*sanitized_path)
 
 
+# Prepend protocol-less URLs with `http:` scheme in order to mitigate the number of
+# unwanted failures due to missing protocol
+def sanitized_Request(url, *args, **kwargs):
+    return compat_urllib_request.Request(
+        'http:%s' % url if url.startswith('//') else url, *args, **kwargs)
+
+
 def orderedSet(iterable):
     """ Remove all duplicates from the input iterable """
     res = []
@@ -385,10 +403,14 @@ def _htmlentity_transform(entity):
             numstr = '0%s' % numstr
         else:
             base = 10
-        return compat_chr(int(numstr, base))
+        # See https://github.com/rg3/youtube-dl/issues/7518
+        try:
+            return compat_chr(int(numstr, base))
+        except ValueError:
+            pass
 
     # Unknown entity in name, return its literal representation
-    return ('&%s;' % entity)
+    return '&%s;' % entity
 
 
 def unescapeHTML(s):
@@ -641,6 +663,16 @@ def _create_http_connection(ydl_handler, http_class, is_https, *args, **kwargs):
     return hc
 
 
+def handle_youtubedl_headers(headers):
+    filtered_headers = headers
+
+    if 'Youtubedl-no-compression' in filtered_headers:
+        filtered_headers = dict((k, v) for k, v in filtered_headers.items() if k.lower() != 'accept-encoding')
+        del filtered_headers['Youtubedl-no-compression']
+
+    return filtered_headers
+
+
 class YoutubeDLHandler(compat_urllib_request.HTTPHandler):
     """Handler for HTTP requests and responses.
 
@@ -648,7 +680,7 @@ class YoutubeDLHandler(compat_urllib_request.HTTPHandler):
     the standard headers to every HTTP request and handles gzipped and
     deflated responses from web servers. If compression is to be avoided in
     a particular request, the original request in the program code only has
-    to include the HTTP header "Youtubedl-No-Compression", which will be
+    to include the HTTP header "Youtubedl-no-compression", which will be
     removed before making the real request.
 
     Part of this code was copied from:
@@ -709,10 +741,8 @@ class YoutubeDLHandler(compat_urllib_request.HTTPHandler):
             # The dict keys are capitalized because of this bug by urllib
             if h.capitalize() not in req.headers:
                 req.add_header(h, v)
-        if 'Youtubedl-no-compression' in req.headers:
-            if 'Accept-encoding' in req.headers:
-                del req.headers['Accept-encoding']
-            del req.headers['Youtubedl-no-compression']
+
+        req.headers = handle_youtubedl_headers(req.headers)
 
         if sys.version_info < (2, 7) and '#' in req.get_full_url():
             # Python 2.6 is brain-dead when it comes to fragments
@@ -743,11 +773,13 @@ class YoutubeDLHandler(compat_urllib_request.HTTPHandler):
                     raise original_ioerror
             resp = self.addinfourl_wrapper(uncompressed, old_resp.headers, old_resp.url, old_resp.code)
             resp.msg = old_resp.msg
+            del resp.headers['Content-encoding']
         # deflate
         if resp.headers.get('Content-encoding', '') == 'deflate':
             gz = io.BytesIO(self.deflate(resp.read()))
             resp = self.addinfourl_wrapper(gz, old_resp.headers, old_resp.url, old_resp.code)
             resp.msg = old_resp.msg
+            del resp.headers['Content-encoding']
         # Percent-encode redirect URL of Location HTTP header to satisfy RFC 3986 (see
         # https://github.com/rg3/youtube-dl/issues/6457).
         if 300 <= resp.code < 400:
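Note on the sanitized_Request addition above: scraped pages often carry scheme-relative `//host/path` URLs, which urllib rejects; the helper simply grafts `http:` on before building the request. A self-contained version with a Python 2/3 compat shim; the example URL is invented:

    try:
        from urllib.request import Request  # Python 3
    except ImportError:
        from urllib2 import Request  # Python 2

    def sanitized_Request(url, *args, **kwargs):
        # Same one-liner as the utils.py addition above
        return Request('http:%s' % url if url.startswith('//') else url,
                       *args, **kwargs)

    req = sanitized_Request('//example.com/video.mp4')
    print(req.get_full_url())  # http://example.com/video.mp4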
@@ -813,9 +845,11 @@ def parse_iso8601(date_str, delimiter='T', timezone=None):
     if date_str is None:
         return None
 
+    date_str = re.sub(r'\.[0-9]+', '', date_str)
+
     if timezone is None:
         m = re.search(
-            r'(\.[0-9]+)?(?:Z$| ?(?P<sign>\+|-)(?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})$)',
+            r'(?:Z$| ?(?P<sign>\+|-)(?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})$)',
             date_str)
         if not m:
             timezone = datetime.timedelta()
@@ -828,9 +862,12 @@
             timezone = datetime.timedelta(
                 hours=sign * int(m.group('hours')),
                 minutes=sign * int(m.group('minutes')))
-    date_format = '%Y-%m-%d{0}%H:%M:%S'.format(delimiter)
-    dt = datetime.datetime.strptime(date_str, date_format) - timezone
-    return calendar.timegm(dt.timetuple())
+    try:
+        date_format = '%Y-%m-%d{0}%H:%M:%S'.format(delimiter)
+        dt = datetime.datetime.strptime(date_str, date_format) - timezone
+        return calendar.timegm(dt.timetuple())
+    except ValueError:
+        pass
 
 
 def unified_strdate(date_str, day_first=True):
@@ -895,7 +932,8 @@ def unified_strdate(date_str, day_first=True):
         timetuple = email.utils.parsedate_tz(date_str)
         if timetuple:
             upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d')
-    return upload_date
+    if upload_date is not None:
+        return compat_str(upload_date)
 
 
 def determine_ext(url, default_ext='unknown_video'):
@@ -904,6 +942,21 @@ def determine_ext(url, default_ext='unknown_video'):
     guess = url.partition('?')[0].rpartition('.')[2]
     if re.match(r'^[A-Za-z0-9]+$', guess):
         return guess
+    elif guess.rstrip('/') in (
+            'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'aac',
+            'flv', 'f4v', 'f4a', 'f4b',
+            'webm', 'ogg', 'ogv', 'oga', 'ogx', 'spx', 'opus',
+            'mkv', 'mka', 'mk3d',
+            'avi', 'divx',
+            'mov',
+            'asf', 'wmv', 'wma',
+            '3gp', '3g2',
+            'mp3',
+            'flac',
+            'ape',
+            'wav',
+            'f4f', 'f4m', 'm3u8', 'smil'):
+        return guess.rstrip('/')
     else:
         return default_ext
 
@@ -1355,6 +1408,15 @@ def remove_end(s, end):
     return s
 
 
+def remove_quotes(s):
+    if s is None or len(s) < 2:
+        return s
+    for quote in ('"', "'", ):
+        if s[0] == quote and s[-1] == quote:
+            return s[1:-1]
+    return s
+
+
 def url_basename(url):
     path = compat_urlparse.urlparse(url).path
     return path.strip('/').split('/')[-1]
@@ -1371,7 +1433,12 @@ def int_or_none(v, scale=1, default=None, get_attr=None, invscale=1):
         v = getattr(v, get_attr, None)
     if v == '':
         v = None
-    return default if v is None else (int(v) * invscale // scale)
+    if v is None:
+        return default
+    try:
+        return int(v) * invscale // scale
+    except ValueError:
+        return default
 
 
 def str_or_none(v, default=None):
@@ -1387,7 +1454,12 @@ def str_to_int(int_str):
 
 
 def float_or_none(v, scale=1, invscale=1, default=None):
-    return default if v is None else (float(v) * invscale / scale)
+    if v is None:
+        return default
+    try:
+        return float(v) * invscale / scale
+    except ValueError:
+        return default
 
 
 def parse_duration(s):
@@ -1637,30 +1709,13 @@ def urlencode_postdata(*args, **kargs):
 
 
 def encode_dict(d, encoding='utf-8'):
-    return dict((k.encode(encoding), v.encode(encoding)) for k, v in d.items())
-
-
-try:
-    etree_iter = xml.etree.ElementTree.Element.iter
-except AttributeError:  # Python <=2.6
-    etree_iter = lambda n: n.findall('.//*')
-
+    def encode(v):
+        return v.encode(encoding) if isinstance(v, compat_basestring) else v
+    return dict((encode(k), encode(v)) for k, v in d.items())
 
-def parse_xml(s):
-    class TreeBuilder(xml.etree.ElementTree.TreeBuilder):
-        def doctype(self, name, pubid, system):
-            pass  # Ignore doctypes
-
-    parser = xml.etree.ElementTree.XMLParser(target=TreeBuilder())
-    kwargs = {'parser': parser} if sys.version_info >= (2, 7) else {}
-    tree = xml.etree.ElementTree.XML(s.encode('utf-8'), **kwargs)
-    # Fix up XML parser in Python 2.x
-    if sys.version_info < (3, 0):
-        for n in etree_iter(tree):
-            if n.text is not None:
-                if not isinstance(n.text, compat_str):
-                    n.text = n.text.decode('utf-8')
-    return tree
+
+def encode_compat_str(string, encoding=preferredencoding(), errors='strict'):
+    return string if isinstance(string, compat_str) else compat_str(string, encoding, errors)
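Note on the int_or_none/float_or_none hunks above: both helpers previously raised on non-numeric strings such as 'N/A'; they now fall back to the default. A trimmed sketch of the int variant (the get_attr handling is omitted here):

    def int_or_none(v, scale=1, default=None, invscale=1):
        if v == '':
            v = None
        if v is None:
            return default
        try:
            return int(v) * invscale // scale
        except ValueError:
            return default

    print(int_or_none('1024', scale=1000))  # 1
    print(int_or_none('N/A', default=0))    # 0 rather than a ValueError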
 
 
 US_RATINGS = {
@@ -1690,8 +1745,8 @@ def js_to_json(code):
         if v in ('true', 'false', 'null'):
             return v
         if v.startswith('"'):
-            return v
-        if v.startswith("'"):
+            v = re.sub(r"\\'", "'", v[1:-1])
+        elif v.startswith("'"):
             v = v[1:-1]
             v = re.sub(r"\\\\|\\'|\"", lambda m: {
                 '\\\\': '\\\\',
@@ -1757,6 +1812,15 @@ def args_to_str(args):
     return ' '.join(shlex_quote(a) for a in args)
 
 
+def error_to_compat_str(err):
+    err_str = str(err)
+    # On python 2 error byte string must be decoded with proper
+    # encoding rather than ascii
+    if sys.version_info[0] < 3:
+        err_str = err_str.decode(preferredencoding())
+    return err_str
+
+
 def mimetype2ext(mt):
     _, _, res = mt.rpartition('/')
 
@@ -1785,6 +1849,10 @@ def urlhandle_detect_ext(url_handle):
     return mimetype2ext(getheader('Content-Type'))
 
 
+def encode_data_uri(data, mime_type):
+    return 'data:%s;base64,%s' % (mime_type, base64.b64encode(data).decode('ascii'))
+
+
 def age_restricted(content_limit, age_limit):
     """ Returns True iff the content should be blocked """
 
@@ -1923,15 +1991,15 @@ def match_filter_func(filter_str):
 
 def parse_dfxp_time_expr(time_expr):
     if not time_expr:
-        return 0.0
+        return
 
     mobj = re.match(r'^(?P<time_offset>\d+(?:\.\d+)?)s?$', time_expr)
     if mobj:
         return float(mobj.group('time_offset'))
 
-    mobj = re.match(r'^(\d+):(\d\d):(\d\d(?:\.\d+)?)$', time_expr)
+    mobj = re.match(r'^(\d+):(\d\d):(\d\d(?:(?:\.|:)\d+)?)$', time_expr)
     if mobj:
-        return 3600 * int(mobj.group(1)) + 60 * int(mobj.group(2)) + float(mobj.group(3))
+        return 3600 * int(mobj.group(1)) + 60 * int(mobj.group(2)) + float(mobj.group(3).replace(':', '.'))
 
 
 def srt_subtitles_timecode(seconds):
@@ -1959,7 +2027,7 @@ def dfxp2srt(dfxp_data):
 
         return out
 
-    dfxp = xml.etree.ElementTree.fromstring(dfxp_data.encode('utf-8'))
+    dfxp = compat_etree_fromstring(dfxp_data.encode('utf-8'))
     out = []
     paras = dfxp.findall(_x('.//ttml:p')) or dfxp.findall(_x('.//ttaf1:p')) or dfxp.findall('.//p')
 
@@ -1967,10 +2035,15 @@ def dfxp2srt(dfxp_data):
         raise ValueError('Invalid dfxp/TTML subtitle')
 
     for para, index in zip(paras, itertools.count(1)):
-        begin_time = parse_dfxp_time_expr(para.attrib['begin'])
+        begin_time = parse_dfxp_time_expr(para.attrib.get('begin'))
         end_time = parse_dfxp_time_expr(para.attrib.get('end'))
+        dur = parse_dfxp_time_expr(para.attrib.get('dur'))
+        if begin_time is None:
+            continue
         if not end_time:
-            end_time = begin_time + parse_dfxp_time_expr(para.attrib['dur'])
+            if not dur:
+                continue
+            end_time = begin_time + dur
 
         out.append('%d\n%s --> %s\n%s\n\n' % (
             index,
             srt_subtitles_timecode(begin_time),
diff --git a/youtube_dl/version.py b/youtube_dl/version.py
index 7ef4f2755..a62baa305 100644
--- a/youtube_dl/version.py
+++ b/youtube_dl/version.py
@@ -1,3 +1,3 @@
 from __future__ import unicode_literals
 
-__version__ = '2015.09.22'
+__version__ = '2015.12.29'
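Note on the dfxp/TTML hunks above: parse_dfxp_time_expr now distinguishes "no timestamp" (None) from an offset of 0.0, and accepts frame-style HH:MM:SS:FF values by treating the trailing ':NN' as a decimal fraction. The rewritten helper is small enough to run stand-alone:

    import re

    def parse_dfxp_time_expr(time_expr):
        if not time_expr:
            return

        mobj = re.match(r'^(?P<time_offset>\d+(?:\.\d+)?)s?$', time_expr)
        if mobj:
            return float(mobj.group('time_offset'))

        mobj = re.match(r'^(\d+):(\d\d):(\d\d(?:(?:\.|:)\d+)?)$', time_expr)
        if mobj:
            return 3600 * int(mobj.group(1)) + 60 * int(mobj.group(2)) + float(
                mobj.group(3).replace(':', '.'))

    print(parse_dfxp_time_expr('00:01:02.5'))   # 62.5
    print(parse_dfxp_time_expr('00:01:02:50'))  # 62.5
    print(parse_dfxp_time_expr(None))           # None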