Diffstat (limited to 'youtube_dl')
112 files changed, 7137 insertions, 1296 deletions
diff --git a/youtube_dl/FileDownloader.py b/youtube_dl/FileDownloader.py
index 155895fe2..8ecabab1a 100644
--- a/youtube_dl/FileDownloader.py
+++ b/youtube_dl/FileDownloader.py
@@ -64,31 +64,56 @@ class FileDownloader(object):
         return '%.2f%s' % (converted, suffix)
 
     @staticmethod
+    def format_seconds(seconds):
+        (mins, secs) = divmod(seconds, 60)
+        (hours, mins) = divmod(mins, 60)
+        if hours > 99:
+            return '--:--:--'
+        if hours == 0:
+            return '%02d:%02d' % (mins, secs)
+        else:
+            return '%02d:%02d:%02d' % (hours, mins, secs)
+
+    @staticmethod
     def calc_percent(byte_counter, data_len):
         if data_len is None:
+            return None
+        return float(byte_counter) / float(data_len) * 100.0
+
+    @staticmethod
+    def format_percent(percent):
+        if percent is None:
             return '---.-%'
-        return '%6s' % ('%3.1f%%' % (float(byte_counter) / float(data_len) * 100.0))
+        return '%6s' % ('%3.1f%%' % percent)
 
     @staticmethod
     def calc_eta(start, now, total, current):
         if total is None:
-            return '--:--'
+            return None
         dif = now - start
         if current == 0 or dif < 0.001: # One millisecond
-            return '--:--'
+            return None
         rate = float(current) / dif
-        eta = int((float(total) - float(current)) / rate)
-        (eta_mins, eta_secs) = divmod(eta, 60)
-        if eta_mins > 99:
+        return int((float(total) - float(current)) / rate)
+
+    @staticmethod
+    def format_eta(eta):
+        if eta is None:
             return '--:--'
-        return '%02d:%02d' % (eta_mins, eta_secs)
+        return FileDownloader.format_seconds(eta)
 
     @staticmethod
     def calc_speed(start, now, bytes):
         dif = now - start
         if bytes == 0 or dif < 0.001: # One millisecond
+            return None
+        return float(bytes) / dif
+
+    @staticmethod
+    def format_speed(speed):
+        if speed is None:
             return '%10s' % '---b/s'
-        return '%10s' % ('%s/s' % FileDownloader.format_bytes(float(bytes) / dif))
+        return '%10s' % ('%s/s' % FileDownloader.format_bytes(speed))
 
     @staticmethod
     def best_block_size(elapsed_time, bytes):
@@ -197,11 +222,14 @@ class FileDownloader(object):
         """Report destination filename."""
         self.to_screen(u'[download] Destination: ' + filename)
 
-    def report_progress(self, percent_str, data_len_str, speed_str, eta_str):
+    def report_progress(self, percent, data_len_str, speed, eta):
         """Report download progress."""
         if self.params.get('noprogress', False):
             return
         clear_line = (u'\x1b[K' if sys.stderr.isatty() and os.name != 'nt' else u'')
+        eta_str = self.format_eta(eta)
+        percent_str = self.format_percent(percent)
+        speed_str = self.format_speed(speed)
         if self.params.get('progress_with_newline', False):
             self.to_screen(u'[download] %s of %s at %s ETA %s' %
                 (percent_str, data_len_str, speed_str, eta_str))
@@ -230,16 +258,19 @@ class FileDownloader(object):
         """Report it was impossible to resume download."""
         self.to_screen(u'[download] Unable to resume')
 
-    def report_finish(self):
+    def report_finish(self, data_len_str, tot_time):
         """Report download finished."""
         if self.params.get('noprogress', False):
             self.to_screen(u'[download] Download completed')
         else:
-            self.to_screen(u'')
+            clear_line = (u'\x1b[K' if sys.stderr.isatty() and os.name != 'nt' else u'')
+            self.to_screen(u'\r%s[download] 100%% of %s in %s' %
+                (clear_line, data_len_str, self.format_seconds(tot_time)))
 
     def _download_with_rtmpdump(self, filename, url, player_url, page_url, play_path, tc_url):
         self.report_destination(filename)
         tmpfilename = self.temp_name(filename)
+        test = self.params.get('test', False)
 
         # Check for rtmpdump first
         try:
@@ -261,6 +292,8 @@ class FileDownloader(object):
             basic_args += ['--playpath', play_path]
         if tc_url is not None:
             basic_args += ['--tcUrl', url]
+        if test:
+            basic_args += ['--stop', '1']
         args = basic_args + [[], ['--resume', '--skip', '1']][self.params.get('continuedl', False)]
         if self.params.get('verbose', False):
             try:
@@ -270,7 +303,7 @@
                 shell_quote = repr
             self.to_screen(u'[debug] rtmpdump command line: ' + shell_quote(args))
         retval = subprocess.call(args)
-        while retval == 2 or retval == 1:
+        while (retval == 2 or retval == 1) and not test:
             prevsize = os.path.getsize(encodeFilename(tmpfilename))
             self.to_screen(u'\r[rtmpdump] %s bytes' % prevsize, skip_eol=True)
             time.sleep(5.0) # This seems to be needed
@@ -283,7 +316,7 @@
                 self.to_screen(u'\r[rtmpdump] Could not download the whole video. This can happen for some advertisements.')
                 retval = 0
                 break
-        if retval == 0:
+        if retval == 0 or (test and retval == 2):
             fsize = os.path.getsize(encodeFilename(tmpfilename))
             self.to_screen(u'\r[rtmpdump] %s bytes' % fsize)
             self.try_rename(tmpfilename, filename)
@@ -329,6 +362,35 @@
             self.report_error(u'mplayer exited with code %d' % retval)
             return False
 
+    def _download_m3u8_with_ffmpeg(self, filename, url):
+        self.report_destination(filename)
+        tmpfilename = self.temp_name(filename)
+
+        args = ['ffmpeg', '-y', '-i', url, '-f', 'mp4', tmpfilename]
+        # Check for ffmpeg first
+        try:
+            subprocess.call(['ffmpeg', '-h'], stdout=(open(os.path.devnull, 'w')), stderr=subprocess.STDOUT)
+        except (OSError, IOError):
+            self.report_error(u'm3u8 download detected but "%s" could not be run' % args[0] )
+            return False
+
+        retval = subprocess.call(args)
+        if retval == 0:
+            fsize = os.path.getsize(encodeFilename(tmpfilename))
+            self.to_screen(u'\r[%s] %s bytes' % (args[0], fsize))
+            self.try_rename(tmpfilename, filename)
+            self._hook_progress({
+                'downloaded_bytes': fsize,
+                'total_bytes': fsize,
+                'filename': filename,
+                'status': 'finished',
+            })
+            return True
+        else:
+            self.to_stderr(u"\n")
+            self.report_error(u'ffmpeg exited with code %d' % retval)
+            return False
+
     def _do_download(self, filename, info_dict):
         url = info_dict['url']
@@ -339,6 +401,7 @@
             self._hook_progress({
                 'filename': filename,
                 'status': 'finished',
+                'total_bytes': os.path.getsize(encodeFilename(filename)),
             })
             return True
@@ -354,6 +417,10 @@
         if url.startswith('mms') or url.startswith('rtsp'):
             return self._download_with_mplayer(filename, url)
 
+        # m3u8 manifest are downloaded with ffmpeg
+        if determine_ext(url) == u'm3u8':
+            return self._download_m3u8_with_ffmpeg(filename, url)
+
         tmpfilename = self.temp_name(filename)
         stream = None
@@ -481,13 +548,14 @@
                 block_size = self.best_block_size(after - before, len(data_block))
 
             # Progress message
-            speed_str = self.calc_speed(start, time.time(), byte_counter - resume_len)
+            speed = self.calc_speed(start, time.time(), byte_counter - resume_len)
             if data_len is None:
                 self.report_progress('Unknown %', data_len_str, speed_str, 'Unknown ETA')
+                eta = None
             else:
-                percent_str = self.calc_percent(byte_counter, data_len)
-                eta_str = self.calc_eta(start, time.time(), data_len - resume_len, byte_counter - resume_len)
-                self.report_progress(percent_str, data_len_str, speed_str, eta_str)
+                percent = self.calc_percent(byte_counter, data_len)
+                eta = self.calc_eta(start, time.time(), data_len - resume_len, byte_counter - resume_len)
+                self.report_progress(percent, data_len_str, speed, eta)
 
             self._hook_progress({
                 'downloaded_bytes': byte_counter,
@@ -495,6 +563,8 @@
                 'tmpfilename': tmpfilename,
                 'filename': filename,
                 'status': 'downloading',
+                'eta': eta,
+                'speed': speed,
             })
 
             # Apply rate limit
@@ -505,7 +575,7 @@
             self.report_error(u'Did not get any data blocks')
             return False
         stream.close()
-        self.report_finish()
+        self.report_finish(data_len_str, (time.time() - start))
         if data_len is not None and byte_counter != data_len:
             raise ContentTooShortError(byte_counter, int(data_len))
         self.try_rename(tmpfilename, filename)
@@ -537,6 +607,8 @@
         * downloaded_bytes: Bytes on disks
         * total_bytes: Total bytes, None if unknown
         * tmpfilename: The filename we're currently writing to
+        * eta: The estimated time in seconds, None if unknown
+        * speed: The download speed in bytes/second, None if unknown
 
         Hooks are guaranteed to be called at least once (with status "finished")
         if the download is successful.
diff --git a/youtube_dl/PostProcessor.py b/youtube_dl/PostProcessor.py
index 8c5e53991..13b56ede5 100644
--- a/youtube_dl/PostProcessor.py
+++ b/youtube_dl/PostProcessor.py
@@ -3,7 +3,14 @@ import subprocess
 import sys
 import time
 
-from .utils import *
+
+from .utils import (
+    compat_subprocess_get_DEVNULL,
+    encodeFilename,
+    PostProcessingError,
+    shell_quote,
+    subtitles_filename,
+)
 
 
 class PostProcessor(object):
@@ -71,12 +78,19 @@ class FFmpegPostProcessor(PostProcessor):
         programs = ['avprobe', 'avconv', 'ffmpeg', 'ffprobe']
         return dict((program, executable(program)) for program in programs)
 
-    def run_ffmpeg(self, path, out_path, opts):
+    def run_ffmpeg_multiple_files(self, input_paths, out_path, opts):
         if not self._exes['ffmpeg'] and not self._exes['avconv']:
             raise FFmpegPostProcessorError(u'ffmpeg or avconv not found. Please install one.')
-        cmd = ([self._exes['avconv'] or self._exes['ffmpeg'], '-y', '-i', encodeFilename(path)]
+
+        files_cmd = []
+        for path in input_paths:
+            files_cmd.extend(['-i', encodeFilename(path)])
+        cmd = ([self._exes['avconv'] or self._exes['ffmpeg'], '-y'] + files_cmd
                + opts +
                [encodeFilename(self._ffmpeg_filename_argument(out_path))])
+
+        if self._downloader.params.get('verbose', False):
+            self._downloader.to_screen(u'[debug] ffmpeg command line: %s' % shell_quote(cmd))
         p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
         stdout,stderr = p.communicate()
         if p.returncode != 0:
@@ -84,6 +98,9 @@
             msg = stderr.strip().split('\n')[-1]
             raise FFmpegPostProcessorError(msg)
 
+    def run_ffmpeg(self, path, out_path, opts):
+        self.run_ffmpeg_multiple_files([path], out_path, opts)
+
     def _ffmpeg_filename_argument(self, fn):
         # ffmpeg broke --, see https://ffmpeg.org/trac/ffmpeg/ticket/2127 for details
         if fn.startswith(u'-'):
@@ -100,7 +117,8 @@ class FFmpegExtractAudioPP(FFmpegPostProcessor):
         self._nopostoverwrites = nopostoverwrites
 
     def get_audio_codec(self, path):
-        if not self._exes['ffprobe'] and not self._exes['avprobe']: return None
+        if not self._exes['ffprobe'] and not self._exes['avprobe']:
+            raise PostProcessingError(u'ffprobe or avprobe not found. Please install one.')
         try:
             cmd = [self._exes['avprobe'] or self._exes['ffprobe'], '-show_streams', encodeFilename(self._ffmpeg_filename_argument(path))]
             handle = subprocess.Popen(cmd, stderr=compat_subprocess_get_DEVNULL(), stdout=subprocess.PIPE)
@@ -128,7 +146,7 @@ class FFmpegExtractAudioPP(FFmpegPostProcessor):
         try:
             FFmpegPostProcessor.run_ffmpeg(self, path, out_path, opts)
         except FFmpegPostProcessorError as err:
-            raise AudioConversionError(err.message)
+            raise AudioConversionError(err.msg)
 
     def run(self, information):
         path = information['filepath']
@@ -168,7 +186,8 @@
                 extension = self._preferredcodec
             more_opts = []
             if self._preferredquality is not None:
-                if int(self._preferredquality) < 10:
+                # The opus codec doesn't support the -aq option
+                if int(self._preferredquality) < 10 and extension != 'opus':
                     more_opts += [self._exes['avconv'] and '-q:a' or '-aq', self._preferredquality]
                 else:
                     more_opts += [self._exes['avconv'] and '-b:a' or '-ab', self._preferredquality + 'k']
@@ -198,7 +217,7 @@
         except:
             etype,e,tb = sys.exc_info()
             if isinstance(e, AudioConversionError):
-                msg = u'audio conversion failed: ' + e.message
+                msg = u'audio conversion failed: ' + e.msg
             else:
                 msg = u'error running ' + (self._exes['avconv'] and 'avconv' or 'ffmpeg')
             raise PostProcessingError(msg)
@@ -208,7 +227,7 @@
             try:
                 os.utime(encodeFilename(new_path), (time.time(), information['filetime']))
             except:
-                self._downloader.to_stderr(u'WARNING: Cannot update utime of audio file')
+                self._downloader.report_warning(u'Cannot update utime of audio file')
 
         information['filepath'] = new_path
         return self._nopostoverwrites,information
@@ -231,3 +250,262 @@ class FFmpegVideoConvertor(FFmpegPostProcessor):
         information['format'] = self._preferedformat
         information['ext'] = self._preferedformat
         return False,information
+
+
+class FFmpegEmbedSubtitlePP(FFmpegPostProcessor):
+    # See http://www.loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt
+    _lang_map = {
+        'aa': 'aar',
+        'ab': 'abk',
+        'ae': 'ave',
+        'af': 'afr',
+        'ak': 'aka',
+        'am': 'amh',
+        'an': 'arg',
+        'ar': 'ara',
+        'as': 'asm',
+        'av': 'ava',
+        'ay': 'aym',
+        'az': 'aze',
+        'ba': 'bak',
+        'be': 'bel',
+        'bg': 'bul',
+        'bh': 'bih',
+        'bi': 'bis',
+        'bm': 'bam',
+        'bn': 'ben',
+        'bo': 'bod',
+        'br': 'bre',
+        'bs': 'bos',
+        'ca': 'cat',
+        'ce': 'che',
+        'ch': 'cha',
+        'co': 'cos',
+        'cr': 'cre',
+        'cs': 'ces',
+        'cu': 'chu',
+        'cv': 'chv',
+        'cy': 'cym',
+        'da': 'dan',
+        'de': 'deu',
+        'dv': 'div',
+        'dz': 'dzo',
+        'ee': 'ewe',
+        'el': 'ell',
+        'en': 'eng',
+        'eo': 'epo',
+        'es': 'spa',
+        'et': 'est',
+        'eu': 'eus',
+        'fa': 'fas',
+        'ff': 'ful',
+        'fi': 'fin',
+        'fj': 'fij',
+        'fo': 'fao',
+        'fr': 'fra',
+        'fy': 'fry',
+        'ga': 'gle',
+        'gd': 'gla',
+        'gl': 'glg',
+        'gn': 'grn',
+        'gu': 'guj',
+        'gv': 'glv',
+        'ha': 'hau',
+        'he': 'heb',
+        'hi': 'hin',
+        'ho': 'hmo',
+        'hr': 'hrv',
+        'ht': 'hat',
+        'hu': 'hun',
+        'hy': 'hye',
+        'hz': 'her',
+        'ia': 'ina',
+        'id': 'ind',
+        'ie': 'ile',
+        'ig': 'ibo',
+        'ii': 'iii',
+        'ik': 'ipk',
+        'io': 'ido',
+        'is': 'isl',
+        'it': 'ita',
+        'iu': 'iku',
+        'ja': 'jpn',
+        'jv': 'jav',
+        'ka': 'kat',
+        'kg': 'kon',
+        'ki': 'kik',
+        'kj': 'kua',
+        'kk': 'kaz',
+        'kl': 'kal',
+        'km': 'khm',
+        'kn': 'kan',
+        'ko': 'kor',
+        'kr': 'kau',
+        'ks': 'kas',
+        'ku': 'kur',
+        'kv': 'kom',
+        'kw': 'cor',
+        'ky': 'kir',
+        'la': 'lat',
+        'lb': 'ltz',
+        'lg': 'lug',
+        'li': 'lim',
+        'ln': 'lin',
+        'lo': 'lao',
+        'lt': 'lit',
+        'lu': 'lub',
+        'lv': 'lav',
+        'mg': 'mlg',
+        'mh': 'mah',
+        'mi': 'mri',
+        'mk': 'mkd',
+        'ml': 'mal',
+        'mn': 'mon',
+        'mr': 'mar',
+        'ms': 'msa',
+        'mt': 'mlt',
+        'my': 'mya',
+        'na': 'nau',
+        'nb': 'nob',
+        'nd': 'nde',
+        'ne': 'nep',
+        'ng': 'ndo',
+        'nl': 'nld',
+        'nn': 'nno',
+        'no': 'nor',
+        'nr': 'nbl',
+        'nv': 'nav',
+        'ny': 'nya',
+        'oc': 'oci',
+        'oj': 'oji',
+        'om': 'orm',
+        'or': 'ori',
+        'os': 'oss',
+        'pa': 'pan',
+        'pi': 'pli',
+        'pl': 'pol',
+        'ps': 'pus',
+        'pt': 'por',
+        'qu': 'que',
+        'rm': 'roh',
+        'rn': 'run',
+        'ro': 'ron',
+        'ru': 'rus',
+        'rw': 'kin',
+        'sa': 'san',
+        'sc': 'srd',
+        'sd': 'snd',
+        'se': 'sme',
+        'sg': 'sag',
+        'si': 'sin',
+        'sk': 'slk',
+        'sl': 'slv',
+        'sm': 'smo',
+        'sn': 'sna',
+        'so': 'som',
+        'sq': 'sqi',
+        'sr': 'srp',
+        'ss': 'ssw',
+        'st': 'sot',
+        'su': 'sun',
+        'sv': 'swe',
+        'sw': 'swa',
+        'ta': 'tam',
+        'te': 'tel',
+        'tg': 'tgk',
+        'th': 'tha',
+        'ti': 'tir',
+        'tk': 'tuk',
+        'tl': 'tgl',
+        'tn': 'tsn',
+        'to': 'ton',
+        'tr': 'tur',
+        'ts': 'tso',
+        'tt': 'tat',
+        'tw': 'twi',
+        'ty': 'tah',
+        'ug': 'uig',
+        'uk': 'ukr',
+        'ur': 'urd',
+        'uz': 'uzb',
+        've': 'ven',
+        'vi': 'vie',
+        'vo': 'vol',
+        'wa': 'wln',
+        'wo': 'wol',
+        'xh': 'xho',
+        'yi': 'yid',
+        'yo': 'yor',
+        'za': 'zha',
+        'zh': 'zho',
+        'zu': 'zul',
+    }
+
+    def __init__(self, downloader=None, subtitlesformat='srt'):
+        super(FFmpegEmbedSubtitlePP, self).__init__(downloader)
+        self._subformat = subtitlesformat
+
+    @classmethod
+    def _conver_lang_code(cls, code):
+        """Convert language code from ISO 639-1 to ISO 639-2/T"""
+        return cls._lang_map.get(code[:2])
+
+    def run(self, information):
+        if information['ext'] != u'mp4':
+            self._downloader.to_screen(u'[ffmpeg] Subtitles can only be embedded in mp4 files')
+            return True, information
+        if not information.get('subtitles'):
+            self._downloader.to_screen(u'[ffmpeg] There aren\'t any subtitles to embed')
+            return True, information
+
+        sub_langs = [key for key in information['subtitles']]
+        filename = information['filepath']
+        input_files = [filename] + [subtitles_filename(filename, lang, self._subformat) for lang in sub_langs]
+
+        opts = ['-map', '0:0', '-map', '0:1', '-c:v', 'copy', '-c:a', 'copy']
+        for (i, lang) in enumerate(sub_langs):
+            opts.extend(['-map', '%d:0' % (i+1), '-c:s:%d' % i, 'mov_text'])
+            lang_code = self._conver_lang_code(lang)
+            if lang_code is not None:
+                opts.extend(['-metadata:s:s:%d' % i, 'language=%s' % lang_code])
+        opts.extend(['-f', 'mp4'])
+
+        temp_filename = filename + u'.temp'
+        self._downloader.to_screen(u'[ffmpeg] Embedding subtitles in \'%s\'' % filename)
+        self.run_ffmpeg_multiple_files(input_files, temp_filename, opts)
+        os.remove(encodeFilename(filename))
+        os.rename(encodeFilename(temp_filename), encodeFilename(filename))
+
+        return True, information
+
+
+class FFmpegMetadataPP(FFmpegPostProcessor):
+    def run(self, info):
+        metadata = {}
+        if info.get('title') is not None:
+            metadata['title'] = info['title']
+        if info.get('upload_date') is not None:
+            metadata['date'] = info['upload_date']
+        if info.get('uploader') is not None:
+            metadata['artist'] = info['uploader']
+        elif info.get('uploader_id') is not None:
+            metadata['artist'] = info['uploader_id']
+
+        if not metadata:
+            self._downloader.to_screen(u'[ffmpeg] There isn\'t any metadata to add')
+            return True, info
+
+        filename = info['filepath']
+        ext = os.path.splitext(filename)[1][1:]
+        temp_filename = filename + u'.temp'
+
+        options = ['-c', 'copy']
+        for (name, value) in metadata.items():
+            options.extend(['-metadata', '%s="%s"' % (name, value)])
+        options.extend(['-f', ext])
+
+        self._downloader.to_screen(u'[ffmpeg] Adding metadata to \'%s\'' % filename)
+        self.run_ffmpeg(filename, temp_filename, options)
+        os.remove(encodeFilename(filename))
+        os.rename(encodeFilename(temp_filename), encodeFilename(filename))
+        return True, info
diff --git a/youtube_dl/YoutubeDL.py b/youtube_dl/YoutubeDL.py
index c76f1118e..c8054544a 100644
--- a/youtube_dl/YoutubeDL.py
+++ b/youtube_dl/YoutubeDL.py
@@ -3,6 +3,7 @@
 
 from __future__ import absolute_import
 
+import errno
 import io
 import os
 import re
@@ -70,16 +71,26 @@ class YoutubeDL(object):
     logtostderr:       Log messages to stderr instead of stdout.
     writedescription: Write the video description to a .description file
     writeinfojson:     Write the video description to a .info.json file
+    writeannotations:  Write the video annotations to a .annotations.xml file
     writethumbnail:    Write the thumbnail image to a file
     writesubtitles:    Write the video subtitles to a file
     writeautomaticsub: Write the automatic subtitles to a file
     allsubtitles:      Downloads all the subtitles of the video
+                       (requires writesubtitles or writeautomaticsub)
     listsubtitles:     Lists all available subtitles for the video
     subtitlesformat:   Subtitle format [srt/sbv/vtt] (default=srt)
-    subtitleslang:     Language of the subtitles to download
+    subtitleslangs:    List of languages of the subtitles to download
     keepvideo:         Keep the video file after post-processing
     daterange:         A DateRange object, download only if the upload_date is in the range.
     skip_download:     Skip the actual download of the video file
+    cachedir:          Location of the cache files in the filesystem.
+                       None to disable filesystem cache.
+    noplaylist:        Download single video instead of a playlist if in doubt.
+    age_limit:         An integer representing the user's age in years.
+                       Unsuitable videos for the given age are skipped.
+    downloadarchive:   File name of a file where all downloads are recorded.
+                       Videos already present in the file are not downloaded
+                       again.
 
     The following parameters are not used by YoutubeDL itself, they are used by
     the FileDownloader:
@@ -97,11 +108,23 @@ class YoutubeDL(object):
     def __init__(self, params):
         """Create a FileDownloader object with the given options."""
         self._ies = []
+        self._ies_instances = {}
         self._pps = []
         self._progress_hooks = []
         self._download_retcode = 0
         self._num_downloads = 0
         self._screen_file = [sys.stdout, sys.stderr][params.get('logtostderr', False)]
+
+        if (sys.version_info >= (3,) and sys.platform != 'win32' and
+                sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968']
+                and not params['restrictfilenames']):
+            # On Python 3, the Unicode filesystem API will throw errors (#1474)
+            self.report_warning(
+                u'Assuming --restrict-filenames since file system encoding '
+                u'cannot encode all charactes. '
+                u'Set the LC_ALL environment variable to fix this.')
+            params['restrictfilenames'] = True
+
         self.params = params
         self.fd = FileDownloader(self, self.params)
@@ -111,8 +134,21 @@
     def add_info_extractor(self, ie):
         """Add an InfoExtractor object to the end of the list."""
         self._ies.append(ie)
+        self._ies_instances[ie.ie_key()] = ie
         ie.set_downloader(self)
 
+    def get_info_extractor(self, ie_key):
+        """
+        Get an instance of an IE with name ie_key, it will try to get one from
+        the _ies list, if there's no instance it will create a new one and add
+        it to the extractor list.
+ """ + ie = self._ies_instances.get(ie_key) + if ie is None: + ie = get_info_extractor(ie_key)() + self.add_info_extractor(ie) + return ie + def add_default_info_extractors(self): """ Add the InfoExtractors returned by gen_extractors to the end of the list @@ -127,14 +163,10 @@ class YoutubeDL(object): def to_screen(self, message, skip_eol=False): """Print message to stdout if not in quiet mode.""" - assert type(message) == type(u'') if not self.params.get('quiet', False): terminator = [u'\n', u''][skip_eol] output = message + terminator - if 'b' in getattr(self._screen_file, 'mode', '') or sys.version_info[0] < 3: # Python 2 lies about the mode of sys.stdout/sys.stderr - output = output.encode(preferredencoding(), 'ignore') - self._screen_file.write(output) - self._screen_file.flush() + write_string(output, self._screen_file) def to_stderr(self, message): """Print message to stderr.""" @@ -227,6 +259,10 @@ class YoutubeDL(object): """ Report that the metadata file has been written """ self.to_screen(u'[info] Video description metadata as JSON to: ' + infofn) + def report_writeannotations(self, annofn): + """ Report that the annotations file has been written. """ + self.to_screen(u'[info] Writing video annotations to: ' + annofn) + def report_file_already_downloaded(self, file_name): """Report file has already been fully downloaded.""" try: @@ -264,7 +300,7 @@ class YoutubeDL(object): self.report_error(u'Erroneous output template') return None except ValueError as err: - self.report_error(u'Insufficient system charset ' + repr(preferredencoding())) + self.report_error(u'Error in output template: ' + str(err) + u' (encoding: ' + repr(preferredencoding()) + ')') return None def _match_entry(self, info_dict): @@ -284,6 +320,13 @@ class YoutubeDL(object): dateRange = self.params.get('daterange', DateRange()) if date not in dateRange: return u'[download] %s upload date is not in range %s' % (date_from_str(date).isoformat(), dateRange) + age_limit = self.params.get('age_limit') + if age_limit is not None: + if age_limit < info_dict.get('age_limit', 0): + return u'Skipping "' + title + '" because it is age restricted' + if self.in_download_archive(info_dict): + return (u'%(title)s has already been recorded in archive' + % info_dict) return None def extract_info(self, url, download=True, ie_key=None, extra_info={}): @@ -294,9 +337,7 @@ class YoutubeDL(object): ''' if ie_key: - ie = get_info_extractor(ie_key)() - ie.set_downloader(self) - ies = [ie] + ies = [self.get_info_extractor(ie_key)] else: ies = self._ies @@ -448,7 +489,8 @@ class YoutubeDL(object): if self.params.get('forceid', False): compat_print(info_dict['id']) if self.params.get('forceurl', False): - compat_print(info_dict['url']) + # For RTMP URLs, also include the playpath + compat_print(info_dict['url'] + info_dict.get('play_path', u'')) if self.params.get('forcethumbnail', False) and 'thumbnail' in info_dict: compat_print(info_dict['thumbnail']) if self.params.get('forcedescription', False) and 'description' in info_dict: @@ -479,45 +521,45 @@ class YoutubeDL(object): self.report_writedescription(descfn) with io.open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile: descfile.write(info_dict['description']) + except (KeyError, TypeError): + self.report_warning(u'There\'s no description to write.') except (OSError, IOError): self.report_error(u'Cannot write description file ' + descfn) return - if (self.params.get('writesubtitles', False) or self.params.get('writeautomaticsub')) and 'subtitles' in info_dict and 
info_dict['subtitles']: + if self.params.get('writeannotations', False): + try: + annofn = filename + u'.annotations.xml' + self.report_writeannotations(annofn) + with io.open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile: + annofile.write(info_dict['annotations']) + except (KeyError, TypeError): + self.report_warning(u'There are no annotations to write.') + except (OSError, IOError): + self.report_error(u'Cannot write annotations file: ' + annofn) + return + + subtitles_are_requested = any([self.params.get('writesubtitles', False), + self.params.get('writeautomaticsub')]) + + if subtitles_are_requested and 'subtitles' in info_dict and info_dict['subtitles']: # subtitles download errors are already managed as troubles in relevant IE # that way it will silently go on when used with unsupporting IE - subtitle = info_dict['subtitles'][0] - (sub_error, sub_lang, sub) = subtitle + subtitles = info_dict['subtitles'] sub_format = self.params.get('subtitlesformat') - if sub_error: - self.report_warning("Some error while getting the subtitles") - else: + for sub_lang in subtitles.keys(): + sub = subtitles[sub_lang] + if sub is None: + continue try: - sub_filename = filename.rsplit('.', 1)[0] + u'.' + sub_lang + u'.' + sub_format + sub_filename = subtitles_filename(filename, sub_lang, sub_format) self.report_writesubtitles(sub_filename) with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8') as subfile: - subfile.write(sub) + subfile.write(sub) except (OSError, IOError): self.report_error(u'Cannot write subtitles file ' + descfn) return - if self.params.get('allsubtitles', False) and 'subtitles' in info_dict and info_dict['subtitles']: - subtitles = info_dict['subtitles'] - sub_format = self.params.get('subtitlesformat') - for subtitle in subtitles: - (sub_error, sub_lang, sub) = subtitle - if sub_error: - self.report_warning("Some error while getting the subtitles") - else: - try: - sub_filename = filename.rsplit('.', 1)[0] + u'.' + sub_lang + u'.' + sub_format - self.report_writesubtitles(sub_filename) - with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8') as subfile: - subfile.write(sub) - except (OSError, IOError): - self.report_error(u'Cannot write subtitles file ' + descfn) - return - if self.params.get('writeinfojson', False): infofn = filename + u'.info.json' self.report_writeinfojson(infofn) @@ -534,11 +576,15 @@ class YoutubeDL(object): thumb_filename = filename.rpartition('.')[0] + u'.' + thumb_format self.to_screen(u'[%s] %s: Downloading thumbnail ...' 
                        % (info_dict['extractor'], info_dict['id']))
-                uf = compat_urllib_request.urlopen(info_dict['thumbnail'])
-                with open(thumb_filename, 'wb') as thumbf:
-                    shutil.copyfileobj(uf, thumbf)
-                self.to_screen(u'[%s] %s: Writing thumbnail to: %s' %
-                               (info_dict['extractor'], info_dict['id'], thumb_filename))
+                try:
+                    uf = compat_urllib_request.urlopen(info_dict['thumbnail'])
+                    with open(thumb_filename, 'wb') as thumbf:
+                        shutil.copyfileobj(uf, thumbf)
+                    self.to_screen(u'[%s] %s: Writing thumbnail to: %s' %
+                                   (info_dict['extractor'], info_dict['id'], thumb_filename))
+                except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+                    self.report_warning(u'Unable to download thumbnail "%s": %s' %
+                                        (info_dict['thumbnail'], compat_str(err)))
 
         if not self.params.get('skip_download', False):
             if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(filename)):
@@ -546,11 +592,11 @@
             else:
                 try:
                     success = self.fd._do_download(filename, info_dict)
-                except (OSError, IOError) as err:
-                    raise UnavailableVideoError()
                 except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                     self.report_error(u'unable to download video data: %s' % str(err))
                     return
+                except (OSError, IOError) as err:
+                    raise UnavailableVideoError(err)
                 except (ContentTooShortError, ) as err:
                     self.report_error(u'content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded))
                     return
@@ -562,6 +608,8 @@
                     self.report_error(u'postprocessing: %s' % str(err))
                     return
 
+        self.record_download_archive(info_dict)
+
     def download(self, url_list):
         """Download a given list of URLs."""
         if len(url_list) > 1 and self.fixed_template():
@@ -594,10 +642,33 @@
                 # No clear decision yet, let IE decide
                 keep_video = keep_video_wish
             except PostProcessingError as e:
-                self.to_stderr(u'ERROR: ' + e.msg)
+                self.report_error(e.msg)
         if keep_video is False and not self.params.get('keepvideo', False):
             try:
                 self.to_screen(u'Deleting original file %s (pass -k to keep)' % filename)
                 os.remove(encodeFilename(filename))
            except (IOError, OSError):
                 self.report_warning(u'Unable to remove downloaded video file')
+
+    def in_download_archive(self, info_dict):
+        fn = self.params.get('download_archive')
+        if fn is None:
+            return False
+        vid_id = info_dict['extractor'] + u' ' + info_dict['id']
+        try:
+            with locked_file(fn, 'r', encoding='utf-8') as archive_file:
+                for line in archive_file:
+                    if line.strip() == vid_id:
+                        return True
+        except IOError as ioe:
+            if ioe.errno != errno.ENOENT:
+                raise
+        return False
+
+    def record_download_archive(self, info_dict):
+        fn = self.params.get('download_archive')
+        if fn is None:
+            return
+        vid_id = info_dict['extractor'] + u' ' + info_dict['id']
+        with locked_file(fn, 'a', encoding='utf-8') as archive_file:
+            archive_file.write(vid_id + u'\n')
diff --git a/youtube_dl/__init__.py b/youtube_dl/__init__.py
index 250cf62f8..472ae9c0c 100644
--- a/youtube_dl/__init__.py
+++ b/youtube_dl/__init__.py
@@ -27,11 +27,17 @@ __authors__  = (
     'Johny Mo Swag',
     'Axel Noack',
     'Albert Kim',
+    'Pierre Rudloff',
+    'Huarong Huo',
+    'Ismael Mejía',
+    'Steffan \'Ruirize\' James',
+    'Andras Elso',
 )
 
 __license__ = 'Public Domain'
 
 import codecs
+import collections
 import getpass
 import optparse
@@ -41,16 +47,43 @@ import shlex
 import socket
 import subprocess
 import sys
-import warnings
+import traceback
 import platform
 
-from .utils import *
+
+from .utils import (
+    compat_cookiejar,
+    compat_print,
+    compat_str,
+    compat_urllib_request,
+    DateRange,
+    decodeOption,
+    determine_ext,
+    DownloadError,
+    get_cachedir,
+    make_HTTPS_handler,
+    MaxDownloadsReached,
+    platform_name,
+    preferredencoding,
+    SameFileError,
+    std_headers,
+    write_string,
+    YoutubeDLHandler,
+)
 from .update import update_self
 from .version import __version__
-from .FileDownloader import *
+from .FileDownloader import (
+    FileDownloader,
+)
 from .extractor import gen_extractors
 from .YoutubeDL import YoutubeDL
-from .PostProcessor import *
+from .PostProcessor import (
+    FFmpegMetadataPP,
+    FFmpegVideoConvertor,
+    FFmpegExtractAudioPP,
+    FFmpegEmbedSubtitlePP,
+)
+
 
 def parseOpts(overrideArguments=None):
     def _readOptions(filename_bytes):
@@ -82,6 +115,9 @@
 
         return "".join(opts)
 
+    def _comma_separated_values_options_callback(option, opt_str, value, parser):
+        setattr(parser.values, option.dest, value.split(','))
+
     def _find_term_columns():
         columns = os.environ.get('COLUMNS', None)
         if columns:
@@ -95,6 +131,16 @@
             pass
         return None
 
+    def _hide_login_info(opts):
+        opts = list(opts)
+        for private_opt in ['-p', '--password', '-u', '--username']:
+            try:
+                i = opts.index(private_opt)
+                opts[i+1] = '<PRIVATE>'
+            except ValueError:
+                pass
+        return opts
+
     max_width = 80
     max_help_position = 80
@@ -119,6 +165,7 @@
     selection      = optparse.OptionGroup(parser, 'Video Selection')
     authentication = optparse.OptionGroup(parser, 'Authentication Options')
     video_format   = optparse.OptionGroup(parser, 'Video Format Options')
+    subtitles      = optparse.OptionGroup(parser, 'Subtitle Options')
     downloader     = optparse.OptionGroup(parser, 'Download Options')
     postproc       = optparse.OptionGroup(parser, 'Post-processing Options')
     filesystem     = optparse.OptionGroup(parser, 'Filesystem Options')
@@ -129,9 +176,9 @@
     general.add_option('-v', '--version',
             action='version', help='print program version and exit')
     general.add_option('-U', '--update',
-            action='store_true', dest='update_self', help='update this program to latest version')
+            action='store_true', dest='update_self', help='update this program to latest version. Make sure that you have sufficient permissions (run with sudo if needed)')
     general.add_option('-i', '--ignore-errors',
-            action='store_true', dest='ignoreerrors', help='continue on download errors', default=False)
+            action='store_true', dest='ignoreerrors', help='continue on download errors, for example to to skip unavailable videos in a playlist', default=False)
     general.add_option('--dump-user-agent',
             action='store_true', dest='dump_user_agent',
             help='display the current browser identification', default=False)
@@ -148,6 +195,12 @@
             help='Output descriptions of all supported extractors', default=False)
     general.add_option('--proxy', dest='proxy', default=None, help='Use the specified HTTP/HTTPS proxy', metavar='URL')
     general.add_option('--no-check-certificate', action='store_true', dest='no_check_certificate', default=False, help='Suppress HTTPS certificate validation.')
+    general.add_option(
+        '--cache-dir', dest='cachedir', default=get_cachedir(),
+        help='Location in the filesystem where youtube-dl can store downloaded information permanently. By default $XDG_CACHE_HOME/youtube-dl or ~/.cache/youtube-dl .')
+    general.add_option(
+        '--no-cache-dir', action='store_const', const=None, dest='cachedir',
+        help='Disable filesystem caching')
 
 
     selection.add_option('--playlist-start',
@@ -162,6 +215,13 @@
     selection.add_option('--date', metavar='DATE', dest='date', help='download only videos uploaded in this date', default=None)
     selection.add_option('--datebefore', metavar='DATE', dest='datebefore', help='download only videos uploaded before this date', default=None)
     selection.add_option('--dateafter', metavar='DATE', dest='dateafter', help='download only videos uploaded after this date', default=None)
+    selection.add_option('--no-playlist', action='store_true', dest='noplaylist', help='download only the currently playing video', default=False)
+    selection.add_option('--age-limit', metavar='YEARS', dest='age_limit',
+                         help='download only videos suitable for the given age',
+                         default=None, type=int)
+    selection.add_option('--download-archive', metavar='FILE',
+                         dest='download_archive',
+                         help='Download only videos not present in the archive file. Record all downloaded videos in it.')
 
 
     authentication.add_option('-u', '--username',
@@ -176,7 +236,7 @@
 
     video_format.add_option('-f', '--format',
             action='store', dest='format', metavar='FORMAT',
-            help='video format code, specifiy the order of preference using slashes: "-f 22/17/18"')
+            help='video format code, specifiy the order of preference using slashes: "-f 22/17/18". "-f mp4" and "-f flv" are also supported')
     video_format.add_option('--all-formats',
             action='store_const', dest='format', help='download all available video formats', const='all')
     video_format.add_option('--prefer-free-formats',
@@ -185,27 +245,26 @@
             action='store', dest='format_limit', metavar='FORMAT', help='highest quality format to download')
     video_format.add_option('-F', '--list-formats',
             action='store_true', dest='listformats', help='list all available formats (currently youtube only)')
-    video_format.add_option('--write-sub', '--write-srt',
+
+    subtitles.add_option('--write-sub', '--write-srt',
             action='store_true', dest='writesubtitles',
-            help='write subtitle file (currently youtube only)', default=False)
-    video_format.add_option('--write-auto-sub', '--write-automatic-sub',
+            help='write subtitle file', default=False)
+    subtitles.add_option('--write-auto-sub', '--write-automatic-sub',
            action='store_true', dest='writeautomaticsub',
-            help='write automatic subtitle file (currently youtube only)', default=False)
-    video_format.add_option('--only-sub',
-            action='store_true', dest='skip_download',
-            help='[deprecated] alias of --skip-download', default=False)
-    video_format.add_option('--all-subs',
+            help='write automatic subtitle file (youtube only)', default=False)
+    subtitles.add_option('--all-subs',
            action='store_true', dest='allsubtitles',
-            help='downloads all the available subtitles of the video (currently youtube only)', default=False)
-    video_format.add_option('--list-subs',
+            help='downloads all the available subtitles of the video', default=False)
+    subtitles.add_option('--list-subs',
            action='store_true', dest='listsubtitles',
-            help='lists all available subtitles for the video (currently youtube only)', default=False)
-    video_format.add_option('--sub-format',
+            help='lists all available subtitles for the video', default=False)
+    subtitles.add_option('--sub-format',
            action='store', dest='subtitlesformat', metavar='FORMAT',
-            help='subtitle format [srt/sbv/vtt] (default=srt) (currently youtube only)', default='srt')
-    video_format.add_option('--sub-lang', '--srt-lang',
-            action='store', dest='subtitleslang', metavar='LANG',
-            help='language of the subtitles to download (optional) use IETF language tags like \'en\'')
+            help='subtitle format (default=srt) ([sbv/vtt] youtube only)', default='srt')
+    subtitles.add_option('--sub-lang', '--sub-langs', '--srt-lang',
+            action='callback', dest='subtitleslangs', metavar='LANGS', type='str',
+            default=[], callback=_comma_separated_values_options_callback,
+            help='languages of the subtitles to download (optional) separated by commas, use IETF language tags like \'en,pt\'')
 
     downloader.add_option('-r', '--rate-limit',
             dest='ratelimit', metavar='LIMIT', help='maximum download rate in bytes per second (e.g. 50k or 44.6m)')
@@ -254,6 +313,10 @@
     verbosity.add_option('--dump-intermediate-pages',
             action='store_true', dest='dump_intermediate_pages', default=False,
             help='print downloaded pages to debug problems(very verbose)')
+    verbosity.add_option('--youtube-print-sig-code',
+            action='store_true', dest='youtube_print_sig_code', default=False,
+            help=optparse.SUPPRESS_HELP)
+
 
     filesystem.add_option('-t', '--title',
             action='store_true', dest='usetitle', help='use title in file name (default)', default=False)
@@ -303,6 +366,9 @@
     filesystem.add_option('--write-info-json',
             action='store_true', dest='writeinfojson',
             help='write video metadata to a .info.json file', default=False)
+    filesystem.add_option('--write-annotations',
+            action='store_true', dest='writeannotations',
+            help='write video annotations to a .annotation file', default=False)
     filesystem.add_option('--write-thumbnail',
             action='store_true', dest='writethumbnail',
             help='write thumbnail image to disk', default=False)
@@ -320,6 +386,10 @@
             help='keeps the video file on disk after the post-processing; the video is erased by default')
     postproc.add_option('--no-post-overwrites', action='store_true', dest='nopostoverwrites', default=False,
             help='do not overwrite post-processed files; the post-processed files are overwritten by default')
+    postproc.add_option('--embed-subs', action='store_true', dest='embedsubtitles', default=False,
+            help='embed subtitles in the video (only for mp4 videos)')
+    postproc.add_option('--add-metadata', action='store_true', dest='addmetadata', default=False,
+            help='add metadata to the files')
 
 
     parser.add_option_group(general)
@@ -328,28 +398,33 @@
     parser.add_option_group(filesystem)
     parser.add_option_group(verbosity)
     parser.add_option_group(video_format)
+    parser.add_option_group(subtitles)
     parser.add_option_group(authentication)
     parser.add_option_group(postproc)
 
     if overrideArguments is not None:
         opts, args = parser.parse_args(overrideArguments)
         if opts.verbose:
-            sys.stderr.write(u'[debug] Override config: ' + repr(overrideArguments) + '\n')
+            write_string(u'[debug] Override config: ' + repr(overrideArguments) + '\n')
     else:
         xdg_config_home = os.environ.get('XDG_CONFIG_HOME')
         if xdg_config_home:
-            userConfFile = os.path.join(xdg_config_home, 'youtube-dl.conf')
+            userConfFile = os.path.join(xdg_config_home, 'youtube-dl', 'config')
+            if not os.path.isfile(userConfFile):
+                userConfFile = os.path.join(xdg_config_home, 'youtube-dl.conf')
         else:
-            userConfFile = os.path.join(os.path.expanduser('~'), '.config', 'youtube-dl.conf')
+            userConfFile = os.path.join(os.path.expanduser('~'), '.config', 'youtube-dl', 'config')
+            if not os.path.isfile(userConfFile):
+                userConfFile = os.path.join(os.path.expanduser('~'), '.config', 'youtube-dl.conf')
         systemConf = _readOptions('/etc/youtube-dl.conf')
         userConf = _readOptions(userConfFile)
-        commandLineConf = sys.argv[1:] 
+        commandLineConf = sys.argv[1:]
         argv = systemConf + userConf + commandLineConf
         opts, args = parser.parse_args(argv)
         if opts.verbose:
-            sys.stderr.write(u'[debug] System config: ' + repr(systemConf) + '\n')
-            sys.stderr.write(u'[debug] User config: ' + repr(userConf) + '\n')
-            sys.stderr.write(u'[debug] Command-line args: ' + repr(commandLineConf) + '\n')
+            write_string(u'[debug] System config: ' + repr(_hide_login_info(systemConf)) + '\n')
+            write_string(u'[debug] User config: ' + repr(_hide_login_info(userConf)) + '\n')
+            write_string(u'[debug] Command-line args: ' + repr(_hide_login_info(commandLineConf)) + '\n')
 
     return parser, opts, args
@@ -372,12 +447,12 @@
         except (IOError, OSError) as err:
             if opts.verbose:
                 traceback.print_exc()
-            sys.stderr.write(u'ERROR: unable to open cookie file\n')
+            write_string(u'ERROR: unable to open cookie file\n')
             sys.exit(101)
 
     # Set user agent
     if opts.user_agent is not None:
         std_headers['User-Agent'] = opts.user_agent
-    
+
     # Set referer
     if opts.referer is not None:
         std_headers['Referer'] = opts.referer
@@ -398,28 +473,14 @@
             batchurls = batchfd.readlines()
             batchurls = [x.strip() for x in batchurls]
             batchurls = [x for x in batchurls if len(x) > 0 and not re.search(r'^[#/;]', x)]
+            if opts.verbose:
+                write_string(u'[debug] Batch file urls: ' + repr(batchurls) + u'\n')
         except IOError:
             sys.exit(u'ERROR: batch file could not be read')
     all_urls = batchurls + args
     all_urls = [url.strip() for url in all_urls]
 
-    # General configuration
-    cookie_processor = compat_urllib_request.HTTPCookieProcessor(jar)
-    if opts.proxy is not None:
-        if opts.proxy == '':
-            proxies = {}
-        else:
-            proxies = {'http': opts.proxy, 'https': opts.proxy}
-    else:
-        proxies = compat_urllib_request.getproxies()
-        # Set HTTPS proxy to HTTP one if given (https://github.com/rg3/youtube-dl/issues/805)
-        if 'http' in proxies and 'https' not in proxies:
-            proxies['https'] = proxies['http']
-    proxy_handler = compat_urllib_request.ProxyHandler(proxies)
-    https_handler = make_HTTPS_handler(opts)
-    opener = compat_urllib_request.build_opener(https_handler, proxy_handler, cookie_processor, YoutubeDLHandler())
-    compat_urllib_request.install_opener(opener)
-    socket.setdefaulttimeout(300) # 5 minutes should be enough (famous last words)
+    opener = _setup_opener(jar=jar, opts=opts)
 
     extractors = gen_extractors()
@@ -436,6 +497,8 @@
             if not ie._WORKING:
                 continue
             desc = getattr(ie, 'IE_DESC', ie.IE_NAME)
+            if desc is False:
+                continue
             if hasattr(ie, 'SEARCH_KEY'):
                 _SEARCHES = (u'cute kittens', u'slithering pythons', u'falling cat', u'angry poodle', u'purple fish', u'running tortoise')
                 _COUNTS = (u'', u'5', u'10', u'all')
@@ -507,6 +570,11 @@
     else:
         date = DateRange(opts.dateafter, opts.datebefore)
 
+    # --all-sub automatically sets --write-sub if --write-auto-sub is not given
+    # this was the old behaviour if only --all-sub was given.
+    if opts.allsubtitles and (opts.writeautomaticsub == False):
+        opts.writesubtitles = True
+
     if sys.version_info < (3,):
         # In Python 2, sys.argv is a bytestring (also note http://bugs.python.org/issue2128 for Windows systems)
         if opts.outtmpl is not None:
@@ -519,6 +587,10 @@
             or (opts.useid and u'%(id)s.%(ext)s')
             or (opts.autonumber and u'%(autonumber)s-%(id)s.%(ext)s')
             or u'%(title)s-%(id)s.%(ext)s')
+    if '%(ext)s' not in outtmpl and opts.extractaudio:
+        parser.error(u'Cannot download a video and extract audio into the same'
+                     u' file! Use "%%(ext)s" instead of %r' %
+                     determine_ext(outtmpl, u''))
 
     # YoutubeDL
     ydl = YoutubeDL({
@@ -553,11 +625,13 @@
         'progress_with_newline': opts.progress_with_newline,
         'playliststart': opts.playliststart,
         'playlistend': opts.playlistend,
+        'noplaylist': opts.noplaylist,
         'logtostderr': opts.outtmpl == '-',
         'consoletitle': opts.consoletitle,
         'nopart': opts.nopart,
         'updatetime': opts.updatetime,
         'writedescription': opts.writedescription,
+        'writeannotations': opts.writeannotations,
         'writeinfojson': opts.writeinfojson,
         'writethumbnail': opts.writethumbnail,
         'writesubtitles': opts.writesubtitles,
@@ -565,7 +639,7 @@
         'allsubtitles': opts.allsubtitles,
         'listsubtitles': opts.listsubtitles,
         'subtitlesformat': opts.subtitlesformat,
-        'subtitleslang': opts.subtitleslang,
+        'subtitleslangs': opts.subtitleslangs,
         'matchtitle': decodeOption(opts.matchtitle),
         'rejecttitle': decodeOption(opts.rejecttitle),
         'max_downloads': opts.max_downloads,
@@ -577,10 +651,14 @@
         'min_filesize': opts.min_filesize,
         'max_filesize': opts.max_filesize,
         'daterange': date,
+        'cachedir': opts.cachedir,
+        'youtube_print_sig_code': opts.youtube_print_sig_code,
+        'age_limit': opts.age_limit,
+        'download_archive': opts.download_archive,
         })
 
     if opts.verbose:
-        ydl.to_screen(u'[debug] youtube-dl version ' + __version__)
+        write_string(u'[debug] youtube-dl version ' + __version__ + u'\n')
         try:
             sp = subprocess.Popen(
                 ['git', 'rev-parse', '--short', 'HEAD'],
@@ -589,23 +667,36 @@
             out, err = sp.communicate()
             out = out.decode().strip()
             if re.match('[0-9a-f]+', out):
-                ydl.to_screen(u'[debug] Git HEAD: ' + out)
+                write_string(u'[debug] Git HEAD: ' + out + u'\n')
         except:
-            sys.exc_clear()
-        ydl.to_screen(u'[debug] Python version %s - %s' %(platform.python_version(), platform.platform()))
-        ydl.to_screen(u'[debug] Proxy map: ' + str(proxy_handler.proxies))
+            try:
+                sys.exc_clear()
+            except:
+                pass
+        write_string(u'[debug] Python version %s - %s' %(platform.python_version(), platform_name()) + u'\n')
+
+        proxy_map = {}
+        for handler in opener.handlers:
+            if hasattr(handler, 'proxies'):
+                proxy_map.update(handler.proxies)
+        write_string(u'[debug] Proxy map: ' + compat_str(proxy_map) + u'\n')
 
     ydl.add_default_info_extractors()
 
     # PostProcessors
+    # Add the metadata pp first, the other pps will copy it
+    if opts.addmetadata:
+        ydl.add_post_processor(FFmpegMetadataPP())
     if opts.extractaudio:
         ydl.add_post_processor(FFmpegExtractAudioPP(preferredcodec=opts.audioformat, preferredquality=opts.audioquality, nopostoverwrites=opts.nopostoverwrites))
     if opts.recodevideo:
         ydl.add_post_processor(FFmpegVideoConvertor(preferedformat=opts.recodevideo))
+    if opts.embedsubtitles:
+        ydl.add_post_processor(FFmpegEmbedSubtitlePP(subtitlesformat=opts.subtitlesformat))
 
     # Update version
     if opts.update_self:
-        update_self(ydl.to_screen, opts.verbose, sys.argv[0])
+        update_self(ydl.to_screen, opts.verbose)
 
     # Maybe do nothing
     if len(all_urls) < 1:
@@ -624,11 +715,42 @@
     if opts.cookiefile is not None:
         try:
             jar.save()
-        except (IOError, OSError) as err:
+        except (IOError, OSError):
            sys.exit(u'ERROR: unable to save cookie jar')
 
     sys.exit(retcode)
 
+
+def _setup_opener(jar=None, opts=None, timeout=300):
+    if opts is None:
+        FakeOptions = collections.namedtuple(
+            'FakeOptions', ['proxy', 'no_check_certificate'])
+        opts = FakeOptions(proxy=None, no_check_certificate=False)
+
+    cookie_processor = compat_urllib_request.HTTPCookieProcessor(jar)
+    if opts.proxy is not None:
+        if opts.proxy == '':
+            proxies = {}
+        else:
+            proxies = {'http': opts.proxy, 'https': opts.proxy}
+    else:
+        proxies = compat_urllib_request.getproxies()
+        # Set HTTPS proxy to HTTP one if given (https://github.com/rg3/youtube-dl/issues/805)
+        if 'http' in proxies and 'https' not in proxies:
+            proxies['https'] = proxies['http']
+    proxy_handler = compat_urllib_request.ProxyHandler(proxies)
+    https_handler = make_HTTPS_handler(opts)
+    opener = compat_urllib_request.build_opener(
+        https_handler, proxy_handler, cookie_processor, YoutubeDLHandler())
+    # Delete the default user-agent header, which would otherwise apply in
+    # cases where our custom HTTP handler doesn't come into play
+    # (See https://github.com/rg3/youtube-dl/issues/1309 for details)
+    opener.addheaders = []
+    compat_urllib_request.install_opener(opener)
+    socket.setdefaulttimeout(timeout)
+    return opener
+
+
 def main(argv=None):
     try:
         _real_main(argv)
diff --git a/youtube_dl/aes.py b/youtube_dl/aes.py
new file mode 100644
index 000000000..9a0c93fa6
--- /dev/null
+++ b/youtube_dl/aes.py
@@ -0,0 +1,202 @@
+__all__ = ['aes_encrypt', 'key_expansion', 'aes_ctr_decrypt', 'aes_decrypt_text']
+
+import base64
+from math import ceil
+
+from .utils import bytes_to_intlist, intlist_to_bytes
+
+BLOCK_SIZE_BYTES = 16
+
+def aes_ctr_decrypt(data, key, counter):
+    """
+    Decrypt with aes in counter mode
+
+    @param {int[]} data        cipher
+    @param {int[]} key         16/24/32-Byte cipher key
+    @param {instance} counter  Instance whose next_value function (@returns {int[]} 16-Byte block)
+                               returns the next counter block
+    @returns {int[]}           decrypted data
+    """
+    expanded_key = key_expansion(key)
+    block_count = int(ceil(float(len(data)) / BLOCK_SIZE_BYTES))
+
+    decrypted_data=[]
+    for i in range(block_count):
+        counter_block = counter.next_value()
+        block = data[i*BLOCK_SIZE_BYTES : (i+1)*BLOCK_SIZE_BYTES]
+        block += [0]*(BLOCK_SIZE_BYTES - len(block))
+
+        cipher_counter_block = aes_encrypt(counter_block, expanded_key)
+        decrypted_data += xor(block, cipher_counter_block)
+    decrypted_data = decrypted_data[:len(data)]
+
+    return decrypted_data
+
+def key_expansion(data):
+    """
+    Generate key schedule
+
+    @param {int[]} data  16/24/32-Byte cipher key
+    @returns {int[]}     176/208/240-Byte expanded key
+    """
+    data = data[:] # copy
+    rcon_iteration = 1
+    key_size_bytes = len(data)
+    expanded_key_size_bytes = (key_size_bytes // 4 + 7) * BLOCK_SIZE_BYTES
+
+    while len(data) < expanded_key_size_bytes:
+        temp = data[-4:]
+        temp = key_schedule_core(temp, rcon_iteration)
+        rcon_iteration += 1
+        data += xor(temp, data[-key_size_bytes : 4-key_size_bytes])
+
+        for _ in range(3):
+            temp = data[-4:]
+            data += xor(temp, data[-key_size_bytes : 4-key_size_bytes])
+
+        if key_size_bytes == 32:
+            temp = data[-4:]
+            temp = sub_bytes(temp)
+            data += xor(temp, data[-key_size_bytes : 4-key_size_bytes])
+
+        for _ in range(3 if key_size_bytes == 32 else 2 if key_size_bytes == 24 else 0):
+            temp = data[-4:]
+            data += xor(temp, data[-key_size_bytes : 4-key_size_bytes])
+    data = data[:expanded_key_size_bytes]
+
+    return data
+
+def aes_encrypt(data, expanded_key):
+    """
+    Encrypt one block with aes
+
+    @param {int[]} data          16-Byte state
+    @param {int[]} expanded_key  176/208/240-Byte expanded key
+    @returns {int[]}             16-Byte cipher
+    """
+    rounds = len(expanded_key) // BLOCK_SIZE_BYTES - 1
+
+    data = xor(data, expanded_key[:BLOCK_SIZE_BYTES])
+    for i in range(1, rounds+1):
+        data = sub_bytes(data)
+        data = shift_rows(data)
+        if i != rounds:
+            data = mix_columns(data)
+        data = xor(data, expanded_key[i*BLOCK_SIZE_BYTES : (i+1)*BLOCK_SIZE_BYTES])
+
+    return data
+
+def aes_decrypt_text(data, password, key_size_bytes):
+    """
+    Decrypt text
+    - The first 8 Bytes of decoded 'data' are the 8 high Bytes of the counter
+    - The cipher key is retrieved by encrypting the first 16 Byte of 'password'
+      with the first 'key_size_bytes' Bytes from 'password' (if necessary filled with 0's)
+    - Mode of operation is 'counter'
+
+    @param {str} data              Base64 encoded string
+    @param {str,unicode} password  Password (will be encoded with utf-8)
+    @param {int} key_size_bytes    Possible values: 16 for 128-Bit, 24 for 192-Bit or 32 for 256-Bit
+    @returns {str}                 Decrypted data
+    """
+    NONCE_LENGTH_BYTES = 8
+
+    data = bytes_to_intlist(base64.b64decode(data))
+    password = bytes_to_intlist(password.encode('utf-8'))
+
+    key = password[:key_size_bytes] + [0]*(key_size_bytes - len(password))
+    key = aes_encrypt(key[:BLOCK_SIZE_BYTES], key_expansion(key)) * (key_size_bytes // BLOCK_SIZE_BYTES)
+
+    nonce = data[:NONCE_LENGTH_BYTES]
+    cipher = data[NONCE_LENGTH_BYTES:]
+
+    class Counter:
+        __value = nonce + [0]*(BLOCK_SIZE_BYTES - NONCE_LENGTH_BYTES)
+        def next_value(self):
+            temp = self.__value
+            self.__value = inc(self.__value)
+            return temp
+
+    decrypted_data = aes_ctr_decrypt(cipher, key, Counter())
+    plaintext = intlist_to_bytes(decrypted_data)
+
+    return plaintext
+
+RCON = (0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36)
+SBOX = (0x63, 0x7C, 0x77, 0x7B, 0xF2, 0x6B, 0x6F, 0xC5, 0x30, 0x01, 0x67, 0x2B, 0xFE, 0xD7, 0xAB, 0x76,
+        0xCA, 0x82, 0xC9, 0x7D, 0xFA, 0x59, 0x47, 0xF0, 0xAD, 0xD4, 0xA2, 0xAF, 0x9C, 0xA4, 0x72, 0xC0,
+        0xB7, 0xFD, 0x93, 0x26, 0x36, 0x3F, 0xF7, 0xCC, 0x34, 0xA5, 0xE5, 0xF1, 0x71, 0xD8, 0x31, 0x15,
+        0x04, 0xC7, 0x23, 0xC3, 0x18, 0x96, 0x05, 0x9A, 0x07, 0x12, 0x80, 0xE2, 0xEB, 0x27, 0xB2, 0x75,
+        0x09, 0x83, 0x2C, 0x1A, 0x1B, 0x6E, 0x5A, 0xA0, 0x52, 0x3B, 0xD6, 0xB3, 0x29, 0xE3, 0x2F, 0x84,
+        0x53, 0xD1, 0x00, 0xED, 0x20, 0xFC, 0xB1, 0x5B, 0x6A, 0xCB, 0xBE, 0x39, 0x4A, 0x4C, 0x58, 0xCF,
+        0xD0, 0xEF, 0xAA, 0xFB, 0x43, 0x4D, 0x33, 0x85, 0x45, 0xF9, 0x02, 0x7F, 0x50, 0x3C, 0x9F, 0xA8,
+        0x51, 0xA3, 0x40, 0x8F, 0x92, 0x9D, 0x38, 0xF5, 0xBC, 0xB6, 0xDA, 0x21, 0x10, 0xFF, 0xF3, 0xD2,
+        0xCD, 0x0C, 0x13, 0xEC, 0x5F, 0x97, 0x44, 0x17, 0xC4, 0xA7, 0x7E, 0x3D, 0x64, 0x5D, 0x19, 0x73,
+        0x60, 0x81, 0x4F, 0xDC, 0x22, 0x2A, 0x90, 0x88, 0x46, 0xEE, 0xB8, 0x14, 0xDE, 0x5E, 0x0B, 0xDB,
+        0xE0, 0x32, 0x3A, 0x0A, 0x49, 0x06, 0x24, 0x5C, 0xC2, 0xD3, 0xAC, 0x62, 0x91, 0x95, 0xE4, 0x79,
+        0xE7, 0xC8, 0x37, 0x6D, 0x8D, 0xD5, 0x4E, 0xA9, 0x6C, 0x56, 0xF4, 0xEA, 0x65, 0x7A, 0xAE, 0x08,
+        0xBA, 0x78, 0x25, 0x2E, 0x1C, 0xA6, 0xB4, 0xC6, 0xE8, 0xDD, 0x74, 0x1F, 0x4B, 0xBD, 0x8B, 0x8A,
+        0x70, 0x3E, 0xB5, 0x66, 0x48, 0x03, 0xF6, 0x0E, 0x61, 0x35, 0x57, 0xB9, 0x86, 0xC1, 0x1D, 0x9E,
+        0xE1, 0xF8, 0x98, 0x11, 0x69, 0xD9, 0x8E, 0x94, 0x9B, 0x1E, 0x87, 0xE9, 0xCE, 0x55, 0x28, 0xDF,
+        0x8C, 0xA1, 0x89, 0x0D, 0xBF, 0xE6, 0x42, 0x68, 0x41, 0x99, 0x2D, 0x0F, 0xB0, 0x54, 0xBB, 0x16)
+MIX_COLUMN_MATRIX = ((2,3,1,1),
+                     (1,2,3,1),
+                     (1,1,2,3),
+                     (3,1,1,2))
+
+def sub_bytes(data):
+    return [SBOX[x] for x in data]
+
+def rotate(data):
+    return data[1:] + [data[0]]
+
+def key_schedule_core(data, rcon_iteration):
+    data = rotate(data)
+    data = sub_bytes(data)
+    data[0] = data[0] ^ RCON[rcon_iteration]
+
+    return data
+
+def xor(data1, data2):
+    return [x^y for x, y in zip(data1, data2)]
+
+def mix_column(data):
+    data_mixed = []
+    for row in range(4):
+        mixed = 0
+        for column in range(4):
+            addend = data[column]
+            if MIX_COLUMN_MATRIX[row][column] in (2,3):
+                addend <<= 1
+                if addend > 0xff:
+                    addend &= 0xff
+                    addend ^= 0x1b
+                if MIX_COLUMN_MATRIX[row][column] == 3:
+                    addend ^= data[column]
+            mixed ^= addend & 0xff
+        data_mixed.append(mixed)
+    return data_mixed
+
+def mix_columns(data):
+    data_mixed = []
+    for i in range(4):
+        column = data[i*4 : (i+1)*4]
+        data_mixed += mix_column(column)
+    return data_mixed
+
+def shift_rows(data):
+    data_shifted = []
+    for column in range(4):
+        for row in range(4):
+            data_shifted.append( data[((column + row) & 0b11) * 4 + row] )
+    return data_shifted
+
+def inc(data):
+    data = data[:] # copy
+    for i in range(len(data)-1,-1,-1):
+        if data[i] == 255:
+            data[i] = 0
+        else:
+            data[i] = data[i] + 1
+            break
+    return data
diff --git a/youtube_dl/extractor/__init__.py b/youtube_dl/extractor/__init__.py
index cdbd880c7..5f0e2ec9b 100644
--- a/youtube_dl/extractor/__init__.py
+++ b/youtube_dl/extractor/__init__.py
@@ -1,31 +1,65 @@
+from .appletrailers import AppleTrailersIE
+from .addanime import AddAnimeIE
 from .archiveorg import ArchiveOrgIE
 from .ard import ARDIE
-from .arte import ArteTvIE
+from .arte import (
+    ArteTvIE,
+    ArteTVPlus7IE,
+    ArteTVCreativeIE,
+    ArteTVFutureIE,
+)
 from .auengine import AUEngineIE
 from .bandcamp import BandcampIE
 from .bliptv import BlipTVIE, BlipTVUserIE
+from .bloomberg import BloombergIE
 from .breakcom import BreakIE
 from .brightcove import BrightcoveIE
+from .c56 import C56IE
 from .canalplus import CanalplusIE
+from .canalc2 import Canalc2IE
+from .cinemassacre import CinemassacreIE
+from .cnn import CNNIE
 from .collegehumor import CollegeHumorIE
 from .comedycentral import ComedyCentralIE
+from .condenast import CondeNastIE
 from .criterion import CriterionIE
 from .cspan import CSpanIE
-from .dailymotion import DailymotionIE
+from .dailymotion import (
+    DailymotionIE,
+    DailymotionPlaylistIE,
+    DailymotionUserIE,
+)
+from .daum import DaumIE
 from .depositfiles import DepositFilesIE
 from .dotsub import DotsubIE
 from .dreisat import DreiSatIE
+from .defense import DefenseGouvFrIE
+from .ebaumsworld import EbaumsWorldIE
 from .ehow import EHowIE
 from .eighttracks import EightTracksIE
 from .escapist import EscapistIE
+from .exfm import ExfmIE
 from .facebook import FacebookIE
+from .faz import FazIE
+from .fktv import (
+    FKTVIE,
+    FKTVPosteckeIE,
+)
 from .flickr import FlickrIE
+from .francetv import (
+    PluzzIE,
+    FranceTvInfoIE,
+    France2IE,
+    GenerationQuoiIE
+)
+from .freesound import FreesoundIE
 from .funnyordie import FunnyOrDieIE
 from .gamespot import GameSpotIE
 from .gametrailers import GametrailersIE
 from .generic import GenericIE
 from .googleplus import GooglePlusIE
 from .googlesearch import GoogleSearchIE
+from .hark import HarkIE
 from .hotnewhiphop import HotNewHipHopIE
 from .howcast import HowcastIE
 from .hypem import HypemIE
@@ -33,40 +67,77 @@ from .ign import IGNIE, OneUPIE
 from .ina import InaIE
 from .infoq import InfoQIE
 from .instagram import InstagramIE
+from .internetvideoarchive import InternetVideoArchiveIE
+from .jeuxvideo import JeuxVideoIE
 from .jukebox import JukeboxIE
 from .justintv import JustinTVIE
+from .kankan import KankanIE
+from .kickstarter import KickStarterIE
 from .keek import KeekIE
 from .liveleak import LiveLeakIE
+from .livestream import LivestreamIE
 from .metacafe import MetacafeIE
+from .metacritic import MetacriticIE
+from .mit import TechTVMITIE, MITIE
 from .mixcloud import MixcloudIE
 from .mtv import MTVIE
+from .muzu import MuzuTVIE
 from .myspass import MySpassIE
 from .myvideo import MyVideoIE
+from .naver import NaverIE
 from .nba import NBAIE
+from .nbc import NBCNewsIE
+from .newgrounds import NewgroundsIE
+from .nhl import NHLIE, NHLVideocenterIE
+from .nowvideo import NowVideoIE
+from .ooyala import OoyalaIE
+from .orf import ORFIE
+from .pbs import PBSIE
 from .photobucket import PhotobucketIE
 from .pornotube import PornotubeIE
 from .rbmaradio import RBMARadioIE
 from .redtube import RedTubeIE
 from .ringtv import RingTVIE
-from .soundcloud import SoundcloudIE, SoundcloudSetIE
+from .ro220 import Ro220IE
+from .rottentomatoes import RottenTomatoesIE
+from .roxwel import RoxwelIE
+from .rtlnow import RTLnowIE
+from .sina import SinaIE
+from .slashdot import SlashdotIE
+from .slideshare import SlideshareIE
+from .sohu import SohuIE
+from .soundcloud import SoundcloudIE, SoundcloudSetIE, SoundcloudUserIE
+from .southparkstudios import SouthParkStudiosIE
 from .spiegel import SpiegelIE
 from .stanfordoc import StanfordOpenClassroomIE
 from .statigram import StatigramIE
 from .steam import SteamIE
+from .sztvhu import SztvHuIE
 from .teamcoco import TeamcocoIE
 from .ted import TEDIE
 from .tf1 import TF1IE
+from .thisav import ThisAVIE
 from .traileraddict import TrailerAddictIE
+from .trilulilu import TriluliluIE
 from .tudou import TudouIE
 from .tumblr import TumblrIE
 from .tutv import TutvIE
-from .ustream import UstreamIE
+from .unistra import UnistraIE
+from .ustream import UstreamIE, UstreamChannelIE
 from .vbox7 import Vbox7IE
+from .veehd import VeeHDIE
 from .veoh import VeohIE
 from .vevo import VevoIE
-from .vimeo import VimeoIE
+from .vice import ViceIE
+from .viddler import ViddlerIE
+from .videodetective import VideoDetectiveIE
+from .videofyme import VideofyMeIE
+from .videopremium import VideoPremiumIE
+from .vimeo import VimeoIE, VimeoChannelIE
 from .vine import VineIE
 from .wat import WatIE
+from .websurg import WeBSurgIE
+from .weibo import WeiboIE
 from .wimp import WimpIE
 from .worldstarhiphop import WorldStarHipHopIE
 from .xhamster import XHamsterIE
@@ -84,6 +155,10 @@ from .youtube import (
     YoutubeChannelIE,
     YoutubeShowIE,
     YoutubeSubscriptionsIE,
+    YoutubeRecommendedIE,
+    YoutubeTruncatedURLIE,
+    YoutubeWatchLaterIE,
+    YoutubeFavouritesIE,
 )
 from .zdf import ZDFIE
@@ -95,12 +170,14 @@ _ALL_CLASSES = [
 ]
 _ALL_CLASSES.append(GenericIE)
 
+
 def gen_extractors():
     """ Return a list of an instance of every supported extractor.
        The order does matter; the first extractor matched is the one handling the URL.
""" return [klass() for klass in _ALL_CLASSES] + def get_info_extractor(ie_name): """Returns the info extractor class with the given ie_name""" return globals()[ie_name+'IE'] diff --git a/youtube_dl/extractor/addanime.py b/youtube_dl/extractor/addanime.py new file mode 100644 index 000000000..82a785a19 --- /dev/null +++ b/youtube_dl/extractor/addanime.py @@ -0,0 +1,75 @@ +import re + +from .common import InfoExtractor +from ..utils import ( + compat_HTTPError, + compat_str, + compat_urllib_parse, + compat_urllib_parse_urlparse, + + ExtractorError, +) + + +class AddAnimeIE(InfoExtractor): + + _VALID_URL = r'^http://(?:\w+\.)?add-anime\.net/watch_video.php\?(?:.*?)v=(?P<video_id>[\w_]+)(?:.*)' + IE_NAME = u'AddAnime' + _TEST = { + u'url': u'http://www.add-anime.net/watch_video.php?v=24MR3YO5SAS9', + u'file': u'24MR3YO5SAS9.flv', + u'md5': u'1036a0e0cd307b95bd8a8c3a5c8cfaf1', + u'info_dict': { + u"description": u"One Piece 606", + u"title": u"One Piece 606" + } + } + + def _real_extract(self, url): + try: + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('video_id') + webpage = self._download_webpage(url, video_id) + except ExtractorError as ee: + if not isinstance(ee.cause, compat_HTTPError): + raise + + redir_webpage = ee.cause.read().decode('utf-8') + action = self._search_regex( + r'<form id="challenge-form" action="([^"]+)"', + redir_webpage, u'Redirect form') + vc = self._search_regex( + r'<input type="hidden" name="jschl_vc" value="([^"]+)"/>', + redir_webpage, u'redirect vc value') + av = re.search( + r'a\.value = ([0-9]+)[+]([0-9]+)[*]([0-9]+);', + redir_webpage) + if av is None: + raise ExtractorError(u'Cannot find redirect math task') + av_res = int(av.group(1)) + int(av.group(2)) * int(av.group(3)) + + parsed_url = compat_urllib_parse_urlparse(url) + av_val = av_res + len(parsed_url.netloc) + confirm_url = ( + parsed_url.scheme + u'://' + parsed_url.netloc + + action + '?' 
+ + compat_urllib_parse.urlencode({ + 'jschl_vc': vc, 'jschl_answer': compat_str(av_val)})) + self._download_webpage( + confirm_url, video_id, + note=u'Confirming after redirect') + webpage = self._download_webpage(url, video_id) + + video_url = self._search_regex(r"var normal_video_file = '(.*?)';", + webpage, u'video file URL') + video_title = self._og_search_title(webpage) + video_description = self._og_search_description(webpage) + + return { + '_type': 'video', + 'id': video_id, + 'url': video_url, + 'ext': 'flv', + 'title': video_title, + 'description': video_description + } diff --git a/youtube_dl/extractor/appletrailers.py b/youtube_dl/extractor/appletrailers.py new file mode 100644 index 000000000..6d6237f8a --- /dev/null +++ b/youtube_dl/extractor/appletrailers.py @@ -0,0 +1,138 @@ +import re +import xml.etree.ElementTree +import json + +from .common import InfoExtractor +from ..utils import ( + compat_urlparse, + determine_ext, +) + + +class AppleTrailersIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?trailers.apple.com/trailers/(?P<company>[^/]+)/(?P<movie>[^/]+)' + _TEST = { + u"url": u"http://trailers.apple.com/trailers/wb/manofsteel/", + u"playlist": [ + { + u"file": u"manofsteel-trailer4.mov", + u"md5": u"d97a8e575432dbcb81b7c3acb741f8a8", + u"info_dict": { + u"duration": 111, + u"title": u"Trailer 4", + u"upload_date": u"20130523", + u"uploader_id": u"wb", + }, + }, + { + u"file": u"manofsteel-trailer3.mov", + u"md5": u"b8017b7131b721fb4e8d6f49e1df908c", + u"info_dict": { + u"duration": 182, + u"title": u"Trailer 3", + u"upload_date": u"20130417", + u"uploader_id": u"wb", + }, + }, + { + u"file": u"manofsteel-trailer.mov", + u"md5": u"d0f1e1150989b9924679b441f3404d48", + u"info_dict": { + u"duration": 148, + u"title": u"Trailer", + u"upload_date": u"20121212", + u"uploader_id": u"wb", + }, + }, + { + u"file": u"manofsteel-teaser.mov", + u"md5": u"5fe08795b943eb2e757fa95cb6def1cb", + u"info_dict": { + u"duration": 93, + u"title": u"Teaser", + u"upload_date": u"20120721", + u"uploader_id": u"wb", + }, + } + ] + } + + _JSON_RE = r'iTunes.playURL\((.*?)\);' + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + movie = mobj.group('movie') + uploader_id = mobj.group('company') + + playlist_url = compat_urlparse.urljoin(url, u'includes/playlists/itunes.inc') + playlist_snippet = self._download_webpage(playlist_url, movie) + playlist_cleaned = re.sub(r'(?s)<script[^<]*?>.*?</script>', u'', playlist_snippet) + playlist_cleaned = re.sub(r'<img ([^<]*?)>', r'<img \1/>', playlist_cleaned) + # The ' in the onClick attributes are not escaped, it couldn't be parsed + # with xml.etree.ElementTree.fromstring + # like: http://trailers.apple.com/trailers/wb/gravity/ + def _clean_json(m): + return u'iTunes.playURL(%s);' % m.group(1).replace('\'', ''') + playlist_cleaned = re.sub(self._JSON_RE, _clean_json, playlist_cleaned) + playlist_html = u'<html>' + playlist_cleaned + u'</html>' + + doc = xml.etree.ElementTree.fromstring(playlist_html) + playlist = [] + for li in doc.findall('./div/ul/li'): + on_click = li.find('.//a').attrib['onClick'] + trailer_info_json = self._search_regex(self._JSON_RE, + on_click, u'trailer info') + trailer_info = json.loads(trailer_info_json) + title = trailer_info['title'] + video_id = movie + '-' + re.sub(r'[^a-zA-Z0-9]', '', title).lower() + thumbnail = li.find('.//img').attrib['src'] + upload_date = trailer_info['posted'].replace('-', '') + + runtime = trailer_info['runtime'] + m = 
re.search(r'(?P<minutes>[0-9]+):(?P<seconds>[0-9]{1,2})', runtime)
+            duration = None
+            if m:
+                duration = 60 * int(m.group('minutes')) + int(m.group('seconds'))
+
+            first_url = trailer_info['url']
+            trailer_id = first_url.split('/')[-1].rpartition('_')[0].lower()
+            settings_json_url = compat_urlparse.urljoin(url, 'includes/settings/%s.json' % trailer_id)
+            settings_json = self._download_webpage(settings_json_url, trailer_id, u'Downloading settings json')
+            settings = json.loads(settings_json)
+
+            formats = []
+            for format in settings['metadata']['sizes']:
+                # The src is a file pointing to the real video file
+                format_url = re.sub(r'_(\d*p.mov)', r'_h\1', format['src'])
+                formats.append({
+                    'url': format_url,
+                    'ext': determine_ext(format_url),
+                    'format': format['type'],
+                    'width': format['width'],
+                    'height': int(format['height']),
+                })
+            formats = sorted(formats, key=lambda f: (f['height'], f['width']))
+
+            info = {
+                '_type': 'video',
+                'id': video_id,
+                'title': title,
+                'formats': formats,
+                'duration': duration,
+                'thumbnail': thumbnail,
+                'upload_date': upload_date,
+                'uploader_id': uploader_id,
+                'user_agent': 'QuickTime compatible (youtube-dl)',
+            }
+            # TODO: Remove when #980 has been merged
+            info['url'] = formats[-1]['url']
+            info['ext'] = formats[-1]['ext']
+
+            playlist.append(info)
+
+        return {
+            '_type': 'playlist',
+            'id': movie,
+            'entries': playlist,
+        }
diff --git a/youtube_dl/extractor/archiveorg.py b/youtube_dl/extractor/archiveorg.py
index 7efd1d823..61ce4469a 100644
--- a/youtube_dl/extractor/archiveorg.py
+++ b/youtube_dl/extractor/archiveorg.py
@@ -46,6 +46,8 @@ class ArchiveOrgIE(InfoExtractor):
                    for fn,fdata in data['files'].items()
                    if 'Video' in fdata['format']]
         formats.sort(key=lambda fdata: fdata['file_size'])
+        for f in formats:
+            f['ext'] = determine_ext(f['url'])
 
         info = {
             '_type': 'video',
@@ -61,7 +63,6 @@ class ArchiveOrgIE(InfoExtractor):
             info['thumbnail'] = thumbnail
 
         # TODO: Remove when #980 has been merged
-        info['url'] = formats[-1]['url']
-        info['ext'] = determine_ext(formats[-1]['url'])
+        info.update(formats[-1])
 
-        return info
\ No newline at end of file + return info diff --git a/youtube_dl/extractor/arte.py b/youtube_dl/extractor/arte.py index 993e30f7a..5ee8a67b1 100644 --- a/youtube_dl/extractor/arte.py +++ b/youtube_dl/extractor/arte.py @@ -1,3 +1,4 @@ +# encoding: utf-8 import re import json import xml.etree.ElementTree @@ -7,23 +8,24 @@ from ..utils import ( ExtractorError, find_xpath_attr, unified_strdate, + determine_ext, + get_element_by_id, ) +# There are different sources of video in arte.tv, the extraction process +# is different for each one. The videos usually expire in 7 days, so we can't +# add tests. + class ArteTvIE(InfoExtractor): - """ - There are two sources of video in arte.tv: videos.arte.tv and - www.arte.tv/guide, the extraction process is different for each one. - The videos expire in 7 days, so we can't add tests. - """ - _EMISSION_URL = r'(?:http://)?www\.arte.tv/guide/(?P<lang>fr|de)/(?:(?:sendungen|emissions)/)?(?P<id>.*?)/(?P<name>.*?)(\?.*)?' _VIDEOS_URL = r'(?:http://)?videos.arte.tv/(?P<lang>fr|de)/.*-(?P<id>.*?).html' + _LIVEWEB_URL = r'(?:http://)?liveweb.arte.tv/(?P<lang>fr|de)/(?P<subpage>.+?)/(?P<name>.+)' _LIVE_URL = r'index-[0-9]+\.html$' IE_NAME = u'arte.tv' @classmethod def suitable(cls, url): - return any(re.match(regex, url) for regex in (cls._EMISSION_URL, cls._VIDEOS_URL)) + return any(re.match(regex, url) for regex in (cls._VIDEOS_URL, cls._LIVEWEB_URL)) # TODO implement Live Stream # from ..utils import compat_urllib_parse @@ -54,66 +56,23 @@ class ArteTvIE(InfoExtractor): # video_url = u'%s/%s' % (info.get('url'), info.get('path')) def _real_extract(self, url): - mobj = re.match(self._EMISSION_URL, url) - if mobj is not None: - lang = mobj.group('lang') - # This is not a real id, it can be for example AJT for the news - # http://www.arte.tv/guide/fr/emissions/AJT/arte-journal - video_id = mobj.group('id') - return self._extract_emission(url, video_id, lang) - mobj = re.match(self._VIDEOS_URL, url) if mobj is not None: id = mobj.group('id') lang = mobj.group('lang') return self._extract_video(url, id, lang) + mobj = re.match(self._LIVEWEB_URL, url) + if mobj is not None: + name = mobj.group('name') + lang = mobj.group('lang') + return self._extract_liveweb(url, name, lang) + if re.search(self._LIVE_URL, video_id) is not None: raise ExtractorError(u'Arte live streams are not yet supported, sorry') # self.extractLiveStream(url) # return - def _extract_emission(self, url, video_id, lang): - """Extract from www.arte.tv/guide""" - webpage = self._download_webpage(url, video_id) - json_url = self._html_search_regex(r'arte_vp_url="(.*?)"', webpage, 'json url') - - json_info = self._download_webpage(json_url, video_id, 'Downloading info json') - self.report_extraction(video_id) - info = json.loads(json_info) - player_info = info['videoJsonPlayer'] - - info_dict = {'id': player_info['VID'], - 'title': player_info['VTI'], - 'description': player_info['VDE'], - 'upload_date': unified_strdate(player_info['VDA'].split(' ')[0]), - 'thumbnail': player_info['programImage'], - 'ext': 'flv', - } - - formats = player_info['VSR'].values() - def _match_lang(f): - # Return true if that format is in the language of the url - if lang == 'fr': - l = 'F' - elif lang == 'de': - l = 'A' - regexes = [r'VO?%s' % l, r'V%s-ST.' 
% l]
-            return any(re.match(r, f['versionCode']) for r in regexes)
-        # Some formats may not be in the same language as the url
-        formats = filter(_match_lang, formats)
-        # We order the formats by quality
-        formats = sorted(formats, key=lambda f: int(f['height']))
-        # Pick the best quality
-        format_info = formats[-1]
-        if format_info['mediaType'] == u'rtmp':
-            info_dict['url'] = format_info['streamer']
-            info_dict['play_path'] = 'mp4:' + format_info['url']
-        else:
-            info_dict['url'] = format_info['url']
-
-        return info_dict
-
     def _extract_video(self, url, video_id, lang):
         """Extract from videos.arte.tv"""
         ref_xml_url = url.replace('/videos/', '/do_delegate/videos/')
@@ -144,3 +103,129 @@ class ArteTvIE(InfoExtractor):
             'url': video_url,
             'ext': 'flv',
         }
+
+    def _extract_liveweb(self, url, name, lang):
+        """Extract from http://liveweb.arte.tv/"""
+        webpage = self._download_webpage(url, name)
+        video_id = self._search_regex(r'eventId=(\d+?)("|&)', webpage, u'event id')
+        config_xml = self._download_webpage('http://download.liveweb.arte.tv/o21/liveweb/events/event-%s.xml' % video_id,
+                                            video_id, u'Downloading information')
+        config_doc = xml.etree.ElementTree.fromstring(config_xml.encode('utf-8'))
+        event_doc = config_doc.find('event')
+        video_doc = event_doc.find('video')
+        url_node = video_doc.find('urlHd')
+        if url_node is None:
+            url_node = video_doc.find('urlSd')
+
+        return {'id': video_id,
+                'title': event_doc.find('name%s' % lang.capitalize()).text,
+                'url': url_node.text.replace('MP4', 'mp4'),
+                'ext': 'flv',
+                'thumbnail': self._og_search_thumbnail(webpage),
+                }
+
+
+class ArteTVPlus7IE(InfoExtractor):
+    IE_NAME = u'arte.tv:+7'
+    _VALID_URL = r'https?://www\.arte.tv/guide/(?P<lang>fr|de)/(?:(?:sendungen|emissions)/)?(?P<id>.*?)/(?P<name>.*?)(\?.*)?'
+
+    @classmethod
+    def _extract_url_info(cls, url):
+        mobj = re.match(cls._VALID_URL, url)
+        lang = mobj.group('lang')
+        # This is not a real id, it can be for example AJT for the news
+        # http://www.arte.tv/guide/fr/emissions/AJT/arte-journal
+        video_id = mobj.group('id')
+        return video_id, lang
+
+    def _real_extract(self, url):
+        video_id, lang = self._extract_url_info(url)
+        webpage = self._download_webpage(url, video_id)
+        return self._extract_from_webpage(webpage, video_id, lang)
+
+    def _extract_from_webpage(self, webpage, video_id, lang):
+        json_url = self._html_search_regex(r'arte_vp_url="(.*?)"', webpage, 'json url')
+
+        json_info = self._download_webpage(json_url, video_id, 'Downloading info json')
+        self.report_extraction(video_id)
+        info = json.loads(json_info)
+        player_info = info['videoJsonPlayer']
+
+        info_dict = {
+            'id': player_info['VID'],
+            'title': player_info['VTI'],
+            'description': player_info.get('VDE'),
+            'upload_date': unified_strdate(player_info.get('VDA', '').split(' ')[0]),
+            'thumbnail': player_info.get('programImage') or player_info.get('VTU', {}).get('IUR'),
+        }
+
+        formats = player_info['VSR'].values()
+        def _match_lang(f):
+            if f.get('versionCode') is None:
+                return True
+            # Return true if that format is in the language of the url
+            if lang == 'fr':
+                l = 'F'
+            elif lang == 'de':
+                l = 'A'
+            regexes = [r'VO?%s' % l, r'VO?.-ST%s' % l]
+            return any(re.match(r, f['versionCode']) for r in regexes)
+        # Some formats may not be in the same language as the url
+        formats = filter(_match_lang, formats)
+        # Some formats use the m3u8 protocol
+        formats = filter(lambda f: f.get('videoFormat') != 'M3U8', formats)
+        # We order the formats by quality
+        formats = sorted(formats, key=lambda f: int(f.get('height',-1)))
+        # Prefer videos without
subtitles in the same language + formats = sorted(formats, key=lambda f: re.match(r'VO(F|A)-STM\1', f.get('versionCode', '')) is None) + # Pick the best quality + def _format(format_info): + info = { + 'width': format_info.get('width'), + 'height': format_info.get('height'), + } + if format_info['mediaType'] == u'rtmp': + info['url'] = format_info['streamer'] + info['play_path'] = 'mp4:' + format_info['url'] + info['ext'] = 'flv' + else: + info['url'] = format_info['url'] + info['ext'] = determine_ext(info['url']) + return info + info_dict['formats'] = [_format(f) for f in formats] + # TODO: Remove when #980 has been merged + info_dict.update(info_dict['formats'][-1]) + + return info_dict + + +# It also uses the arte_vp_url url from the webpage to extract the information +class ArteTVCreativeIE(ArteTVPlus7IE): + IE_NAME = u'arte.tv:creative' + _VALID_URL = r'https?://creative\.arte\.tv/(?P<lang>fr|de)/magazine?/(?P<id>.+)' + + _TEST = { + u'url': u'http://creative.arte.tv/de/magazin/agentur-amateur-corporate-design', + u'file': u'050489-002.mp4', + u'info_dict': { + u'title': u'Agentur Amateur #2 - Corporate Design', + }, + } + + +class ArteTVFutureIE(ArteTVPlus7IE): + IE_NAME = u'arte.tv:future' + _VALID_URL = r'https?://future\.arte\.tv/(?P<lang>fr|de)/(thema|sujet)/.*?#article-anchor-(?P<id>\d+)' + + _TEST = { + u'url': u'http://future.arte.tv/fr/sujet/info-sciences#article-anchor-7081', + u'file': u'050940-003.mp4', + u'info_dict': { + u'title': u'Les champignons au secours de la planète', + }, + } + + def _real_extract(self, url): + anchor_id, lang = self._extract_url_info(url) + webpage = self._download_webpage(url, anchor_id) + row = get_element_by_id(anchor_id, webpage) + return self._extract_from_webpage(row, anchor_id, lang) diff --git a/youtube_dl/extractor/bliptv.py b/youtube_dl/extractor/bliptv.py index 08b28c994..493504f75 100644 --- a/youtube_dl/extractor/bliptv.py +++ b/youtube_dl/extractor/bliptv.py @@ -115,7 +115,7 @@ class BlipTVIE(InfoExtractor): ext = umobj.group(1) info = { - 'id': data['item_id'], + 'id': compat_str(data['item_id']), 'url': video_url, 'uploader': data['display_name'], 'upload_date': upload_date, diff --git a/youtube_dl/extractor/bloomberg.py b/youtube_dl/extractor/bloomberg.py new file mode 100644 index 000000000..3666a780b --- /dev/null +++ b/youtube_dl/extractor/bloomberg.py @@ -0,0 +1,27 @@ +import re + +from .common import InfoExtractor + + +class BloombergIE(InfoExtractor): + _VALID_URL = r'https?://www\.bloomberg\.com/video/(?P<name>.+?).html' + + _TEST = { + u'url': u'http://www.bloomberg.com/video/shah-s-presentation-on-foreign-exchange-strategies-qurhIVlJSB6hzkVi229d8g.html', + u'file': u'12bzhqZTqQHmmlA8I-i0NpzJgcG5NNYX.mp4', + u'info_dict': { + u'title': u'Shah\'s Presentation on Foreign-Exchange Strategies', + u'description': u'md5:abc86e5236f9f0e4866c59ad36736686', + }, + u'params': { + # Requires ffmpeg (m3u8 manifest) + u'skip_download': True, + }, + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + name = mobj.group('name') + webpage = self._download_webpage(url, name) + ooyala_url = self._og_search_video_url(webpage) + return self.url_result(ooyala_url, ie='Ooyala') diff --git a/youtube_dl/extractor/breakcom.py b/youtube_dl/extractor/breakcom.py index 34f555e89..53a898de3 100644 --- a/youtube_dl/extractor/breakcom.py +++ b/youtube_dl/extractor/breakcom.py @@ -1,6 +1,8 @@ import re +import json from .common import InfoExtractor +from ..utils import determine_ext class BreakIE(InfoExtractor): @@ -17,17 
+19,20 @@ class BreakIE(InfoExtractor): def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group(1).split("-")[-1] - webpage = self._download_webpage(url, video_id) - video_url = re.search(r"videoPath: '(.+?)',",webpage).group(1) - key = re.search(r"icon: '(.+?)',",webpage).group(1) - final_url = str(video_url)+"?"+str(key) - thumbnail_url = re.search(r"thumbnailURL: '(.+?)'",webpage).group(1) - title = re.search(r"sVidTitle: '(.+)',",webpage).group(1) - ext = video_url.split('.')[-1] + embed_url = 'http://www.break.com/embed/%s' % video_id + webpage = self._download_webpage(embed_url, video_id) + info_json = self._search_regex(r'var embedVars = ({.*?});', webpage, + u'info json', flags=re.DOTALL) + info = json.loads(info_json) + video_url = info['videoUri'] + m_youtube = re.search(r'(https?://www\.youtube\.com/watch\?v=.*)', video_url) + if m_youtube is not None: + return self.url_result(m_youtube.group(1), 'Youtube') + final_url = video_url + '?' + info['AuthToken'] return [{ 'id': video_id, 'url': final_url, - 'ext': ext, - 'title': title, - 'thumbnail': thumbnail_url, + 'ext': determine_ext(final_url), + 'title': info['contentName'], + 'thumbnail': info['thumbUri'], }] diff --git a/youtube_dl/extractor/brightcove.py b/youtube_dl/extractor/brightcove.py index 71e3c7883..745212f2f 100644 --- a/youtube_dl/extractor/brightcove.py +++ b/youtube_dl/extractor/brightcove.py @@ -1,3 +1,5 @@ +# encoding: utf-8 + import re import json import xml.etree.ElementTree @@ -7,15 +9,39 @@ from ..utils import ( compat_urllib_parse, find_xpath_attr, compat_urlparse, + + ExtractorError, ) class BrightcoveIE(InfoExtractor): _VALID_URL = r'https?://.*brightcove\.com/(services|viewer).*\?(?P<query>.*)' _FEDERATED_URL_TEMPLATE = 'http://c.brightcove.com/services/viewer/htmlFederated?%s' _PLAYLIST_URL_TEMPLATE = 'http://c.brightcove.com/services/json/experience/runtime/?command=get_programming_for_experience&playerKey=%s' - - # There is a test for Brigtcove in GenericIE, that way we test both the download - # and the detection of videos, and we don't have to find an URL that is always valid + + _TESTS = [ + { + # From http://www.8tv.cat/8aldia/videos/xavier-sala-i-martin-aquesta-tarda-a-8-al-dia/ + u'url': u'http://c.brightcove.com/services/viewer/htmlFederated?playerID=1654948606001&flashID=myExperience&%40videoPlayer=2371591881001', + u'file': u'2371591881001.mp4', + u'md5': u'9e80619e0a94663f0bdc849b4566af19', + u'note': u'Test Brightcove downloads and detection in GenericIE', + u'info_dict': { + u'title': u'Xavier Sala i Martín: “Un banc que no presta és un banc zombi que no serveix per a res”', + u'uploader': u'8TV', + u'description': u'md5:a950cc4285c43e44d763d036710cd9cd', + } + }, + { + # From http://medianetwork.oracle.com/video/player/1785452137001 + u'url': u'http://c.brightcove.com/services/viewer/htmlFederated?playerID=1217746023001&flashID=myPlayer&%40videoPlayer=1785452137001', + u'file': u'1785452137001.flv', + u'info_dict': { + u'title': u'JVMLS 2012: Arrays 2.0 - Opportunities and Challenges', + u'description': u'John Rose speaks at the JVM Language Summit, August 1, 2012.', + u'uploader': u'Oracle', + }, + }, + ] @classmethod def _build_brighcove_url(cls, object_str): @@ -23,6 +49,11 @@ class BrightcoveIE(InfoExtractor): Build a Brightcove url from a xml string containing <object class="BrightcoveExperience">{params}</object> """ + + # Fix up some stupid HTML, see https://github.com/rg3/youtube-dl/issues/1553 + object_str = re.sub(r'(<param name="[^"]+" 
value="[^"]+")>', + lambda m: m.group(1) + '/>', object_str) + object_doc = xml.etree.ElementTree.fromstring(object_str) assert u'BrightcoveExperience' in object_doc.attrib['class'] params = {'flashID': object_doc.attrib['id'], @@ -72,15 +103,27 @@ class BrightcoveIE(InfoExtractor): playlist_title=playlist_info['mediaCollectionDTO']['displayName']) def _extract_video_info(self, video_info): - renditions = video_info['renditions'] - renditions = sorted(renditions, key=lambda r: r['size']) - best_format = renditions[-1] + info = { + 'id': video_info['id'], + 'title': video_info['displayName'], + 'description': video_info.get('shortDescription'), + 'thumbnail': video_info.get('videoStillURL') or video_info.get('thumbnailURL'), + 'uploader': video_info.get('publisherName'), + } - return {'id': video_info['id'], - 'title': video_info['displayName'], - 'url': best_format['defaultURL'], + renditions = video_info.get('renditions') + if renditions: + renditions = sorted(renditions, key=lambda r: r['size']) + best_format = renditions[-1] + info.update({ + 'url': best_format['defaultURL'], 'ext': 'mp4', - 'description': video_info.get('shortDescription'), - 'thumbnail': video_info.get('videoStillURL') or video_info.get('thumbnailURL'), - 'uploader': video_info.get('publisherName'), - } + }) + elif video_info.get('FLVFullLengthURL') is not None: + info.update({ + 'url': video_info['FLVFullLengthURL'], + 'ext': 'flv', + }) + else: + raise ExtractorError(u'Unable to extract video url for %s' % info['id']) + return info diff --git a/youtube_dl/extractor/c56.py b/youtube_dl/extractor/c56.py new file mode 100644 index 000000000..dc3a8d47d --- /dev/null +++ b/youtube_dl/extractor/c56.py @@ -0,0 +1,36 @@ +# coding: utf-8 + +import re +import json + +from .common import InfoExtractor +from ..utils import determine_ext + +class C56IE(InfoExtractor): + _VALID_URL = r'https?://((www|player)\.)?56\.com/(.+?/)?(v_|(play_album.+-))(?P<textid>.+?)\.(html|swf)' + IE_NAME = u'56.com' + + _TEST ={ + u'url': u'http://www.56.com/u39/v_OTM0NDA3MTY.html', + u'file': u'93440716.flv', + u'md5': u'e59995ac63d0457783ea05f93f12a866', + u'info_dict': { + u'title': u'网事知多少 第32期:车怒', + }, + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url, flags=re.VERBOSE) + text_id = mobj.group('textid') + info_page = self._download_webpage('http://vxml.56.com/json/%s/' % text_id, + text_id, u'Downloading video info') + info = json.loads(info_page)['info'] + best_format = sorted(info['rfiles'], key=lambda f: int(f['filesize']))[-1] + video_url = best_format['url'] + + return {'id': info['vid'], + 'title': info['Subject'], + 'url': video_url, + 'ext': determine_ext(video_url), + 'thumbnail': info.get('bimg') or info.get('img'), + } diff --git a/youtube_dl/extractor/canalc2.py b/youtube_dl/extractor/canalc2.py new file mode 100644 index 000000000..e7f4fa9fd --- /dev/null +++ b/youtube_dl/extractor/canalc2.py @@ -0,0 +1,35 @@ +# coding: utf-8 +import re + +from .common import InfoExtractor + + +class Canalc2IE(InfoExtractor): + IE_NAME = 'canalc2.tv' + _VALID_URL = r'http://.*?\.canalc2\.tv/video\.asp\?idVideo=(\d+)&voir=oui' + + _TEST = { + u'url': u'http://www.canalc2.tv/video.asp?idVideo=12163&voir=oui', + u'file': u'12163.mp4', + u'md5': u'060158428b650f896c542dfbb3d6487f', + u'info_dict': { + u'title': u'Terrasses du Numérique' + } + } + + def _real_extract(self, url): + video_id = re.match(self._VALID_URL, url).group(1) + webpage = self._download_webpage(url, video_id) + file_name = self._search_regex( + 
r"so\.addVariable\('file','(.*?)'\);", + webpage, 'file name') + video_url = 'http://vod-flash.u-strasbg.fr:8080/' + file_name + + title = self._html_search_regex( + r'class="evenement8">(.*?)</a>', webpage, u'title') + + return {'id': video_id, + 'ext': 'mp4', + 'url': video_url, + 'title': title, + } diff --git a/youtube_dl/extractor/canalplus.py b/youtube_dl/extractor/canalplus.py index 3b1c88876..1db9b24cf 100644 --- a/youtube_dl/extractor/canalplus.py +++ b/youtube_dl/extractor/canalplus.py @@ -1,3 +1,4 @@ +# encoding: utf-8 import re import xml.etree.ElementTree @@ -5,24 +6,29 @@ from .common import InfoExtractor from ..utils import unified_strdate class CanalplusIE(InfoExtractor): - _VALID_URL = r'https?://www\.canalplus\.fr/.*?\?vid=(?P<id>\d+)' + _VALID_URL = r'https?://(www\.canalplus\.fr/.*?/(?P<path>.*)|player\.canalplus\.fr/#/(?P<id>\d+))' _VIDEO_INFO_TEMPLATE = 'http://service.canal-plus.com/video/rest/getVideosLiees/cplus/%s' IE_NAME = u'canalplus.fr' _TEST = { - u'url': u'http://www.canalplus.fr/c-divertissement/pid3351-c-le-petit-journal.html?vid=889861', - u'file': u'889861.flv', - u'md5': u'590a888158b5f0d6832f84001fbf3e99', + u'url': u'http://www.canalplus.fr/c-infos-documentaires/pid1830-c-zapping.html?vid=922470', + u'file': u'922470.flv', u'info_dict': { - u'title': u'Le Petit Journal 20/06/13 - La guerre des drone', - u'upload_date': u'20130620', + u'title': u'Zapping - 26/08/13', + u'description': u'Le meilleur de toutes les chaînes, tous les jours.\nEmission du 26 août 2013', + u'upload_date': u'20130826', + }, + u'params': { + u'skip_download': True, }, - u'skip': u'Requires rtmpdump' } def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') + if video_id is None: + webpage = self._download_webpage(url, mobj.group('path')) + video_id = self._search_regex(r'videoId = "(\d+)";', webpage, u'video id') info_url = self._VIDEO_INFO_TEMPLATE % video_id info_page = self._download_webpage(info_url,video_id, u'Downloading video info') @@ -43,4 +49,6 @@ class CanalplusIE(InfoExtractor): 'ext': 'flv', 'upload_date': unified_strdate(infos.find('PUBLICATION/DATE').text), 'thumbnail': media.find('IMAGES/GRAND').text, + 'description': infos.find('DESCRIPTION').text, + 'view_count': int(infos.find('NB_VUES').text), } diff --git a/youtube_dl/extractor/cinemassacre.py b/youtube_dl/extractor/cinemassacre.py new file mode 100644 index 000000000..6925b96c2 --- /dev/null +++ b/youtube_dl/extractor/cinemassacre.py @@ -0,0 +1,91 @@ +# encoding: utf-8 +import re + +from .common import InfoExtractor +from ..utils import ( + ExtractorError, +) + + +class CinemassacreIE(InfoExtractor): + _VALID_URL = r'(?:http://)?(?:www\.)?(?P<url>cinemassacre\.com/(?P<date_Y>[0-9]{4})/(?P<date_m>[0-9]{2})/(?P<date_d>[0-9]{2})/.+?)(?:[/?].*)?' 
+ _TESTS = [{ + u'url': u'http://cinemassacre.com/2012/11/10/avgn-the-movie-trailer/', + u'file': u'19911.flv', + u'info_dict': { + u'upload_date': u'20121110', + u'title': u'“Angry Video Game Nerd: The Movie” – Trailer', + u'description': u'md5:fb87405fcb42a331742a0dce2708560b', + }, + u'params': { + # rtmp download + u'skip_download': True, + }, + }, + { + u'url': u'http://cinemassacre.com/2013/10/02/the-mummys-hand-1940', + u'file': u'521be8ef82b16.flv', + u'info_dict': { + u'upload_date': u'20131002', + u'title': u'The Mummy’s Hand (1940)', + }, + u'params': { + # rtmp download + u'skip_download': True, + }, + }] + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + + webpage_url = u'http://' + mobj.group('url') + webpage = self._download_webpage(webpage_url, None) # Don't know video id yet + video_date = mobj.group('date_Y') + mobj.group('date_m') + mobj.group('date_d') + mobj = re.search(r'src="(?P<embed_url>http://player\.screenwavemedia\.com/play/(?:embed|player)\.php\?id=(?:Cinemassacre-)?(?P<video_id>.+?))"', webpage) + if not mobj: + raise ExtractorError(u'Can\'t extract embed url and video id') + playerdata_url = mobj.group(u'embed_url') + video_id = mobj.group(u'video_id') + + video_title = self._html_search_regex(r'<title>(?P<title>.+?)\|', + webpage, u'title') + video_description = self._html_search_regex(r'<div class="entry-content">(?P<description>.+?)</div>', + webpage, u'description', flags=re.DOTALL, fatal=False) + if len(video_description) == 0: + video_description = None + + playerdata = self._download_webpage(playerdata_url, video_id) + base_url = self._html_search_regex(r'\'streamer\': \'(?P<base_url>rtmp://.*?)/(?:vod|Cinemassacre)\'', + playerdata, u'base_url') + base_url += '/Cinemassacre/' + # Important: The file names in playerdata are not used by the player and even wrong for some videos + sd_file = 'Cinemassacre-%s_high.mp4' % video_id + hd_file = 'Cinemassacre-%s.mp4' % video_id + video_thumbnail = 'http://image.screenwavemedia.com/Cinemassacre/Cinemassacre-%s_thumb_640x360.jpg' % video_id + + formats = [ + { + 'url': base_url + sd_file, + 'ext': 'flv', + 'format': 'sd', + 'format_id': 'sd', + }, + { + 'url': base_url + hd_file, + 'ext': 'flv', + 'format': 'hd', + 'format_id': 'hd', + }, + ] + + info = { + 'id': video_id, + 'title': video_title, + 'formats': formats, + 'description': video_description, + 'upload_date': video_date, + 'thumbnail': video_thumbnail, + } + # TODO: Remove when #980 has been merged + info.update(formats[-1]) + return info diff --git a/youtube_dl/extractor/cnn.py b/youtube_dl/extractor/cnn.py new file mode 100644 index 000000000..a79f881cd --- /dev/null +++ b/youtube_dl/extractor/cnn.py @@ -0,0 +1,58 @@ +import re +import xml.etree.ElementTree + +from .common import InfoExtractor +from ..utils import determine_ext + + +class CNNIE(InfoExtractor): + _VALID_URL = r'''(?x)https?://(edition\.)?cnn\.com/video/(data/.+?|\?)/ + (?P<path>.+?/(?P<title>[^/]+?)(?:\.cnn|(?=&)))''' + + _TESTS = [{ + u'url': u'http://edition.cnn.com/video/?/video/sports/2013/06/09/nadal-1-on-1.cnn', + u'file': u'sports_2013_06_09_nadal-1-on-1.cnn.mp4', + u'md5': u'3e6121ea48df7e2259fe73a0628605c4', + u'info_dict': { + u'title': u'Nadal wins 8th French Open title', + u'description': u'World Sport\'s Amanda Davies chats with 2013 French Open champion Rafael Nadal.', + }, + }, + { + u"url": 
u"http://edition.cnn.com/video/?/video/us/2013/08/21/sot-student-gives-epic-speech.georgia-institute-of-technology&utm_source=feedburner&utm_medium=feed&utm_campaign=Feed%3A+rss%2Fcnn_topstories+%28RSS%3A+Top+Stories%29", + u"file": u"us_2013_08_21_sot-student-gives-epic-speech.georgia-institute-of-technology.mp4", + u"md5": u"b5cc60c60a3477d185af8f19a2a26f4e", + u"info_dict": { + u"title": "Student's epic speech stuns new freshmen", + u"description": "A Georgia Tech student welcomes the incoming freshmen with an epic speech backed by music from \"2001: A Space Odyssey.\"" + } + }] + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + path = mobj.group('path') + page_title = mobj.group('title') + info_url = u'http://cnn.com/video/data/3.0/%s/index.xml' % path + info_xml = self._download_webpage(info_url, page_title) + info = xml.etree.ElementTree.fromstring(info_xml.encode('utf-8')) + + formats = [] + for f in info.findall('files/file'): + mf = re.match(r'(\d+)x(\d+)(?:_(.*)k)?',f.attrib['bitrate']) + if mf is not None: + formats.append((int(mf.group(1)), int(mf.group(2)), int(mf.group(3) or 0), f.text)) + formats = sorted(formats) + (_,_,_, video_path) = formats[-1] + video_url = 'http://ht.cdn.turner.com/cnn/big%s' % video_path + + thumbnails = sorted([((int(t.attrib['height']),int(t.attrib['width'])), t.text) for t in info.findall('images/image')]) + thumbs_dict = [{'resolution': res, 'url': t_url} for (res, t_url) in thumbnails] + + return {'id': info.attrib['id'], + 'title': info.find('headline').text, + 'url': video_url, + 'ext': determine_ext(video_url), + 'thumbnail': thumbnails[-1][1], + 'thumbnails': thumbs_dict, + 'description': info.find('description').text, + } diff --git a/youtube_dl/extractor/collegehumor.py b/youtube_dl/extractor/collegehumor.py index 7ae0972e5..8d4c93d6d 100644 --- a/youtube_dl/extractor/collegehumor.py +++ b/youtube_dl/extractor/collegehumor.py @@ -1,26 +1,36 @@ import re -import socket import xml.etree.ElementTree from .common import InfoExtractor from ..utils import ( - compat_http_client, - compat_str, - compat_urllib_error, compat_urllib_parse_urlparse, - compat_urllib_request, + determine_ext, ExtractorError, ) class CollegeHumorIE(InfoExtractor): - _WORKING = False - _VALID_URL = r'^(?:https?://)?(?:www\.)?collegehumor\.com/video/(?P<videoid>[0-9]+)/(?P<shorttitle>.*)$' + _VALID_URL = r'^(?:https?://)?(?:www\.)?collegehumor\.com/(video|embed|e)/(?P<videoid>[0-9]+)/?(?P<shorttitle>.*)$' - def report_manifest(self, video_id): - """Report information extraction.""" - self.to_screen(u'%s: Downloading XML manifest' % video_id) + _TESTS = [{ + u'url': u'http://www.collegehumor.com/video/6902724/comic-con-cosplay-catastrophe', + u'file': u'6902724.mp4', + u'md5': u'1264c12ad95dca142a9f0bf7968105a0', + u'info_dict': { + u'title': u'Comic-Con Cosplay Catastrophe', + u'description': u'Fans get creative this year at San Diego. Too creative. 
And yes, that\'s really Joss Whedon.', + }, + }, + { + u'url': u'http://www.collegehumor.com/video/3505939/font-conference', + u'file': u'3505939.mp4', + u'md5': u'c51ca16b82bb456a4397987791a835f5', + u'info_dict': { + u'title': u'Font Conference', + u'description': u'This video wasn\'t long enough, so we made it double-spaced.', + }, + }] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) @@ -36,39 +46,42 @@ class CollegeHumorIE(InfoExtractor): self.report_extraction(video_id) xmlUrl = 'http://www.collegehumor.com/moogaloop/video/' + video_id - try: - metaXml = compat_urllib_request.urlopen(xmlUrl).read() - except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: - raise ExtractorError(u'Unable to download video info XML: %s' % compat_str(err)) + metaXml = self._download_webpage(xmlUrl, video_id, + u'Downloading info XML', + u'Unable to download video info XML') mdoc = xml.etree.ElementTree.fromstring(metaXml) try: videoNode = mdoc.findall('./video')[0] + youtubeIdNode = videoNode.find('./youtubeID') + if youtubeIdNode is not None: + return self.url_result(youtubeIdNode.text, 'Youtube') info['description'] = videoNode.findall('./description')[0].text info['title'] = videoNode.findall('./caption')[0].text info['thumbnail'] = videoNode.findall('./thumbnail')[0].text - manifest_url = videoNode.findall('./file')[0].text + next_url = videoNode.findall('./file')[0].text except IndexError: raise ExtractorError(u'Invalid metadata XML file') - manifest_url += '?hdcore=2.10.3' - self.report_manifest(video_id) - try: - manifestXml = compat_urllib_request.urlopen(manifest_url).read() - except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: - raise ExtractorError(u'Unable to download video info XML: %s' % compat_str(err)) - - adoc = xml.etree.ElementTree.fromstring(manifestXml) - try: - media_node = adoc.findall('./{http://ns.adobe.com/f4m/1.0}media')[0] - node_id = media_node.attrib['url'] - video_id = adoc.findall('./{http://ns.adobe.com/f4m/1.0}id')[0].text - except IndexError as err: - raise ExtractorError(u'Invalid manifest file') + if next_url.endswith(u'manifest.f4m'): + manifest_url = next_url + '?hdcore=2.10.3' + manifestXml = self._download_webpage(manifest_url, video_id, + u'Downloading XML manifest', + u'Unable to download video info XML') - url_pr = compat_urllib_parse_urlparse(manifest_url) - url = url_pr.scheme + '://' + url_pr.netloc + '/z' + video_id[:-2] + '/' + node_id + 'Seg1-Frag1' + adoc = xml.etree.ElementTree.fromstring(manifestXml) + try: + media_node = adoc.findall('./{http://ns.adobe.com/f4m/1.0}media')[0] + node_id = media_node.attrib['url'] + video_id = adoc.findall('./{http://ns.adobe.com/f4m/1.0}id')[0].text + except IndexError as err: + raise ExtractorError(u'Invalid manifest file') + url_pr = compat_urllib_parse_urlparse(info['thumbnail']) + info['url'] = url_pr.scheme + '://' + url_pr.netloc + video_id[:-2].replace('.csmil','').replace(',','') + info['ext'] = 'mp4' + else: + # Old-style direct links + info['url'] = next_url + info['ext'] = determine_ext(info['url']) - info['url'] = url - info['ext'] = 'f4f' - return [info] + return info diff --git a/youtube_dl/extractor/comedycentral.py b/youtube_dl/extractor/comedycentral.py index 93d9e3d5e..69b2beece 100644 --- a/youtube_dl/extractor/comedycentral.py +++ b/youtube_dl/extractor/comedycentral.py @@ -24,7 +24,9 @@ class ComedyCentralIE(InfoExtractor): (full-episodes/(?P<episode>.*)| (?P<clip> 
(the-colbert-report-(videos|collections)/(?P<clipID>[0-9]+)/[^/]*/(?P<cntitle>.*?)) - |(watch/(?P<date>[^/]*)/(?P<tdstitle>.*))))) + |(watch/(?P<date>[^/]*)/(?P<tdstitle>.*)))| + (?P<interview> + extended-interviews/(?P<interID>[0-9]+)/playlist_tds_extended_(?P<interview_title>.*?)/.*?))) $""" _TEST = { u'url': u'http://www.thedailyshow.com/watch/thu-december-13-2012/kristen-stewart', @@ -49,12 +51,12 @@ class ComedyCentralIE(InfoExtractor): '400': 'mp4', } _video_dimensions = { - '3500': '1280x720', - '2200': '960x540', - '1700': '768x432', - '1200': '640x360', - '750': '512x288', - '400': '384x216', + '3500': (1280, 720), + '2200': (960, 540), + '1700': (768, 432), + '1200': (640, 360), + '750': (512, 288), + '400': (384, 216), } @classmethod @@ -62,11 +64,13 @@ class ComedyCentralIE(InfoExtractor): """Receives a URL and returns True if suitable for this IE.""" return re.match(cls._VALID_URL, url, re.VERBOSE) is not None - def _print_formats(self, formats): - print('Available formats:') - for x in formats: - print('%s\t:\t%s\t[%s]' %(x, self._video_extensions.get(x, 'mp4'), self._video_dimensions.get(x, '???'))) - + @staticmethod + def _transform_rtmp_url(rtmp_video_url): + m = re.match(r'^rtmpe?://.*?/(?P<finalid>gsp.comedystor/.*)$', rtmp_video_url) + if not m: + raise ExtractorError(u'Cannot transform RTMP url') + base = 'http://mtvnmobile.vo.llnwd.net/kip0/_pxn=1+_pxI0=Ripod-h264+_pxL0=undefined+_pxM0=+_pxK=18639+_pxE=mp4/44620/mtvnorigin/' + return base + m.group('finalid') def _real_extract(self, url): mobj = re.match(self._VALID_URL, url, re.VERBOSE) @@ -87,6 +91,9 @@ class ComedyCentralIE(InfoExtractor): else: epTitle = mobj.group('cntitle') dlNewest = False + elif mobj.group('interview'): + epTitle = mobj.group('interview_title') + dlNewest = False else: dlNewest = not mobj.group('episode') if dlNewest: @@ -150,40 +157,31 @@ class ComedyCentralIE(InfoExtractor): self._downloader.report_error(u'unable to download ' + mediaId + ': No videos found') continue - if self._downloader.params.get('listformats', None): - self._print_formats([i[0] for i in turls]) - return - - # For now, just pick the highest bitrate - format,rtmp_video_url = turls[-1] - - # Get the format arg from the arg stream - req_format = self._downloader.params.get('format', None) - - # Select format if we can find one - for f,v in turls: - if f == req_format: - format, rtmp_video_url = f, v - break - - m = re.match(r'^rtmpe?://.*?/(?P<finalid>gsp.comedystor/.*)$', rtmp_video_url) - if not m: - raise ExtractorError(u'Cannot transform RTMP url') - base = 'http://mtvnmobile.vo.llnwd.net/kip0/_pxn=1+_pxI0=Ripod-h264+_pxL0=undefined+_pxM0=+_pxK=18639+_pxE=mp4/44620/mtvnorigin/' - video_url = base + m.group('finalid') + formats = [] + for format, rtmp_video_url in turls: + w, h = self._video_dimensions.get(format, (None, None)) + formats.append({ + 'url': self._transform_rtmp_url(rtmp_video_url), + 'ext': self._video_extensions.get(format, 'mp4'), + 'format_id': format, + 'height': h, + 'width': w, + }) effTitle = showId + u'-' + epTitle + u' part ' + compat_str(partNum+1) info = { 'id': shortMediaId, - 'url': video_url, + 'formats': formats, 'uploader': showId, 'upload_date': officialDate, 'title': effTitle, - 'ext': 'mp4', - 'format': format, 'thumbnail': None, 'description': compat_str(officialTitle), } + + # TODO: Remove when #980 has been merged + info.update(info['formats'][-1]) + results.append(info) return results diff --git a/youtube_dl/extractor/common.py b/youtube_dl/extractor/common.py index 
ec988fc90..2a5a85dc6 100644 --- a/youtube_dl/extractor/common.py +++ b/youtube_dl/extractor/common.py @@ -14,6 +14,7 @@ from ..utils import ( clean_html, compiled_regex_type, ExtractorError, + unescapeHTML, ) class InfoExtractor(object): @@ -34,6 +35,8 @@ class InfoExtractor(object): title: Video title, unescaped. ext: Video filename extension. + Instead of url and ext, formats can also specified. + The following fields are optional: format: The video format, defaults to ext (used for --get-format) @@ -46,12 +49,25 @@ class InfoExtractor(object): uploader_id: Nickname or id of the video uploader. location: Physical location of the video. player_url: SWF Player URL (used for rtmpdump). - subtitles: The subtitle file contents. + subtitles: The subtitle file contents as a dictionary in the format + {language: subtitles}. view_count: How many users have watched the video on the platform. urlhandle: [internal] The urlHandle to be used to download the file, like returned by urllib.request.urlopen - - The fields should all be Unicode strings. + age_limit: Age restriction for the video, as an integer (years) + formats: A list of dictionaries for each format available, it must + be ordered from worst to best quality. Potential fields: + * url Mandatory. The URL of the video file + * ext Will be calculated from url if missing + * format A human-readable description of the format + ("mp4 container with h264/opus"). + Calculated from width and height if missing. + * format_id A short description of the format + ("mp4_h264_opus" or "19") + * width Width of the video, if known + * height Height of the video, if known + + Unless mentioned otherwise, the fields should be Unicode strings. Subclasses of this one should re-define the _real_initialize() and _real_extract() methods and define a _VALID_URL regexp. @@ -76,7 +92,13 @@ class InfoExtractor(object): @classmethod def suitable(cls, url): """Receives a URL and returns True if suitable for this IE.""" - return re.match(cls._VALID_URL, url) is not None + + # This does not use has/getattr intentionally - we want to know whether + # we have cached the regexp for *this* class, whereas getattr would also + # match the superclass + if '_VALID_URL_RE' not in cls.__dict__: + cls._VALID_URL_RE = re.compile(cls._VALID_URL) + return cls._VALID_URL_RE.match(url) is not None @classmethod def working(cls): @@ -106,6 +128,11 @@ class InfoExtractor(object): """Real extraction process. 
Redefine in subclasses.""" pass + @classmethod + def ie_key(cls): + """A string for getting the InfoExtractor with get_info_extractor""" + return cls.__name__[:-2] + @property def IE_NAME(self): return type(self).__name__[:-2] @@ -121,7 +148,7 @@ class InfoExtractor(object): except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: if errnote is None: errnote = u'Unable to download webpage' - raise ExtractorError(u'%s: %s' % (errnote, compat_str(err)), sys.exc_info()[2]) + raise ExtractorError(u'%s: %s' % (errnote, compat_str(err)), sys.exc_info()[2], cause=err) def _download_webpage_handle(self, url_or_request, video_id, note=None, errnote=None): """ Returns a tuple (page content as string, URL handle) """ @@ -132,12 +159,17 @@ class InfoExtractor(object): urlh = self._request_webpage(url_or_request, video_id, note, errnote) content_type = urlh.headers.get('Content-Type', '') + webpage_bytes = urlh.read() m = re.match(r'[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+\s*;\s*charset=(.+)', content_type) if m: encoding = m.group(1) else: - encoding = 'utf-8' - webpage_bytes = urlh.read() + m = re.search(br'<meta[^>]+charset=[\'"]?([^\'")]+)[ /\'">]', + webpage_bytes[:1024]) + if m: + encoding = m.group(1).decode('ascii') + else: + encoding = 'utf-8' if self._downloader.params.get('dump_intermediate_pages', False): try: url = url_or_request.get_full_url() @@ -270,7 +302,8 @@ class InfoExtractor(object): def _og_search_property(self, prop, html, name=None, **kargs): if name is None: name = 'OpenGraph %s' % prop - return self._html_search_regex(self._og_regex(prop), html, name, flags=re.DOTALL, **kargs) + escaped = self._search_regex(self._og_regex(prop), html, name, flags=re.DOTALL, **kargs) + return unescapeHTML(escaped) def _og_search_thumbnail(self, html, **kargs): return self._og_search_property('image', html, u'thumbnail url', fatal=False, **kargs) @@ -286,6 +319,15 @@ class InfoExtractor(object): self._og_regex('video')], html, name, **kargs) + def _rta_search(self, html): + # See http://www.rtalabel.org/index.php?content=howtofaq#single + if re.search(r'(?ix)<meta\s+name="rating"\s+' + r' content="RTA-5042-1996-1400-1577-RTA"', + html): + return 18 + return 0 + + class SearchInfoExtractor(InfoExtractor): """ Base class for paged search queries extractors. diff --git a/youtube_dl/extractor/condenast.py b/youtube_dl/extractor/condenast.py new file mode 100644 index 000000000..f336a3c62 --- /dev/null +++ b/youtube_dl/extractor/condenast.py @@ -0,0 +1,106 @@ +# coding: utf-8 + +import re +import json + +from .common import InfoExtractor +from ..utils import ( + compat_urllib_parse, + orderedSet, + compat_urllib_parse_urlparse, + compat_urlparse, +) + + +class CondeNastIE(InfoExtractor): + """ + Condé Nast is a media group, some of its sites use a custom HTML5 player + that works the same in all of them. + """ + + # The keys are the supported sites and the values are the name to be shown + # to the user and in the extractor description. 
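Because the keys of this mapping are interpolated straight into _VALID_URL and the values feed the on-screen message, supporting a new site is a one-entry change. A minimal sketch of that wiring, trimmed to two entries from the mapping below:

    import re

    # Sketch only: how _SITES drives both URL matching and the user-facing name.
    _SITES = {'wired': u'WIRED', 'gq': u'GQ'}
    _VALID_URL = r'http://(video|www).(?P<site>%s).com/(?P<type>watch|series|video)/(?P<id>.+)' % '|'.join(_SITES.keys())
    mobj = re.match(_VALID_URL, 'http://video.wired.com/watch/3d-printed-speakers-lit-with-led')
    print(u'Extracting from %s with the Condé Nast extractor' % _SITES[mobj.group('site')])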
+ _SITES = {'wired': u'WIRED', + 'gq': u'GQ', + 'vogue': u'Vogue', + 'glamour': u'Glamour', + 'wmagazine': u'W Magazine', + 'vanityfair': u'Vanity Fair', + } + + _VALID_URL = r'http://(video|www).(?P<site>%s).com/(?P<type>watch|series|video)/(?P<id>.+)' % '|'.join(_SITES.keys()) + IE_DESC = u'Condé Nast media group: %s' % ', '.join(sorted(_SITES.values())) + + _TEST = { + u'url': u'http://video.wired.com/watch/3d-printed-speakers-lit-with-led', + u'file': u'5171b343c2b4c00dd0c1ccb3.mp4', + u'md5': u'1921f713ed48aabd715691f774c451f7', + u'info_dict': { + u'title': u'3D Printed Speakers Lit With LED', + u'description': u'Check out these beautiful 3D printed LED speakers. You can\'t actually buy them, but LumiGeek is working on a board that will let you make you\'re own.', + } + } + + def _extract_series(self, url, webpage): + title = self._html_search_regex(r'<div class="cne-series-info">.*?<h1>(.+?)</h1>', + webpage, u'series title', flags=re.DOTALL) + url_object = compat_urllib_parse_urlparse(url) + base_url = '%s://%s' % (url_object.scheme, url_object.netloc) + m_paths = re.finditer(r'<p class="cne-thumb-title">.*?<a href="(/watch/.+?)["\?]', + webpage, flags=re.DOTALL) + paths = orderedSet(m.group(1) for m in m_paths) + build_url = lambda path: compat_urlparse.urljoin(base_url, path) + entries = [self.url_result(build_url(path), 'CondeNast') for path in paths] + return self.playlist_result(entries, playlist_title=title) + + def _extract_video(self, webpage): + description = self._html_search_regex([r'<div class="cne-video-description">(.+?)</div>', + r'<div class="video-post-content">(.+?)</div>', + ], + webpage, u'description', + fatal=False, flags=re.DOTALL) + params = self._search_regex(r'var params = {(.+?)}[;,]', webpage, + u'player params', flags=re.DOTALL) + video_id = self._search_regex(r'videoId: [\'"](.+?)[\'"]', params, u'video id') + player_id = self._search_regex(r'playerId: [\'"](.+?)[\'"]', params, u'player id') + target = self._search_regex(r'target: [\'"](.+?)[\'"]', params, u'target') + data = compat_urllib_parse.urlencode({'videoId': video_id, + 'playerId': player_id, + 'target': target, + }) + base_info_url = self._search_regex(r'url = [\'"](.+?)[\'"][,;]', + webpage, u'base info url', + default='http://player.cnevids.com/player/loader.js?') + info_url = base_info_url + data + info_page = self._download_webpage(info_url, video_id, + u'Downloading video info') + video_info = self._search_regex(r'var video = ({.+?});', info_page, u'video info') + video_info = json.loads(video_info) + + def _formats_sort_key(f): + type_ord = 1 if f['type'] == 'video/mp4' else 0 + quality_ord = 1 if f['quality'] == 'high' else 0 + return (quality_ord, type_ord) + best_format = sorted(video_info['sources'][0], key=_formats_sort_key)[-1] + + return {'id': video_id, + 'url': best_format['src'], + 'ext': best_format['type'].split('/')[-1], + 'title': video_info['title'], + 'thumbnail': video_info['poster_frame'], + 'description': description, + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + site = mobj.group('site') + url_type = mobj.group('type') + id = mobj.group('id') + + self.to_screen(u'Extracting from %s with the Condé Nast extractor' % self._SITES[site]) + webpage = self._download_webpage(url, id) + + if url_type == 'series': + return self._extract_series(url, webpage) + else: + return self._extract_video(webpage) diff --git a/youtube_dl/extractor/dailymotion.py b/youtube_dl/extractor/dailymotion.py index 9bf7a28ca..7d8353946 100644 --- 
a/youtube_dl/extractor/dailymotion.py +++ b/youtube_dl/extractor/dailymotion.py @@ -1,27 +1,58 @@ import re import json +import itertools from .common import InfoExtractor +from .subtitles import SubtitlesInfoExtractor + from ..utils import ( compat_urllib_request, + compat_str, + get_element_by_attribute, + get_element_by_id, + orderedSet, ExtractorError, ) -class DailymotionIE(InfoExtractor): +class DailymotionBaseInfoExtractor(InfoExtractor): + @staticmethod + def _build_request(url): + """Build a request with the family filter disabled""" + request = compat_urllib_request.Request(url) + request.add_header('Cookie', 'family_filter=off') + return request + +class DailymotionIE(DailymotionBaseInfoExtractor, SubtitlesInfoExtractor): """Information Extractor for Dailymotion""" - _VALID_URL = r'(?i)(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/video/([^/]+)' + _VALID_URL = r'(?i)(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/(?:embed/)?video/([^/]+)' IE_NAME = u'dailymotion' - _TEST = { - u'url': u'http://www.dailymotion.com/video/x33vw9_tutoriel-de-youtubeur-dl-des-video_tech', - u'file': u'x33vw9.mp4', - u'md5': u'392c4b85a60a90dc4792da41ce3144eb', - u'info_dict': { - u"uploader": u"Alex and Van .", - u"title": u"Tutoriel de Youtubeur\"DL DES VIDEO DE YOUTUBE\"" - } - } + _TESTS = [ + { + u'url': u'http://www.dailymotion.com/video/x33vw9_tutoriel-de-youtubeur-dl-des-video_tech', + u'file': u'x33vw9.mp4', + u'md5': u'392c4b85a60a90dc4792da41ce3144eb', + u'info_dict': { + u"uploader": u"Amphora Alex and Van .", + u"title": u"Tutoriel de Youtubeur\"DL DES VIDEO DE YOUTUBE\"" + } + }, + # Vevo video + { + u'url': u'http://www.dailymotion.com/video/x149uew_katy-perry-roar-official_musi', + u'file': u'USUV71301934.mp4', + u'info_dict': { + u'title': u'Roar (Official)', + u'uploader': u'Katy Perry', + u'upload_date': u'20130905', + }, + u'params': { + u'skip_download': True, + }, + u'skip': u'VEVO is only available in some countries', + }, + ] def _real_extract(self, url): # Extract id and simplified title from URL @@ -30,15 +61,24 @@ class DailymotionIE(InfoExtractor): video_id = mobj.group(1).split('_')[0].split('?')[0] video_extension = 'mp4' + url = 'http://www.dailymotion.com/video/%s' % video_id # Retrieve video webpage to extract further information - request = compat_urllib_request.Request(url) - request.add_header('Cookie', 'family_filter=off') + request = self._build_request(url) webpage = self._download_webpage(request, video_id) # Extract URL, uploader and title from webpage self.report_extraction(video_id) + # It may just embed a vevo video: + m_vevo = re.search( + r'<link rel="video_src" href="[^"]*?vevo.com[^"]*?videoId=(?P<id>[\w]*)', + webpage) + if m_vevo is not None: + vevo_id = m_vevo.group('id') + self.to_screen(u'Vevo video detected: %s' % vevo_id) + return self.url_result(u'vevo:%s' % vevo_id, ie='Vevo') + video_uploader = self._search_regex([r'(?im)<span class="owner[^\"]+?">[^<]+?<a [^>]+?>([^<]+?)</a>', # Looking for official user r'<(?:span|a) .*?rel="author".*?>([^<]+?)</'], @@ -52,8 +92,12 @@ class DailymotionIE(InfoExtractor): embed_url = 'http://www.dailymotion.com/embed/video/%s' % video_id embed_page = self._download_webpage(embed_url, video_id, u'Downloading embed page') - info = self._search_regex(r'var info = ({.*?}),', embed_page, 'video info') + info = self._search_regex(r'var info = ({.*?}),$', embed_page, + 'video info', flags=re.MULTILINE) info = json.loads(info) + if info.get('error') is not None: + msg = 'Couldn\'t get video, Dailymotion says: %s' % 
info['error']['title'] + raise ExtractorError(msg, expected=True) # TODO: support choosing qualities @@ -68,6 +112,12 @@ class DailymotionIE(InfoExtractor): raise ExtractorError(u'Unable to extract video URL') video_url = info[max_quality] + # subtitles + video_subtitles = self.extract_subtitles(video_id) + if self._downloader.params.get('listsubtitles', False): + self._list_available_subtitles(video_id) + return + return [{ 'id': video_id, 'url': video_url, @@ -75,5 +125,76 @@ class DailymotionIE(InfoExtractor): 'upload_date': video_upload_date, 'title': self._og_search_title(webpage), 'ext': video_extension, + 'subtitles': video_subtitles, 'thumbnail': info['thumbnail_url'] }] + + def _get_available_subtitles(self, video_id): + try: + sub_list = self._download_webpage( + 'https://api.dailymotion.com/video/%s/subtitles?fields=id,language,url' % video_id, + video_id, note=False) + except ExtractorError as err: + self._downloader.report_warning(u'unable to download video subtitles: %s' % compat_str(err)) + return {} + info = json.loads(sub_list) + if (info['total'] > 0): + sub_lang_list = dict((l['language'], l['url']) for l in info['list']) + return sub_lang_list + self._downloader.report_warning(u'video doesn\'t have subtitles') + return {} + + +class DailymotionPlaylistIE(DailymotionBaseInfoExtractor): + IE_NAME = u'dailymotion:playlist' + _VALID_URL = r'(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/playlist/(?P<id>.+?)/' + _MORE_PAGES_INDICATOR = r'<div class="next">.*?<a.*?href="/playlist/.+?".*?>.*?</a>.*?</div>' + _PAGE_TEMPLATE = 'https://www.dailymotion.com/playlist/%s/%s' + + def _extract_entries(self, id): + video_ids = [] + for pagenum in itertools.count(1): + request = self._build_request(self._PAGE_TEMPLATE % (id, pagenum)) + webpage = self._download_webpage(request, + id, u'Downloading page %s' % pagenum) + + playlist_el = get_element_by_attribute(u'class', u'video_list', webpage) + video_ids.extend(re.findall(r'data-id="(.+?)"', playlist_el)) + + if re.search(self._MORE_PAGES_INDICATOR, webpage, re.DOTALL) is None: + break + return [self.url_result('http://www.dailymotion.com/video/%s' % video_id, 'Dailymotion') + for video_id in orderedSet(video_ids)] + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + playlist_id = mobj.group('id') + webpage = self._download_webpage(url, playlist_id) + + return {'_type': 'playlist', + 'id': playlist_id, + 'title': get_element_by_id(u'playlist_name', webpage), + 'entries': self._extract_entries(playlist_id), + } + + +class DailymotionUserIE(DailymotionPlaylistIE): + IE_NAME = u'dailymotion:user' + _VALID_URL = r'(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/user/(?P<user>[^/]+)' + _MORE_PAGES_INDICATOR = r'<div class="next">.*?<a.*?href="/user/.+?".*?>.*?</a>.*?</div>' + _PAGE_TEMPLATE = 'http://www.dailymotion.com/user/%s/%s' + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + user = mobj.group('user') + webpage = self._download_webpage(url, user) + full_user = self._html_search_regex( + r'<a class="label" href="/%s".*?>(.*?)</' % re.escape(user), + webpage, u'user', flags=re.DOTALL) + + return { + '_type': 'playlist', + 'id': user, + 'title': full_user, + 'entries': self._extract_entries(user), + } diff --git a/youtube_dl/extractor/daum.py b/youtube_dl/extractor/daum.py new file mode 100644 index 000000000..a804e83bd --- /dev/null +++ b/youtube_dl/extractor/daum.py @@ -0,0 +1,74 @@ +# encoding: utf-8 +import re +import xml.etree.ElementTree + +from .common import InfoExtractor +from 
..utils import ( + compat_urllib_parse, + determine_ext, +) + + +class DaumIE(InfoExtractor): + _VALID_URL = r'https?://tvpot\.daum\.net/.*?clipid=(?P<id>\d+)' + IE_NAME = u'daum.net' + + _TEST = { + u'url': u'http://tvpot.daum.net/clip/ClipView.do?clipid=52554690', + u'file': u'52554690.mp4', + u'info_dict': { + u'title': u'DOTA 2GETHER 시즌2 6회 - 2부', + u'description': u'DOTA 2GETHER 시즌2 6회 - 2부', + u'upload_date': u'20130831', + u'duration': 3868, + }, + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group(1) + canonical_url = 'http://tvpot.daum.net/v/%s' % video_id + webpage = self._download_webpage(canonical_url, video_id) + full_id = self._search_regex(r'<link rel="video_src" href=".+?vid=(.+?)"', + webpage, u'full id') + query = compat_urllib_parse.urlencode({'vid': full_id}) + info_xml = self._download_webpage( + 'http://tvpot.daum.net/clip/ClipInfoXml.do?' + query, video_id, + u'Downloading video info') + urls_xml = self._download_webpage( + 'http://videofarm.daum.net/controller/api/open/v1_2/MovieData.apixml?' + query, + video_id, u'Downloading video formats info') + info = xml.etree.ElementTree.fromstring(info_xml.encode('utf-8')) + urls = xml.etree.ElementTree.fromstring(urls_xml.encode('utf-8')) + + self.to_screen(u'%s: Getting video urls' % video_id) + formats = [] + for format_el in urls.findall('result/output_list/output_list'): + profile = format_el.attrib['profile'] + format_query = compat_urllib_parse.urlencode({ + 'vid': full_id, + 'profile': profile, + }) + url_xml = self._download_webpage( + 'http://videofarm.daum.net/controller/api/open/v1_2/MovieLocation.apixml?' + format_query, + video_id, note=False) + url_doc = xml.etree.ElementTree.fromstring(url_xml.encode('utf-8')) + format_url = url_doc.find('result/url').text + formats.append({ + 'url': format_url, + 'ext': determine_ext(format_url), + 'format_id': profile, + }) + + info = { + 'id': video_id, + 'title': info.find('TITLE').text, + 'formats': formats, + 'thumbnail': self._og_search_thumbnail(webpage), + 'description': info.find('CONTENTS').text, + 'duration': int(info.find('DURATION').text), + 'upload_date': info.find('REGDTTM').text[:8], + } + # TODO: Remove when #980 has been merged + info.update(formats[-1]) + return info diff --git a/youtube_dl/extractor/defense.py b/youtube_dl/extractor/defense.py new file mode 100644 index 000000000..424d960da --- /dev/null +++ b/youtube_dl/extractor/defense.py @@ -0,0 +1,39 @@ +import re +import json + +from .common import InfoExtractor + + +class DefenseGouvFrIE(InfoExtractor): + _IE_NAME = 'defense.gouv.fr' + _VALID_URL = (r'http://.*?\.defense\.gouv\.fr/layout/set/' + r'ligthboxvideo/base-de-medias/webtv/(.*)') + + _TEST = { + u'url': (u'http://www.defense.gouv.fr/layout/set/ligthboxvideo/' + u'base-de-medias/webtv/attaque-chimique-syrienne-du-21-aout-2013-1'), + u'file': u'11213.mp4', + u'md5': u'75bba6124da7e63d2d60b5244ec9430c', + "info_dict": { + "title": "attaque-chimique-syrienne-du-21-aout-2013-1" + } + } + + def _real_extract(self, url): + title = re.match(self._VALID_URL, url).group(1) + webpage = self._download_webpage(url, title) + video_id = self._search_regex( + r"flashvars.pvg_id=\"(\d+)\";", + webpage, 'ID') + + json_url = ('http://static.videos.gouv.fr/brightcovehub/export/json/' + + video_id) + info = self._download_webpage(json_url, title, + 'Downloading JSON config') + video_url = json.loads(info)['renditions'][0]['url'] + + return {'id': video_id, + 'ext': 'mp4', + 'url': video_url, + 'title': title, + 
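The DefenseGouvFr extractor above is a compact example of the two-hop pattern several of the new extractors in this patch use: scrape an id out of the page, then fetch a JSON (or XML) config that carries the real media URL. A standalone Python 3 sketch of that flow, reusing the regex and endpoint from the patch; the network calls and the idea of running it outside the extractor framework are illustrative only, not part of the commit:

    import json
    import re
    from urllib.request import urlopen

    def defense_video_url(page_url):
        # Hop 1: the page embeds the numeric video id in its flashvars.
        html = urlopen(page_url).read().decode('utf-8', 'replace')
        video_id = re.search(r'flashvars.pvg_id="(\d+)";', html).group(1)
        # Hop 2: a static JSON config lists renditions; take the first one,
        # exactly as the extractor does.
        config = json.loads(urlopen(
            'http://static.videos.gouv.fr/brightcovehub/export/json/' + video_id
        ).read().decode('utf-8'))
        return config['renditions'][0]['url']
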
} diff --git a/youtube_dl/extractor/dreisat.py b/youtube_dl/extractor/dreisat.py index 64b465805..765cb1f37 100644 --- a/youtube_dl/extractor/dreisat.py +++ b/youtube_dl/extractor/dreisat.py @@ -54,6 +54,7 @@ class DreiSatIE(InfoExtractor): 'width': int(fe.find('./width').text), 'height': int(fe.find('./height').text), 'url': fe.find('./url').text, + 'ext': determine_ext(fe.find('./url').text), 'filesize': int(fe.find('./filesize').text), 'video_bitrate': int(fe.find('./videoBitrate').text), '3sat_qualityname': fe.find('./quality').text, @@ -79,7 +80,6 @@ class DreiSatIE(InfoExtractor): } # TODO: Remove when #980 has been merged - info['url'] = formats[-1]['url'] - info['ext'] = determine_ext(formats[-1]['url']) + info.update(formats[-1]) - return info
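The dreisat hunk above is one instance of a pattern repeated throughout this patch set: extractors now build a formats list sorted from worst to best, and, until pull request #980 lands, copy the best entry's keys back onto the top-level dict so older code that only reads 'url'/'ext' keeps working. A minimal sketch of that shim; the helper name and sample data are illustrative, not from the commit:

    def finalize_info(info, formats):
        """Attach a worst-to-best 'formats' list and mirror the best
        format's fields at the top level for pre-#980 consumers."""
        info['formats'] = formats
        info.update(formats[-1])  # the compatibility shim the TODOs refer to
        return info

    info = finalize_info(
        {'id': '12345', 'title': 'Example clip'},
        [{'url': 'http://cdn.example.com/low.mp4', 'ext': 'mp4'},
         {'url': 'http://cdn.example.com/high.mp4', 'ext': 'mp4'}])
    assert info['url'] == 'http://cdn.example.com/high.mp4'
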
\ No newline at end of file + return info diff --git a/youtube_dl/extractor/ebaumsworld.py b/youtube_dl/extractor/ebaumsworld.py new file mode 100644 index 000000000..f02c6998b --- /dev/null +++ b/youtube_dl/extractor/ebaumsworld.py @@ -0,0 +1,37 @@ +import re +import xml.etree.ElementTree + +from .common import InfoExtractor +from ..utils import determine_ext + + +class EbaumsWorldIE(InfoExtractor): + _VALID_URL = r'https?://www\.ebaumsworld\.com/video/watch/(?P<id>\d+)' + + _TEST = { + u'url': u'http://www.ebaumsworld.com/video/watch/83367677/', + u'file': u'83367677.mp4', + u'info_dict': { + u'title': u'A Giant Python Opens The Door', + u'description': u'This is how nightmares start...', + u'uploader': u'jihadpizza', + }, + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + config_xml = self._download_webpage( + 'http://www.ebaumsworld.com/video/player/%s' % video_id, video_id) + config = xml.etree.ElementTree.fromstring(config_xml.encode('utf-8')) + video_url = config.find('file').text + + return { + 'id': video_id, + 'title': config.find('title').text, + 'url': video_url, + 'ext': determine_ext(video_url), + 'description': config.find('description').text, + 'thumbnail': config.find('image').text, + 'uploader': config.find('username').text, + } diff --git a/youtube_dl/extractor/exfm.py b/youtube_dl/extractor/exfm.py new file mode 100644 index 000000000..3443f19c5 --- /dev/null +++ b/youtube_dl/extractor/exfm.py @@ -0,0 +1,54 @@ +import re +import json + +from .common import InfoExtractor + + +class ExfmIE(InfoExtractor): + IE_NAME = u'exfm' + IE_DESC = u'ex.fm' + _VALID_URL = r'(?:http://)?(?:www\.)?ex\.fm/song/([^/]+)' + _SOUNDCLOUD_URL = r'(?:http://)?(?:www\.)?api\.soundcloud.com/tracks/([^/]+)/stream' + _TESTS = [ + { + u'url': u'http://ex.fm/song/1bgtzg', + u'file': u'95223130.mp3', + u'md5': u'8a7967a3fef10e59a1d6f86240fd41cf', + u'info_dict': { + u"title": u"We Can't Stop - Miley Cyrus", + u"uploader": u"Miley Cyrus", + u'upload_date': u'20130603', + u'description': u'Download "We Can\'t Stop" \r\niTunes: http://smarturl.it/WeCantStop?IQid=SC\r\nAmazon: http://smarturl.it/WeCantStopAMZ?IQid=SC', + }, + u'note': u'Soundcloud song', + }, + { + u'url': u'http://ex.fm/song/wddt8', + u'file': u'wddt8.mp3', + u'md5': u'966bd70741ac5b8570d8e45bfaed3643', + u'info_dict': { + u'title': u'Safe and Sound', + u'uploader': u'Capital Cities', + }, + }, + ] + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + song_id = mobj.group(1) + info_url = "http://ex.fm/api/v3/song/%s" %(song_id) + webpage = self._download_webpage(info_url, song_id) + info = json.loads(webpage) + song_url = info['song']['url'] + if re.match(self._SOUNDCLOUD_URL, song_url) is not None: + self.to_screen('Soundcloud song detected') + return self.url_result(song_url.replace('/stream',''), 'Soundcloud') + return [{ + 'id': song_id, + 'url': song_url, + 'ext': 'mp3', + 'title': info['song']['title'], + 'thumbnail': info['song']['image']['large'], + 'uploader': info['song']['artist'], + 'view_count': info['song']['loved_count'], + }] diff --git a/youtube_dl/extractor/facebook.py b/youtube_dl/extractor/facebook.py index beaa5b4bd..9d1bc0751 100644 --- a/youtube_dl/extractor/facebook.py +++ b/youtube_dl/extractor/facebook.py @@ -106,8 +106,8 @@ class FacebookIE(InfoExtractor): video_duration = int(video_data['video_duration']) thumbnail = video_data['thumbnail_src'] - video_title = self._html_search_regex('<h2 class="uiHeaderTitle">([^<]+)</h2>', - 
webpage, u'title') + video_title = self._html_search_regex( + r'<h2 class="uiHeaderTitle">([^<]*)</h2>', webpage, u'title') info = { 'id': video_id, diff --git a/youtube_dl/extractor/faz.py b/youtube_dl/extractor/faz.py new file mode 100644 index 000000000..deaa4ed2d --- /dev/null +++ b/youtube_dl/extractor/faz.py @@ -0,0 +1,60 @@ +# encoding: utf-8 +import re +import xml.etree.ElementTree + +from .common import InfoExtractor +from ..utils import ( + determine_ext, + clean_html, + get_element_by_attribute, +) + + +class FazIE(InfoExtractor): + IE_NAME = u'faz.net' + _VALID_URL = r'https?://www\.faz\.net/multimedia/videos/.*?-(?P<id>\d+).html' + + _TEST = { + u'url': u'http://www.faz.net/multimedia/videos/stockholm-chemie-nobelpreis-fuer-drei-amerikanische-forscher-12610585.html', + u'file': u'12610585.mp4', + u'info_dict': { + u'title': u'Stockholm: Chemie-Nobelpreis für drei amerikanische Forscher', + u'description': u'md5:1453fbf9a0d041d985a47306192ea253', + }, + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + self.to_screen(video_id) + webpage = self._download_webpage(url, video_id) + config_xml_url = self._search_regex(r'writeFLV\(\'(.+?)\',', webpage, + u'config xml url') + config_xml = self._download_webpage(config_xml_url, video_id, + u'Downloading config xml') + config = xml.etree.ElementTree.fromstring(config_xml.encode('utf-8')) + + encodings = config.find('ENCODINGS') + formats = [] + for code in ['LOW', 'HIGH', 'HQ']: + encoding = encodings.find(code) + if encoding is None: + continue + encoding_url = encoding.find('FILENAME').text + formats.append({ + 'url': encoding_url, + 'ext': determine_ext(encoding_url), + 'format_id': code.lower(), + }) + + descr_html = get_element_by_attribute('class', 'Content Copy', webpage) + info = { + 'id': video_id, + 'title': self._og_search_title(webpage), + 'formats': formats, + 'description': clean_html(descr_html), + 'thumbnail': config.find('STILL/STILL_BIG').text, + } + # TODO: Remove when #980 has been merged + info.update(formats[-1]) + return info diff --git a/youtube_dl/extractor/fktv.py b/youtube_dl/extractor/fktv.py new file mode 100644 index 000000000..9c89362ef --- /dev/null +++ b/youtube_dl/extractor/fktv.py @@ -0,0 +1,79 @@ +import re +import random +import json + +from .common import InfoExtractor +from ..utils import ( + determine_ext, + get_element_by_id, + clean_html, +) + + +class FKTVIE(InfoExtractor): + IE_NAME = u'fernsehkritik.tv' + _VALID_URL = r'(?:http://)?(?:www\.)?fernsehkritik.tv/folge-(?P<ep>[0-9]+)(?:/.*)?' + + _TEST = { + u'url': u'http://fernsehkritik.tv/folge-1', + u'file': u'00011.flv', + u'info_dict': { + u'title': u'Folge 1 vom 10. 
April 2007', + u'description': u'md5:fb4818139c7cfe6907d4b83412a6864f', + }, + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + episode = int(mobj.group('ep')) + + server = random.randint(2, 4) + video_thumbnail = 'http://fernsehkritik.tv/images/magazin/folge%d.jpg' % episode + start_webpage = self._download_webpage('http://fernsehkritik.tv/folge-%d/Start' % episode, + episode) + playlist = self._search_regex(r'playlist = (\[.*?\]);', start_webpage, + u'playlist', flags=re.DOTALL) + files = json.loads(re.sub('{[^{}]*?}', '{}', playlist)) + # TODO: return a single multipart video + videos = [] + for i, _ in enumerate(files, 1): + video_id = '%04d%d' % (episode, i) + video_url = 'http://dl%d.fernsehkritik.tv/fernsehkritik%d%s.flv' % (server, episode, '' if i == 1 else '-%d' % i) + video_title = 'Fernsehkritik %d.%d' % (episode, i) + videos.append({ + 'id': video_id, + 'url': video_url, + 'ext': determine_ext(video_url), + 'title': clean_html(get_element_by_id('eptitle', start_webpage)), + 'description': clean_html(get_element_by_id('contentlist', start_webpage)), + 'thumbnail': video_thumbnail + }) + return videos + + +class FKTVPosteckeIE(InfoExtractor): + IE_NAME = u'fernsehkritik.tv:postecke' + _VALID_URL = r'(?:http://)?(?:www\.)?fernsehkritik.tv/inline-video/postecke.php\?(.*&)?ep=(?P<ep>[0-9]+)(&|$)' + _TEST = { + u'url': u'http://fernsehkritik.tv/inline-video/postecke.php?iframe=true&width=625&height=440&ep=120', + u'file': u'0120.flv', + u'md5': u'262f0adbac80317412f7e57b4808e5c4', + u'info_dict': { + u"title": u"Postecke 120" + } + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + episode = int(mobj.group('ep')) + + server = random.randint(2, 4) + video_id = '%04d' % episode + video_url = 'http://dl%d.fernsehkritik.tv/postecke/postecke%d.flv' % (server, episode) + video_title = 'Postecke %d' % episode + return { + 'id': video_id, + 'url': video_url, + 'ext': determine_ext(video_url), + 'title': video_title, + } diff --git a/youtube_dl/extractor/flickr.py b/youtube_dl/extractor/flickr.py index 80d96baf7..e1d2f0526 100644 --- a/youtube_dl/extractor/flickr.py +++ b/youtube_dl/extractor/flickr.py @@ -9,7 +9,7 @@ from ..utils import ( class FlickrIE(InfoExtractor): """Information Extractor for Flickr videos""" - _VALID_URL = r'(?:https?://)?(?:www\.)?flickr\.com/photos/(?P<uploader_id>[\w\-_@]+)/(?P<id>\d+).*' + _VALID_URL = r'(?:https?://)?(?:www\.|secure\.)?flickr\.com/photos/(?P<uploader_id>[\w\-_@]+)/(?P<id>\d+).*' _TEST = { u'url': u'http://www.flickr.com/photos/forestwander-nature-pictures/5645318632/in/photostream/', u'file': u'5645318632.mp4', diff --git a/youtube_dl/extractor/francetv.py b/youtube_dl/extractor/francetv.py new file mode 100644 index 000000000..086cafca0 --- /dev/null +++ b/youtube_dl/extractor/francetv.py @@ -0,0 +1,129 @@ +# encoding: utf-8 +import re +import xml.etree.ElementTree +import json + +from .common import InfoExtractor +from ..utils import ( + compat_urlparse, +) + + +class FranceTVBaseInfoExtractor(InfoExtractor): + def _extract_video(self, video_id): + xml_desc = self._download_webpage( + 'http://www.francetvinfo.fr/appftv/webservices/video/' + 'getInfosOeuvre.php?id-diffusion=' + + video_id, video_id, 'Downloading XML config') + info = xml.etree.ElementTree.fromstring(xml_desc.encode('utf-8')) + + manifest_url = info.find('videos/video/url').text + video_url = manifest_url.replace('manifest.f4m', 'index_2_av.m3u8') + video_url = video_url.replace('/z/', '/i/') + thumbnail_path = 
info.find('image').text + + return {'id': video_id, + 'ext': 'mp4', + 'url': video_url, + 'title': info.find('titre').text, + 'thumbnail': compat_urlparse.urljoin('http://pluzz.francetv.fr', thumbnail_path), + 'description': info.find('synopsis').text, + } + + +class PluzzIE(FranceTVBaseInfoExtractor): + IE_NAME = u'pluzz.francetv.fr' + _VALID_URL = r'https?://pluzz\.francetv\.fr/videos/(.*?)\.html' + + # Can't use tests, videos expire in 7 days + + def _real_extract(self, url): + title = re.match(self._VALID_URL, url).group(1) + webpage = self._download_webpage(url, title) + video_id = self._search_regex( + r'data-diffusion="(\d+)"', webpage, 'ID') + return self._extract_video(video_id) + + +class FranceTvInfoIE(FranceTVBaseInfoExtractor): + IE_NAME = u'francetvinfo.fr' + _VALID_URL = r'https?://www\.francetvinfo\.fr/replay.*/(?P<title>.+).html' + + _TEST = { + u'url': u'http://www.francetvinfo.fr/replay-jt/france-3/soir-3/jt-grand-soir-3-lundi-26-aout-2013_393427.html', + u'file': u'84981923.mp4', + u'info_dict': { + u'title': u'Soir 3', + }, + u'params': { + u'skip_download': True, + }, + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + page_title = mobj.group('title') + webpage = self._download_webpage(url, page_title) + video_id = self._search_regex(r'id-video=(\d+?)"', webpage, u'video id') + return self._extract_video(video_id) + + +class France2IE(FranceTVBaseInfoExtractor): + IE_NAME = u'france2.fr' + _VALID_URL = r'''(?x)https?://www\.france2\.fr/ + (?: + emissions/.*?/videos/(?P<id>\d+) + | emission/(?P<key>[^/?]+) + )''' + + _TEST = { + u'url': u'http://www.france2.fr/emissions/13h15-le-samedi-le-dimanche/videos/75540104', + u'file': u'75540104.mp4', + u'info_dict': { + u'title': u'13h15, le samedi...', + u'description': u'md5:2e5b58ba7a2d3692b35c792be081a03d', + }, + u'params': { + u'skip_download': True, + }, + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + if mobj.group('key'): + webpage = self._download_webpage(url, mobj.group('key')) + video_id = self._html_search_regex( + r'''(?x)<div\s+class="video-player">\s* + <a\s+href="http://videos.francetv.fr/video/([0-9]+)"\s+ + class="francetv-video-player">''', + webpage, u'video ID') + else: + video_id = mobj.group('id') + return self._extract_video(video_id) + + +class GenerationQuoiIE(InfoExtractor): + IE_NAME = u'france2.fr:generation-quoi' + _VALID_URL = r'https?://generation-quoi\.france2\.fr/portrait/(?P<name>.*)(\?|$)' + + _TEST = { + u'url': u'http://generation-quoi.france2.fr/portrait/garde-a-vous', + u'file': u'k7FJX8VBcvvLmX4wA5Q.mp4', + u'info_dict': { + u'title': u'Génération Quoi - Garde à Vous', + u'uploader': u'Génération Quoi', + }, + u'params': { + # It uses Dailymotion + u'skip_download': True, + }, + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + name = mobj.group('name') + info_url = compat_urlparse.urljoin(url, '/medias/video/%s.json' % name) + info_json = self._download_webpage(info_url, name) + info = json.loads(info_json) + return self.url_result('http://www.dailymotion.com/video/%s' % info['id'], + ie='Dailymotion') diff --git a/youtube_dl/extractor/freesound.py b/youtube_dl/extractor/freesound.py new file mode 100644 index 000000000..de14b12e5 --- /dev/null +++ b/youtube_dl/extractor/freesound.py @@ -0,0 +1,36 @@ +import re + +from .common import InfoExtractor +from ..utils import determine_ext + +class FreesoundIE(InfoExtractor): + _VALID_URL = 
r'(?:https?://)?(?:www\.)?freesound\.org/people/([^/]+)/sounds/(?P<id>[^/]+)' + _TEST = { + u'url': u'http://www.freesound.org/people/miklovan/sounds/194503/', + u'file': u'194503.mp3', + u'md5': u'12280ceb42c81f19a515c745eae07650', + u'info_dict': { + u"title": u"gulls in the city.wav", + u"uploader" : u"miklovan", + u'description': u'the sounds of seagulls in the city', + } + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + music_id = mobj.group('id') + webpage = self._download_webpage(url, music_id) + title = self._html_search_regex(r'<div id="single_sample_header">.*?<a href="#">(.+?)</a>', + webpage, 'music title', flags=re.DOTALL) + music_url = self._og_search_property('audio', webpage, 'music url') + description = self._html_search_regex(r'<div id="sound_description">(.*?)</div>', + webpage, 'description', fatal=False, flags=re.DOTALL) + + return [{ + 'id': music_id, + 'title': title, + 'url': music_url, + 'uploader': self._og_search_property('audio:artist', webpage, 'music uploader'), + 'ext': determine_ext(music_url), + 'description': description, + }] diff --git a/youtube_dl/extractor/funnyordie.py b/youtube_dl/extractor/funnyordie.py index 67a7e5f76..2ccdb7073 100644 --- a/youtube_dl/extractor/funnyordie.py +++ b/youtube_dl/extractor/funnyordie.py @@ -21,17 +21,15 @@ class FunnyOrDieIE(InfoExtractor): video_id = mobj.group('id') webpage = self._download_webpage(url, video_id) - video_url = self._html_search_regex(r'<video[^>]*>\s*<source[^>]*>\s*<source src="(?P<url>[^"]+)"', + video_url = self._search_regex( + [r'type="video/mp4" src="(.*?)"', r'src="([^>]*?)" type=\'video/mp4\''], webpage, u'video URL', flags=re.DOTALL) - title = self._html_search_regex((r"<h1 class='player_page_h1'.*?>(?P<title>.*?)</h1>", - r'<title>(?P<title>[^<]+?)</title>'), webpage, 'title', flags=re.DOTALL) - info = { 'id': video_id, 'url': video_url, 'ext': 'mp4', - 'title': title, + 'title': self._og_search_title(webpage), 'description': self._og_search_description(webpage), } return [info] diff --git a/youtube_dl/extractor/gamespot.py b/youtube_dl/extractor/gamespot.py index 7585b7061..098768361 100644 --- a/youtube_dl/extractor/gamespot.py +++ b/youtube_dl/extractor/gamespot.py @@ -1,55 +1,59 @@ import re -import xml.etree.ElementTree +import json from .common import InfoExtractor from ..utils import ( - unified_strdate, compat_urllib_parse, + compat_urlparse, + unescapeHTML, + get_meta_content, ) + class GameSpotIE(InfoExtractor): _VALID_URL = r'(?:http://)?(?:www\.)?gamespot\.com/.*-(?P<page_id>\d+)/?' _TEST = { u"url": u"http://www.gamespot.com/arma-iii/videos/arma-iii-community-guide-sitrep-i-6410818/", - u"file": u"6410818.mp4", + u"file": u"gs-2300-6410818.mp4", u"md5": u"b2a30deaa8654fcccd43713a6b6a4825", u"info_dict": { - u"title": u"Arma III - Community Guide: SITREP I", - u"upload_date": u"20130627", + u"title": u"Arma 3 - Community Guide: SITREP I", + u'description': u'Check out this video where some of the basics of Arma 3 is explained.', } } - def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) - page_id = mobj.group('page_id') + page_id = video_id = mobj.group('page_id') webpage = self._download_webpage(url, page_id) - video_id = self._html_search_regex([r'"og:video" content=".*?\?id=(\d+)"', - r'http://www\.gamespot\.com/videoembed/(\d+)'], - webpage, 'video id') - data = compat_urllib_parse.urlencode({'id': video_id, 'newplayer': '1'}) - info_url = 'http://www.gamespot.com/pages/video_player/xml.php?' 
+ data - info_xml = self._download_webpage(info_url, video_id) - doc = xml.etree.ElementTree.fromstring(info_xml) - clip_el = doc.find('./playList/clip') + data_video_json = self._search_regex(r'data-video=\'(.*?)\'', webpage, u'data video') + data_video = json.loads(unescapeHTML(data_video_json)) - http_urls = [{'url': node.find('filePath').text, - 'rate': int(node.find('rate').text)} - for node in clip_el.find('./httpURI')] - best_quality = sorted(http_urls, key=lambda f: f['rate'])[-1] - video_url = best_quality['url'] - title = clip_el.find('./title').text - ext = video_url.rpartition('.')[2] - thumbnail_url = clip_el.find('./screenGrabURI').text - view_count = int(clip_el.find('./views').text) - upload_date = unified_strdate(clip_el.find('./postDate').text) + # Transform the manifest url to a link to the mp4 files + # they are used in mobile devices. + f4m_url = data_video['videoStreams']['f4m_stream'] + f4m_path = compat_urlparse.urlparse(f4m_url).path + QUALITIES_RE = r'((,\d+)+,?)' + qualities = self._search_regex(QUALITIES_RE, f4m_path, u'qualities').strip(',').split(',') + http_path = f4m_path[1:].split('/', 1)[1] + http_template = re.sub(QUALITIES_RE, r'%s', http_path) + http_template = http_template.replace('.csmil/manifest.f4m', '') + http_template = compat_urlparse.urljoin('http://video.gamespotcdn.com/', http_template) + formats = [] + for q in qualities: + formats.append({ + 'url': http_template % q, + 'ext': 'mp4', + 'format_id': q, + }) - return [{ - 'id' : video_id, - 'url' : video_url, - 'ext' : ext, - 'title' : title, - 'thumbnail' : thumbnail_url, - 'upload_date' : upload_date, - 'view_count' : view_count, - }] + info = { + 'id': data_video['guid'], + 'title': compat_urllib_parse.unquote(data_video['title']), + 'formats': formats, + 'description': get_meta_content('description', webpage), + 'thumbnail': self._og_search_thumbnail(webpage), + } + # TODO: Remove when #980 has been merged + info.update(formats[-1]) + return info diff --git a/youtube_dl/extractor/gametrailers.py b/youtube_dl/extractor/gametrailers.py index cd438bd2f..3cc02d97e 100644 --- a/youtube_dl/extractor/gametrailers.py +++ b/youtube_dl/extractor/gametrailers.py @@ -1,63 +1,36 @@ import re -import xml.etree.ElementTree -from .common import InfoExtractor -from ..utils import ( - compat_urllib_parse, +from .mtv import MTVIE, _media_xml_tag - ExtractorError, -) - -class GametrailersIE(InfoExtractor): +class GametrailersIE(MTVIE): + """ + Gametrailers use the same videos system as MTVIE, it just changes the feed + url, where the uri is and the method to get the thumbnails. + """ _VALID_URL = r'http://www.gametrailers.com/(?P<type>videos|reviews|full-episodes)/(?P<id>.*?)/(?P<title>.*)' _TEST = { u'url': u'http://www.gametrailers.com/videos/zbvr8i/mirror-s-edge-2-e3-2013--debut-trailer', - u'file': u'70e9a5d7-cf25-4a10-9104-6f3e7342ae0d.flv', - u'md5': u'c3edbc995ab4081976e16779bd96a878', + u'file': u'70e9a5d7-cf25-4a10-9104-6f3e7342ae0d.mp4', + u'md5': u'4c8e67681a0ea7ec241e8c09b3ea8cf7', u'info_dict': { - u"title": u"E3 2013: Debut Trailer" + u'title': u'E3 2013: Debut Trailer', + u'description': u'Faith is back! 
Check out the World Premiere trailer for Mirror\'s Edge 2 straight from the EA Press Conference at E3 2013!', }, - u'skip': u'Requires rtmpdump' } + # Overwrite MTVIE properties we don't want + _TESTS = [] + + _FEED_URL = 'http://www.gametrailers.com/feeds/mrss' + + def _get_thumbnail_url(self, uri, itemdoc): + search_path = '%s/%s' % (_media_xml_tag('group'), _media_xml_tag('thumbnail')) + return itemdoc.find(search_path).attrib['url'] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) - if mobj is None: - raise ExtractorError(u'Invalid URL: %s' % url) video_id = mobj.group('id') webpage = self._download_webpage(url, video_id) mgid = self._search_regex([r'data-video="(?P<mgid>mgid:.*?)"', r'data-contentId=\'(?P<mgid>mgid:.*?)\''], webpage, u'mgid') - - data = compat_urllib_parse.urlencode({'uri': mgid, 'acceptMethods': 'fms'}) - info_page = self._download_webpage('http://www.gametrailers.com/feeds/mrss?' + data, - video_id, u'Downloading video info') - doc = xml.etree.ElementTree.fromstring(info_page.encode('utf-8')) - default_thumb = doc.find('./channel/image/url').text - - media_namespace = {'media': 'http://search.yahoo.com/mrss/'} - parts = [{ - 'title': video_doc.find('title').text, - 'ext': 'flv', - 'id': video_doc.find('guid').text.rpartition(':')[2], - # Videos are actually flv not mp4 - 'url': self._get_video_url(video_doc.find('media:group/media:content', media_namespace).attrib['url'], video_id), - # The thumbnail may not be defined, it would be '' - 'thumbnail': video_doc.find('media:group/media:thumbnail', media_namespace).attrib['url'] or default_thumb, - 'description': video_doc.find('description').text, - } for video_doc in doc.findall('./channel/item')] - return parts - - def _get_video_url(self, mediagen_url, video_id): - if 'acceptMethods' not in mediagen_url: - mediagen_url += '&acceptMethods=fms' - links_webpage = self._download_webpage(mediagen_url, - video_id, u'Downloading video urls info') - doc = xml.etree.ElementTree.fromstring(links_webpage) - urls = list(doc.iter('src')) - if len(urls) == 0: - raise ExtractorError(u'Unable to extract video url') - # They are sorted from worst to best quality - return urls[-1].text - + return self._get_videos_info(mgid) diff --git a/youtube_dl/extractor/generic.py b/youtube_dl/extractor/generic.py index b633e896c..d48c84f8d 100644 --- a/youtube_dl/extractor/generic.py +++ b/youtube_dl/extractor/generic.py @@ -8,11 +8,13 @@ from ..utils import ( compat_urllib_error, compat_urllib_parse, compat_urllib_request, + compat_urlparse, ExtractorError, ) from .brightcove import BrightcoveIE + class GenericIE(InfoExtractor): IE_DESC = u'Generic downloader that works on some sites' _VALID_URL = r'.*' @@ -23,21 +25,10 @@ class GenericIE(InfoExtractor): u'file': u'13601338388002.mp4', u'md5': u'85b90ccc9d73b4acd9138d3af4c27f89', u'info_dict': { - u"uploader": u"www.hodiho.fr", + u"uploader": u"www.hodiho.fr", u"title": u"R\u00e9gis plante sa Jeep" } }, - { - u'url': u'http://www.8tv.cat/8aldia/videos/xavier-sala-i-martin-aquesta-tarda-a-8-al-dia/', - u'file': u'2371591881001.mp4', - u'md5': u'9e80619e0a94663f0bdc849b4566af19', - u'note': u'Test Brightcove downloads and detection in GenericIE', - u'info_dict': { - u'title': u'Xavier Sala i Martín: “Un banc que no presta és un banc zombi que no serveix per a res”', - u'uploader': u'8TV', - u'description': u'md5:a950cc4285c43e44d763d036710cd9cd', - } - }, ] def report_download_webpage(self, video_id): @@ -107,8 +98,18 @@ class GenericIE(InfoExtractor): return new_url def 
_real_extract(self, url): - new_url = self._test_redirect(url) - if new_url: return [self.url_result(new_url)] + parsed_url = compat_urlparse.urlparse(url) + if not parsed_url.scheme: + self._downloader.report_warning('The url doesn\'t specify the protocol, trying with http') + return self.url_result('http://' + url) + + try: + new_url = self._test_redirect(url) + if new_url: + return [self.url_result(new_url)] + except compat_urllib_error.HTTPError: + # This may be a stupid server that doesn't like HEAD, our UA, or so + pass video_id = url.split('/')[-1] try: @@ -116,11 +117,11 @@ class GenericIE(InfoExtractor): except ValueError: # since this is the last-resort InfoExtractor, if # this error is thrown, it'll be thrown here - raise ExtractorError(u'Invalid URL: %s' % url) + raise ExtractorError(u'Failed to download URL: %s' % url) self.report_extraction(video_id) - # Look for BrigthCove: - m_brightcove = re.search(r'<object.+?class=([\'"]).*?BrightcoveExperience.*?\1.+?</object>', webpage, re.DOTALL) + # Look for BrightCove: + m_brightcove = re.search(r'<object[^>]+?class=([\'"])[^>]*?BrightcoveExperience.*?\1.+?</object>', webpage, re.DOTALL) if m_brightcove is not None: self.to_screen(u'Brightcove video detected.') bc_url = BrightcoveIE._build_brighcove_url(m_brightcove.group()) @@ -145,15 +146,19 @@ class GenericIE(InfoExtractor): if m_video_type is not None: mobj = re.search(r'<meta.*?property="og:video".*?content="(.*?)"', webpage) if mobj is None: - raise ExtractorError(u'Invalid URL: %s' % url) + # HTML5 video + mobj = re.search(r'<video[^<]*(?:>.*?<source.*?)? src="([^"]+)"', webpage, flags=re.DOTALL) + if mobj is None: + raise ExtractorError(u'Unsupported URL: %s' % url) # It's possible that one of the regexes # matched, but returned an empty group: if mobj.group(1) is None: - raise ExtractorError(u'Invalid URL: %s' % url) + raise ExtractorError(u'Did not find a valid video URL at %s' % url) - video_url = compat_urllib_parse.unquote(mobj.group(1)) - video_id = os.path.basename(video_url) + video_url = mobj.group(1) + video_url = compat_urlparse.urljoin(url, video_url) + video_id = compat_urllib_parse.unquote(os.path.basename(video_url)) # here's a fun little line of code for you: video_extension = os.path.splitext(video_id)[1][1:] diff --git a/youtube_dl/extractor/googleplus.py b/youtube_dl/extractor/googleplus.py index 9f7fc19a4..ab12d7e93 100644 --- a/youtube_dl/extractor/googleplus.py +++ b/youtube_dl/extractor/googleplus.py @@ -40,7 +40,9 @@ class GooglePlusIE(InfoExtractor): self.report_extraction(video_id) # Extract update date - upload_date = self._html_search_regex('title="Timestamp">(.*?)</a>', + upload_date = self._html_search_regex( + r'''(?x)<a.+?class="o-T-s\s[^"]+"\s+style="display:\s*none"\s*> + ([0-9]{4}-[0-9]{2}-[0-9]{2})</a>''', webpage, u'upload date', fatal=False) if upload_date: # Convert timestring to a format suitable for filename @@ -57,8 +59,8 @@ class GooglePlusIE(InfoExtractor): webpage, 'title', default=u'NA') # Step 2, Simulate clicking the image box to launch video - DOMAIN = 'https://plus.google.com' - video_page = self._search_regex(r'<a href="((?:%s)?/photos/.*?)"' % re.escape(DOMAIN), + DOMAIN = 'https://plus.google.com/' + video_page = self._search_regex(r'<a href="((?:%s)?photos/.*?)"' % re.escape(DOMAIN), webpage, u'video page URL') if not video_page.startswith(DOMAIN): video_page = DOMAIN + video_page diff --git a/youtube_dl/extractor/hark.py b/youtube_dl/extractor/hark.py new file mode 100644 index 000000000..5bdd08afa --- /dev/null +++ 
b/youtube_dl/extractor/hark.py @@ -0,0 +1,37 @@ +# -*- coding: utf-8 -*- + +import re +import json + +from .common import InfoExtractor +from ..utils import determine_ext + +class HarkIE(InfoExtractor): + _VALID_URL = r'https?://www\.hark\.com/clips/(.+?)-.+' + _TEST = { + u'url': u'http://www.hark.com/clips/mmbzyhkgny-obama-beyond-the-afghan-theater-we-only-target-al-qaeda-on-may-23-2013', + u'file': u'mmbzyhkgny.mp3', + u'md5': u'6783a58491b47b92c7c1af5a77d4cbee', + u'info_dict': { + u'title': u"Obama: 'Beyond The Afghan Theater, We Only Target Al Qaeda' on May 23, 2013", + u'description': u'President Barack Obama addressed the nation live on May 23, 2013 in a speech aimed at addressing counter-terrorism policies including the use of drone strikes, detainees at Guantanamo Bay prison facility, and American citizens who are terrorists.', + u'duration': 11, + } + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group(1) + json_url = "http://www.hark.com/clips/%s.json" %(video_id) + info_json = self._download_webpage(json_url, video_id) + info = json.loads(info_json) + final_url = info['url'] + + return {'id': video_id, + 'url' : final_url, + 'title': info['name'], + 'ext': determine_ext(final_url), + 'description': info['description'], + 'thumbnail': info['image_original'], + 'duration': info['duration'], + } diff --git a/youtube_dl/extractor/hotnewhiphop.py b/youtube_dl/extractor/hotnewhiphop.py index ccca1d7e0..3798118a7 100644 --- a/youtube_dl/extractor/hotnewhiphop.py +++ b/youtube_dl/extractor/hotnewhiphop.py @@ -7,11 +7,11 @@ from .common import InfoExtractor class HotNewHipHopIE(InfoExtractor): _VALID_URL = r'http://www\.hotnewhiphop.com/.*\.(?P<id>.*)\.html' _TEST = { - u'url': u"http://www.hotnewhiphop.com/freddie-gibbs-lay-it-down-song.1435540.html'", + u'url': u"http://www.hotnewhiphop.com/freddie-gibbs-lay-it-down-song.1435540.html", u'file': u'1435540.mp3', u'md5': u'2c2cd2f76ef11a9b3b581e8b232f3d96', u'info_dict': { - u"title": u"Freddie Gibbs Songs - Lay It Down" + u"title": u"Freddie Gibbs - Lay It Down" } } diff --git a/youtube_dl/extractor/howcast.py b/youtube_dl/extractor/howcast.py index 6104c4b5e..46954337f 100644 --- a/youtube_dl/extractor/howcast.py +++ b/youtube_dl/extractor/howcast.py @@ -19,8 +19,7 @@ class HowcastIE(InfoExtractor): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') - webpage_url = 'http://www.howcast.com/videos/' + video_id - webpage = self._download_webpage(webpage_url, video_id) + webpage = self._download_webpage(url, video_id) self.report_extraction(video_id) diff --git a/youtube_dl/extractor/ign.py b/youtube_dl/extractor/ign.py index 62abab655..c52146f7d 100644 --- a/youtube_dl/extractor/ign.py +++ b/youtube_dl/extractor/ign.py @@ -13,7 +13,7 @@ class IGNIE(InfoExtractor): Some videos of it.ign.com are also supported """ - _VALID_URL = r'https?://.+?\.ign\.com/(?:videos|show_videos)(/.+)?/(?P<name_or_id>.+)' + _VALID_URL = r'https?://.+?\.ign\.com/(?P<type>videos|show_videos|articles|(?:[^/]*/feature))(/.+)?/(?P<name_or_id>.+)' IE_NAME = u'ign.com' _CONFIG_URL_TEMPLATE = 'http://www.ign.com/videos/configs/id/%s.config' @@ -21,15 +21,39 @@ class IGNIE(InfoExtractor): r'id="my_show_video">.*?<p>(.*?)</p>', ] - _TEST = { - u'url': u'http://www.ign.com/videos/2013/06/05/the-last-of-us-review', - u'file': u'8f862beef863986b2785559b9e1aa599.mp4', - u'md5': u'eac8bdc1890980122c3b66f14bdd02e9', - u'info_dict': { - u'title': u'The Last of Us Review', - u'description': 
u'md5:c8946d4260a4d43a00d5ae8ed998870c', - } - } + _TESTS = [ + { + u'url': u'http://www.ign.com/videos/2013/06/05/the-last-of-us-review', + u'file': u'8f862beef863986b2785559b9e1aa599.mp4', + u'md5': u'eac8bdc1890980122c3b66f14bdd02e9', + u'info_dict': { + u'title': u'The Last of Us Review', + u'description': u'md5:c8946d4260a4d43a00d5ae8ed998870c', + } + }, + { + u'url': u'http://me.ign.com/en/feature/15775/100-little-things-in-gta-5-that-will-blow-your-mind', + u'playlist': [ + { + u'file': u'5ebbd138523268b93c9141af17bec937.mp4', + u'info_dict': { + u'title': u'GTA 5 Video Review', + u'description': u'Rockstar drops the mic on this generation of games. Watch our review of the masterly Grand Theft Auto V.', + }, + }, + { + u'file': u'638672ee848ae4ff108df2a296418ee2.mp4', + u'info_dict': { + u'title': u'GTA 5\'s Twisted Beauty in Super Slow Motion', + u'description': u'The twisted beauty of GTA 5 in stunning slow motion.', + }, + }, + ], + u'params': { + u'skip_download': True, + }, + }, + ] def _find_video_id(self, webpage): res_id = [r'data-video-id="(.+?)"', @@ -41,7 +65,18 @@ class IGNIE(InfoExtractor): def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) name_or_id = mobj.group('name_or_id') + page_type = mobj.group('type') webpage = self._download_webpage(url, name_or_id) + if page_type == 'articles': + video_url = self._search_regex(r'var videoUrl = "(.+?)"', webpage, u'video url') + return self.url_result(video_url, ie='IGN') + elif page_type != 'video': + multiple_urls = re.findall( + '<param name="flashvars" value="[^"]*?url=(https?://www\.ign\.com/videos/.*?)["&]', + webpage) + if multiple_urls: + return [self.url_result(u, ie='IGN') for u in multiple_urls] + video_id = self._find_video_id(webpage) result = self._get_video_info(video_id) description = self._html_search_regex(self._DESCRIPTION_RE, @@ -68,7 +103,7 @@ class IGNIE(InfoExtractor): class OneUPIE(IGNIE): """Extractor for 1up.com, it uses the ign videos system.""" - _VALID_URL = r'https?://gamevideos.1up.com/video/id/(?P<name_or_id>.+)' + _VALID_URL = r'https?://gamevideos.1up.com/(?P<type>video)/id/(?P<name_or_id>.+)' IE_NAME = '1up.com' _DESCRIPTION_RE = r'<div id="vid_summary">(.+?)</div>' @@ -83,6 +118,9 @@ class OneUPIE(IGNIE): } } + # Override IGN tests + _TESTS = [] + def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) id = mobj.group('name_or_id') diff --git a/youtube_dl/extractor/ina.py b/youtube_dl/extractor/ina.py index 962c59214..652f19b7b 100644 --- a/youtube_dl/extractor/ina.py +++ b/youtube_dl/extractor/ina.py @@ -5,7 +5,7 @@ from .common import InfoExtractor class InaIE(InfoExtractor): """Information Extractor for Ina.fr""" - _VALID_URL = r'(?:http://)?(?:www\.)?ina\.fr/video/(?P<id>I[0-9]+)/.*' + _VALID_URL = r'(?:http://)?(?:www\.)?ina\.fr/video/(?P<id>I?[A-F0-9]+)/.*' _TEST = { u'url': u'www.ina.fr/video/I12055569/francois-hollande-je-crois-que-c-est-clair-video.html', u'file': u'I12055569.mp4', diff --git a/youtube_dl/extractor/instagram.py b/youtube_dl/extractor/instagram.py index f9ac8d5b4..ddc42882a 100644 --- a/youtube_dl/extractor/instagram.py +++ b/youtube_dl/extractor/instagram.py @@ -10,7 +10,8 @@ class InstagramIE(InfoExtractor): u'md5': u'0d2da106a9d2631273e192b372806516', u'info_dict': { u"uploader_id": u"naomipq", - u"title": u"Video by naomipq" + u"title": u"Video by naomipq", + u'description': u'md5:1f17f0ab29bd6fe2bfad705f58de3cb8', } } @@ -18,20 +19,17 @@ class InstagramIE(InfoExtractor): mobj = re.match(self._VALID_URL, url) video_id = mobj.group(1) 
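In the Instagram hunk just below, title and uploader scraping moves off fragile HTML ('<title>', '<div class="media-user">') onto the JSON blob the page embeds, which is what the new "owner"/"caption" regexes target. Roughly, against a made-up page fragment (the snippet is hypothetical, the regexes are the ones from the patch):

    import re

    # Hypothetical slice of the JSON embedded in an Instagram video page.
    page = '{"owner":{"username":"naomipq"},"caption":"Biking to work"}'

    uploader_id = re.search(r'"owner":\{"username":"(.+?)"', page).group(1)
    desc = re.search(r'"caption":"(.*?)"', page).group(1)
    title = u'Video by %s' % uploader_id  # the synthesized title convention
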
webpage = self._download_webpage(url, video_id) - html_title = self._html_search_regex( - r'<title>(.+?)</title>', - webpage, u'title', flags=re.DOTALL) - title = re.sub(u'(?: *\(Videos?\))? \u2022 Instagram$', '', html_title).strip() - uploader_id = self._html_search_regex( - r'<div class="media-user" id="media_user">.*?<h2><a href="[^"]*">([^<]*)</a></h2>', - webpage, u'uploader id', fatal=False, flags=re.DOTALL) - ext = 'mp4' + uploader_id = self._search_regex(r'"owner":{"username":"(.+?)"', + webpage, u'uploader id', fatal=False) + desc = self._search_regex(r'"caption":"(.*?)"', webpage, u'description', + fatal=False) return [{ 'id': video_id, 'url': self._og_search_video_url(webpage), - 'ext': ext, - 'title': title, + 'ext': 'mp4', + 'title': u'Video by %s' % uploader_id, 'thumbnail': self._og_search_thumbnail(webpage), - 'uploader_id' : uploader_id + 'uploader_id' : uploader_id, + 'description': desc, }] diff --git a/youtube_dl/extractor/internetvideoarchive.py b/youtube_dl/extractor/internetvideoarchive.py new file mode 100644 index 000000000..5986459d6 --- /dev/null +++ b/youtube_dl/extractor/internetvideoarchive.py @@ -0,0 +1,87 @@ +import re +import xml.etree.ElementTree + +from .common import InfoExtractor +from ..utils import ( + compat_urlparse, + compat_urllib_parse, + xpath_with_ns, + determine_ext, +) + + +class InternetVideoArchiveIE(InfoExtractor): + _VALID_URL = r'https?://video\.internetvideoarchive\.net/flash/players/.*?\?.*?publishedid.*?' + + _TEST = { + u'url': u'http://video.internetvideoarchive.net/flash/players/flashconfiguration.aspx?customerid=69249&publishedid=452693&playerid=247', + u'file': u'452693.mp4', + u'info_dict': { + u'title': u'SKYFALL', + u'description': u'In SKYFALL, Bond\'s loyalty to M is tested as her past comes back to haunt her. As MI6 comes under attack, 007 must track down and destroy the threat, no matter how personal the cost.', + u'duration': 156, + }, + } + + @staticmethod + def _build_url(query): + return 'http://video.internetvideoarchive.net/flash/players/flashconfiguration.aspx?' 
+ query + + @staticmethod + def _clean_query(query): + NEEDED_ARGS = ['publishedid', 'customerid'] + query_dic = compat_urlparse.parse_qs(query) + cleaned_dic = dict((k,v[0]) for (k,v) in query_dic.items() if k in NEEDED_ARGS) + # Other player ids return m3u8 urls + cleaned_dic['playerid'] = '247' + cleaned_dic['videokbrate'] = '100000' + return compat_urllib_parse.urlencode(cleaned_dic) + + def _real_extract(self, url): + query = compat_urlparse.urlparse(url).query + query_dic = compat_urlparse.parse_qs(query) + video_id = query_dic['publishedid'][0] + url = self._build_url(query) + + flashconfiguration_xml = self._download_webpage(url, video_id, + u'Downloading flash configuration') + flashconfiguration = xml.etree.ElementTree.fromstring(flashconfiguration_xml.encode('utf-8')) + file_url = flashconfiguration.find('file').text + file_url = file_url.replace('/playlist.aspx', '/mrssplaylist.aspx') + # Replace some of the parameters in the query to get the best quality + # and http links (no m3u8 manifests) + file_url = re.sub(r'(?<=\?)(.+)$', + lambda m: self._clean_query(m.group()), + file_url) + info_xml = self._download_webpage(file_url, video_id, + u'Downloading video info') + info = xml.etree.ElementTree.fromstring(info_xml.encode('utf-8')) + item = info.find('channel/item') + + def _bp(p): + return xpath_with_ns(p, + {'media': 'http://search.yahoo.com/mrss/', + 'jwplayer': 'http://developer.longtailvideo.com/trac/wiki/FlashFormats'}) + formats = [] + for content in item.findall(_bp('media:group/media:content')): + attr = content.attrib + f_url = attr['url'] + formats.append({ + 'url': f_url, + 'ext': determine_ext(f_url), + 'width': int(attr['width']), + 'bitrate': int(attr['bitrate']), + }) + formats = sorted(formats, key=lambda f: f['bitrate']) + + info = { + 'id': video_id, + 'title': item.find('title').text, + 'formats': formats, + 'thumbnail': item.find(_bp('media:thumbnail')).attrib['url'], + 'description': item.find('description').text, + 'duration': int(attr['duration']), + } + # TODO: Remove when #980 has been merged + info.update(formats[-1]) + return info diff --git a/youtube_dl/extractor/jeuxvideo.py b/youtube_dl/extractor/jeuxvideo.py new file mode 100644 index 000000000..6bb54b932 --- /dev/null +++ b/youtube_dl/extractor/jeuxvideo.py @@ -0,0 +1,52 @@ +# coding: utf-8 + +import json +import re +import xml.etree.ElementTree + +from .common import InfoExtractor + + +class JeuxVideoIE(InfoExtractor): + _VALID_URL = r'http://.*?\.jeuxvideo\.com/.*/(.*?)-\d+\.htm' + + _TEST = { + u'url': u'http://www.jeuxvideo.com/reportages-videos-jeux/0004/00046170/tearaway-playstation-vita-gc-2013-tearaway-nous-presente-ses-papiers-d-identite-00115182.htm', + u'file': u'5182.mp4', + u'md5': u'046e491afb32a8aaac1f44dd4ddd54ee', + u'info_dict': { + u'title': u'GC 2013 : Tearaway nous présente ses papiers d\'identité', + u'description': u'Lorsque les développeurs de LittleBigPlanet proposent un nouveau titre, on ne peut que s\'attendre à un résultat original et fort attrayant.\n', + }, + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + title = re.match(self._VALID_URL, url).group(1) + webpage = self._download_webpage(url, title) + xml_link = self._html_search_regex( + r'<param name="flashvars" value="config=(.*?)" />', + webpage, u'config URL') + + video_id = self._search_regex( + r'http://www\.jeuxvideo\.com/config/\w+/\d+/(.*?)/\d+_player\.xml', + xml_link, u'video ID') + + xml_config = self._download_webpage( + xml_link, title, u'Downloading XML config') + 
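The _clean_query helper above is a reusable trick: decompose the player URL's query string, keep only the identifying parameters, and pin the ones that select direct-HTTP mp4 output instead of m3u8 manifests. The same logic as a standalone Python 3 sketch (the constants are copied from the patch; the free-standing function is an assumption for illustration):

    from urllib.parse import parse_qs, urlencode, urlparse

    def clean_query(url):
        query = parse_qs(urlparse(url).query)
        cleaned = {k: v[0] for k, v in query.items()
                   if k in ('publishedid', 'customerid')}
        cleaned['playerid'] = '247'        # other player ids return m3u8 urls
        cleaned['videokbrate'] = '100000'  # request the best bitrate
        return urlencode(cleaned)

    print(clean_query('http://video.internetvideoarchive.net/flash/players/'
                      'flashconfiguration.aspx?customerid=69249&publishedid=452693'))
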
config = xml.etree.ElementTree.fromstring(xml_config.encode('utf-8')) + info_json = self._search_regex( + r'(?sm)<format\.json>(.*?)</format\.json>', + xml_config, u'JSON information') + info = json.loads(info_json)['versions'][0] + + video_url = 'http://video720.jeuxvideo.com/' + info['file'] + + return { + 'id': video_id, + 'title': config.find('titre_video').text, + 'ext': 'mp4', + 'url': video_url, + 'description': self._og_search_description(webpage), + 'thumbnail': config.find('image').text, + } diff --git a/youtube_dl/extractor/kankan.py b/youtube_dl/extractor/kankan.py new file mode 100644 index 000000000..445d46501 --- /dev/null +++ b/youtube_dl/extractor/kankan.py @@ -0,0 +1,39 @@ +import re + +from .common import InfoExtractor +from ..utils import determine_ext + + +class KankanIE(InfoExtractor): + _VALID_URL = r'https?://(?:.*?\.)?kankan\.com/.+?/(?P<id>\d+)\.shtml' + + _TEST = { + u'url': u'http://yinyue.kankan.com/vod/48/48863.shtml', + u'file': u'48863.flv', + u'md5': u'29aca1e47ae68fc28804aca89f29507e', + u'info_dict': { + u'title': u'Ready To Go', + }, + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + webpage = self._download_webpage(url, video_id) + + title = self._search_regex(r'(?:G_TITLE=|G_MOVIE_TITLE = )[\'"](.+?)[\'"]', webpage, u'video title') + surls = re.search(r'surls:\[\'.+?\'\]|lurl:\'.+?\.flv\'', webpage).group(0) + gcids = re.findall(r"http://.+?/.+?/(.+?)/", surls) + gcid = gcids[-1] + + video_info_page = self._download_webpage('http://p2s.cl.kankan.com/getCdnresource_flv?gcid=%s' % gcid, + video_id, u'Downloading video url info') + ip = self._search_regex(r'ip:"(.+?)"', video_info_page, u'video url ip') + path = self._search_regex(r'path:"(.+?)"', video_info_page, u'video url path') + video_url = 'http://%s%s' % (ip, path) + + return {'id': video_id, + 'title': title, + 'url': video_url, + 'ext': determine_ext(video_url), + } diff --git a/youtube_dl/extractor/keek.py b/youtube_dl/extractor/keek.py index dda78743d..a7b88d2d9 100644 --- a/youtube_dl/extractor/keek.py +++ b/youtube_dl/extractor/keek.py @@ -4,10 +4,10 @@ from .common import InfoExtractor class KeekIE(InfoExtractor): - _VALID_URL = r'http://(?:www\.)?keek\.com/(?:!|\w+/keeks/)(?P<videoID>\w+)' + _VALID_URL = r'https?://(?:www\.)?keek\.com/(?:!|\w+/keeks/)(?P<videoID>\w+)' IE_NAME = u'keek' _TEST = { - u'url': u'http://www.keek.com/ytdl/keeks/NODfbab', + u'url': u'https://www.keek.com/ytdl/keeks/NODfbab', u'file': u'NODfbab.mp4', u'md5': u'9b0636f8c0f7614afa4ea5e4c6e57e83', u'info_dict': { diff --git a/youtube_dl/extractor/kickstarter.py b/youtube_dl/extractor/kickstarter.py new file mode 100644 index 000000000..50bc883ef --- /dev/null +++ b/youtube_dl/extractor/kickstarter.py @@ -0,0 +1,37 @@ +import re + +from .common import InfoExtractor + + +class KickStarterIE(InfoExtractor): + _VALID_URL = r'https?://www\.kickstarter\.com/projects/(?P<id>\d*)/.*' + _TEST = { + u"url": u"https://www.kickstarter.com/projects/1404461844/intersection-the-story-of-josh-grant?ref=home_location", + u"file": u"1404461844.mp4", + u"md5": u"c81addca81327ffa66c642b5d8b08cab", + u"info_dict": { + u"title": u"Intersection: The Story of Josh Grant by Kyle Cowling", + }, + } + + def _real_extract(self, url): + m = re.match(self._VALID_URL, url) + video_id = m.group('id') + webpage_src = self._download_webpage(url, video_id) + + video_url = self._search_regex(r'data-video="(.*?)">', + webpage_src, u'video URL') + if 'mp4' in video_url: + ext = 'mp4' + else: + ext 
= 'flv' + video_title = self._html_search_regex(r"<title>(.*?)</title>", + webpage_src, u'title').rpartition(u'\u2014 Kickstarter')[0].strip() + + results = [{ + 'id': video_id, + 'url': video_url, + 'title': video_title, + 'ext': ext, + }] + return results diff --git a/youtube_dl/extractor/livestream.py b/youtube_dl/extractor/livestream.py new file mode 100644 index 000000000..d04da98c8 --- /dev/null +++ b/youtube_dl/extractor/livestream.py @@ -0,0 +1,60 @@ +import re +import json + +from .common import InfoExtractor +from ..utils import ( + compat_urllib_parse_urlparse, + compat_urlparse, + get_meta_content, + ExtractorError, +) + + +class LivestreamIE(InfoExtractor): + _VALID_URL = r'http://new.livestream.com/.*?/(?P<event_name>.*?)(/videos/(?P<id>\d+))?/?$' + _TEST = { + u'url': u'http://new.livestream.com/CoheedandCambria/WebsterHall/videos/4719370', + u'file': u'4719370.mp4', + u'md5': u'0d2186e3187d185a04b3cdd02b828836', + u'info_dict': { + u'title': u'Live from Webster Hall NYC', + u'upload_date': u'20121012', + } + } + + def _extract_video_info(self, video_data): + video_url = video_data.get('progressive_url_hd') or video_data.get('progressive_url') + return {'id': video_data['id'], + 'url': video_url, + 'ext': 'mp4', + 'title': video_data['caption'], + 'thumbnail': video_data['thumbnail_url'], + 'upload_date': video_data['updated_at'].replace('-','')[:8], + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + event_name = mobj.group('event_name') + webpage = self._download_webpage(url, video_id or event_name) + + if video_id is None: + # This is an event page: + player = get_meta_content('twitter:player', webpage) + if player is None: + raise ExtractorError('Couldn\'t extract event api url') + api_url = player.replace('/player', '') + api_url = re.sub(r'^(https?://)(new\.)', r'\1api.\2', api_url) + info = json.loads(self._download_webpage(api_url, event_name, + u'Downloading event info')) + videos = [self._extract_video_info(video_data['data']) + for video_data in info['feed']['data'] if video_data['type'] == u'video'] + return self.playlist_result(videos, info['id'], info['full_name']) + else: + og_video = self._og_search_video_url(webpage, name=u'player url') + query_str = compat_urllib_parse_urlparse(og_video).query + query = compat_urlparse.parse_qs(query_str) + api_url = query['play_url'][0].replace('.smil', '') + info = json.loads(self._download_webpage(api_url, video_id, + u'Downloading video info')) + return self._extract_video_info(info) diff --git a/youtube_dl/extractor/metacafe.py b/youtube_dl/extractor/metacafe.py index 4c3f81b98..e537648ff 100644 --- a/youtube_dl/extractor/metacafe.py +++ b/youtube_dl/extractor/metacafe.py @@ -9,7 +9,7 @@ from ..utils import ( compat_urllib_parse, compat_urllib_request, compat_str, - + determine_ext, ExtractorError, ) @@ -20,7 +20,7 @@ class MetacafeIE(InfoExtractor): _DISCLAIMER = 'http://www.metacafe.com/family_filter/' _FILTER_POST = 'http://www.metacafe.com/f/index.php?inputType=filter&controllerGroup=user' IE_NAME = u'metacafe' - _TEST = { + _TESTS = [{ u"add_ie": ["Youtube"], u"url": u"http://metacafe.com/watch/yt-_aUehQsCQtM/the_electric_company_short_i_pbs_kids_go/", u"file": u"_aUehQsCQtM.flv", @@ -31,7 +31,16 @@ class MetacafeIE(InfoExtractor): u"uploader": u"PBS", u"uploader_id": u"PBS" } - } + }, + { + u"url": u"http://www.metacafe.com/watch/an-dVVXnuY7Jh77J/the_andromeda_strain_1971_stop_the_bomb_part_3/", + u"file": u"an-dVVXnuY7Jh77J.mp4", + u"info_dict": { + 
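A recurring trick in this batch is steering a site toward a scrape-friendly page with a canned cookie: Dailymotion's family_filter=off earlier, and the flashVersion=0; header Metacafe gains just below, which makes the site fall back to an HTML5 <video> tag. The pattern in isolation, as a Python 3 sketch; the standalone helper and the bare URL usage are assumptions, not code from the commit:

    from urllib.request import Request, urlopen

    def fetch_with_cookie(url, cookie):
        # Send the cookie outright instead of performing the site's
        # disclaimer/filter handshake first.
        req = Request(url, headers={'Cookie': cookie})
        return urlopen(req).read().decode('utf-8', 'replace')

    html = fetch_with_cookie(
        'http://www.metacafe.com/watch/an-dVVXnuY7Jh77J/', 'flashVersion=0;')
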
u"title": u"The Andromeda Strain (1971): Stop the Bomb Part 3", + u"uploader": u"anyclip", + u"description": u"md5:38c711dd98f5bb87acf973d573442e67" + } + }] def report_disclaimer(self): @@ -73,14 +82,16 @@ class MetacafeIE(InfoExtractor): return [self.url_result('http://www.youtube.com/watch?v=%s' % mobj2.group(1), 'Youtube')] # Retrieve video webpage to extract further information - webpage = self._download_webpage('http://www.metacafe.com/watch/%s/' % video_id, video_id) + req = compat_urllib_request.Request('http://www.metacafe.com/watch/%s/' % video_id) + req.headers['Cookie'] = 'flashVersion=0;' + webpage = self._download_webpage(req, video_id) # Extract URL, uploader and title from webpage self.report_extraction(video_id) mobj = re.search(r'(?m)&mediaURL=([^&]+)', webpage) if mobj is not None: mediaURL = compat_urllib_parse.unquote(mobj.group(1)) - video_extension = mediaURL[-3:] + video_ext = mediaURL[-3:] # Extract gdaKey if available mobj = re.search(r'(?m)&gdaKey=(.*?)&', webpage) @@ -90,34 +101,37 @@ class MetacafeIE(InfoExtractor): gdaKey = mobj.group(1) video_url = '%s?__gda__=%s' % (mediaURL, gdaKey) else: - mobj = re.search(r' name="flashvars" value="(.*?)"', webpage) - if mobj is None: - raise ExtractorError(u'Unable to extract media URL') - vardict = compat_parse_qs(mobj.group(1)) - if 'mediaData' not in vardict: - raise ExtractorError(u'Unable to extract media URL') - mobj = re.search(r'"mediaURL":"(?P<mediaURL>http.*?)",(.*?)"key":"(?P<key>.*?)"', vardict['mediaData'][0]) - if mobj is None: - raise ExtractorError(u'Unable to extract media URL') - mediaURL = mobj.group('mediaURL').replace('\\/', '/') - video_extension = mediaURL[-3:] - video_url = '%s?__gda__=%s' % (mediaURL, mobj.group('key')) - - mobj = re.search(r'(?im)<title>(.*) - Video</title>', webpage) - if mobj is None: - raise ExtractorError(u'Unable to extract title') - video_title = mobj.group(1).decode('utf-8') - - mobj = re.search(r'submitter=(.*?);', webpage) - if mobj is None: - raise ExtractorError(u'Unable to extract uploader nickname') - video_uploader = mobj.group(1) - - return [{ - 'id': video_id.decode('utf-8'), - 'url': video_url.decode('utf-8'), - 'uploader': video_uploader.decode('utf-8'), + mobj = re.search(r'<video src="([^"]+)"', webpage) + if mobj: + video_url = mobj.group(1) + video_ext = 'mp4' + else: + mobj = re.search(r' name="flashvars" value="(.*?)"', webpage) + if mobj is None: + raise ExtractorError(u'Unable to extract media URL') + vardict = compat_parse_qs(mobj.group(1)) + if 'mediaData' not in vardict: + raise ExtractorError(u'Unable to extract media URL') + mobj = re.search(r'"mediaURL":"(?P<mediaURL>http.*?)",(.*?)"key":"(?P<key>.*?)"', vardict['mediaData'][0]) + if mobj is None: + raise ExtractorError(u'Unable to extract media URL') + mediaURL = mobj.group('mediaURL').replace('\\/', '/') + video_url = '%s?__gda__=%s' % (mediaURL, mobj.group('key')) + video_ext = determine_ext(video_url) + + video_title = self._html_search_regex(r'(?im)<title>(.*) - Video</title>', webpage, u'title') + description = self._og_search_description(webpage) + video_uploader = self._html_search_regex( + r'submitter=(.*?);|googletag\.pubads\(\)\.setTargeting\("(?:channel|submiter)","([^"]+)"\);', + webpage, u'uploader nickname', fatal=False) + + return { + '_type': 'video', + 'id': video_id, + 'url': video_url, + 'description': description, + 'uploader': video_uploader, 'upload_date': None, 'title': video_title, - 'ext': video_extension.decode('utf-8'), - }] + 'ext': video_ext, + } diff --git 
a/youtube_dl/extractor/metacritic.py b/youtube_dl/extractor/metacritic.py new file mode 100644 index 000000000..449138b56 --- /dev/null +++ b/youtube_dl/extractor/metacritic.py @@ -0,0 +1,55 @@ +import re +import xml.etree.ElementTree +import operator + +from .common import InfoExtractor + + +class MetacriticIE(InfoExtractor): + _VALID_URL = r'https?://www\.metacritic\.com/.+?/trailers/(?P<id>\d+)' + + _TEST = { + u'url': u'http://www.metacritic.com/game/playstation-4/infamous-second-son/trailers/3698222', + u'file': u'3698222.mp4', + u'info_dict': { + u'title': u'inFamous: Second Son - inSide Sucker Punch: Smoke & Mirrors', + u'description': u'Take a peak behind-the-scenes to see how Sucker Punch brings smoke into the universe of inFAMOUS Second Son on the PS4.', + u'duration': 221, + }, + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + webpage = self._download_webpage(url, video_id) + # The xml is not well formatted, there are raw '&' + info_xml = self._download_webpage('http://www.metacritic.com/video_data?video=' + video_id, + video_id, u'Downloading info xml').replace('&', '&amp;') + info = xml.etree.ElementTree.fromstring(info_xml.encode('utf-8')) + + clip = next(c for c in info.findall('playList/clip') if c.find('id').text == video_id) + formats = [] + for videoFile in clip.findall('httpURI/videoFile'): + rate_str = videoFile.find('rate').text + video_url = videoFile.find('filePath').text + formats.append({ + 'url': video_url, + 'ext': 'mp4', + 'format_id': rate_str, + 'rate': int(rate_str), + }) + formats.sort(key=operator.itemgetter('rate')) + + description = self._html_search_regex(r'<b>Description:</b>(.*?)</p>', + webpage, u'description', flags=re.DOTALL) + + info = { + 'id': video_id, + 'title': clip.find('title').text, + 'formats': formats, + 'description': description, + 'duration': int(clip.find('duration').text), + } + # TODO: Remove when #980 has been merged + info.update(formats[-1]) + return info diff --git a/youtube_dl/extractor/mit.py b/youtube_dl/extractor/mit.py new file mode 100644 index 000000000..52be9232f --- /dev/null +++ b/youtube_dl/extractor/mit.py @@ -0,0 +1,74 @@ +import re +import json + +from .common import InfoExtractor +from ..utils import ( + clean_html, + get_element_by_id, +) + + +class TechTVMITIE(InfoExtractor): + IE_NAME = u'techtv.mit.edu' + _VALID_URL = r'https?://techtv\.mit\.edu/(videos|embeds)/(?P<id>\d+)' + + _TEST = { + u'url': u'http://techtv.mit.edu/videos/25418-mit-dna-learning-center-set', + u'file': u'25418.mp4', + u'md5': u'1f8cb3e170d41fd74add04d3c9330e5f', + u'info_dict': { + u'title': u'MIT DNA Learning Center Set', + u'description': u'md5:82313335e8a8a3f243351ba55bc1b474', + }, + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + raw_page = self._download_webpage( + 'http://techtv.mit.edu/videos/%s' % video_id, video_id) + clean_page = re.compile(u'<!--.*?-->', re.S).sub(u'', raw_page) + + base_url = self._search_regex(r'ipadUrl: \'(.+?cloudfront.net/)', + raw_page, u'base url') + formats_json = self._search_regex(r'bitrates: (\[.+?\])', raw_page, + u'video formats') + formats = json.loads(formats_json) + formats = sorted(formats, key=lambda f: f['bitrate']) + + title = get_element_by_id('edit-title', clean_page) + description = clean_html(get_element_by_id('edit-description', clean_page)) + thumbnail = self._search_regex(r'playlist:.*?url: \'(.+?)\'', + raw_page, u'thumbnail', flags=re.DOTALL) + + return {'id':
video_id, + 'title': title, + 'url': base_url + formats[-1]['url'].replace('mp4:', ''), + 'ext': 'mp4', + 'description': description, + 'thumbnail': thumbnail, + } + + +class MITIE(TechTVMITIE): + IE_NAME = u'video.mit.edu' + _VALID_URL = r'https?://video\.mit\.edu/watch/(?P<title>[^/]+)' + + _TEST = { + u'url': u'http://video.mit.edu/watch/the-government-is-profiling-you-13222/', + u'file': u'21783.mp4', + u'md5': u'7db01d5ccc1895fc5010e9c9e13648da', + u'info_dict': { + u'title': u'The Government is Profiling You', + u'description': u'md5:ad5795fe1e1623b73620dbfd47df9afd', + }, + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + page_title = mobj.group('title') + webpage = self._download_webpage(url, page_title) + self.to_screen('%s: Extracting %s url' % (page_title, TechTVMITIE.IE_NAME)) + embed_url = self._search_regex(r'<iframe .*?src="(.+?)"', webpage, + u'embed url') + return self.url_result(embed_url, ie='TechTVMIT') diff --git a/youtube_dl/extractor/mixcloud.py b/youtube_dl/extractor/mixcloud.py index 8245b5583..a200dcd74 100644 --- a/youtube_dl/extractor/mixcloud.py +++ b/youtube_dl/extractor/mixcloud.py @@ -5,34 +5,27 @@ import socket from .common import InfoExtractor from ..utils import ( compat_http_client, - compat_str, compat_urllib_error, compat_urllib_request, - - ExtractorError, + unified_strdate, ) class MixcloudIE(InfoExtractor): - _WORKING = False # New API, but it seems good http://www.mixcloud.com/developers/documentation/ _VALID_URL = r'^(?:https?://)?(?:www\.)?mixcloud\.com/([\w\d-]+)/([\w\d-]+)' IE_NAME = u'mixcloud' - def report_download_json(self, file_id): - """Report JSON download.""" - self.to_screen(u'Downloading json') - - def get_urls(self, jsonData, fmt, bitrate='best'): - """Get urls from 'audio_formats' section in json""" - try: - bitrate_list = jsonData[fmt] - if bitrate is None or bitrate == 'best' or bitrate not in bitrate_list: - bitrate = max(bitrate_list) # select highest - - url_list = jsonData[fmt][bitrate] - except TypeError: # we have no bitrate info. 
- url_list = jsonData[fmt] - return url_list + _TEST = { + u'url': u'http://www.mixcloud.com/dholbach/cryptkeeper/', + u'file': u'dholbach-cryptkeeper.mp3', + u'info_dict': { + u'title': u'Cryptkeeper', + u'description': u'After quite a long silence from myself, finally another Drum\'n\'Bass mix with my favourite current dance floor bangers.', + u'uploader': u'Daniel Holbach', + u'uploader_id': u'dholbach', + u'upload_date': u'20111115', + }, + } def check_urls(self, url_list): """Returns 1st active url from list""" @@ -45,71 +38,32 @@ class MixcloudIE(InfoExtractor): return None - def _print_formats(self, formats): - print('Available formats:') - for fmt in formats.keys(): - for b in formats[fmt]: - try: - ext = formats[fmt][b][0] - print('%s\t%s\t[%s]' % (fmt, b, ext.split('.')[-1])) - except TypeError: # we have no bitrate info - ext = formats[fmt][0] - print('%s\t%s\t[%s]' % (fmt, '??', ext.split('.')[-1])) - break - def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) - if mobj is None: - raise ExtractorError(u'Invalid URL: %s' % url) - # extract uploader & filename from url - uploader = mobj.group(1).decode('utf-8') - file_id = uploader + "-" + mobj.group(2).decode('utf-8') - - # construct API request - file_url = 'http://www.mixcloud.com/api/1/cloudcast/' + '/'.join(url.split('/')[-3:-1]) + '.json' - # retrieve .json file with links to files - request = compat_urllib_request.Request(file_url) - try: - self.report_download_json(file_url) - jsonData = compat_urllib_request.urlopen(request).read() - except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: - raise ExtractorError(u'Unable to retrieve file: %s' % compat_str(err)) - - # parse JSON - json_data = json.loads(jsonData) - player_url = json_data['player_swf_url'] - formats = dict(json_data['audio_formats']) - - req_format = self._downloader.params.get('format', None) - - if self._downloader.params.get('listformats', None): - self._print_formats(formats) - return - - if req_format is None or req_format == 'best': - for format_param in formats.keys(): - url_list = self.get_urls(formats, format_param) - # check urls - file_url = self.check_urls(url_list) - if file_url is not None: - break # got it! 
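The probe-until-success idea in check_urls survives the rewrite that follows: instead of URLs advertised by the retired JSON API, the new _real_extract derives an originals URL from the page's preview URL and tries the numbered stream servers with check_urls(template_url % i for i in range(30)). A rough standalone sketch of that loop, using urllib directly where the real code goes through the compat wrappers; the host name is made up:

import urllib.request

def first_working_url(candidates):
    # Return the first candidate that opens without an error, mirroring
    # what MixcloudIE.check_urls() does.
    for url in candidates:
        try:
            urllib.request.urlopen(url)
            return url
        except Exception:
            continue
    return None

# Hypothetical mirror template, modelled on the stream%d substitution
# performed in the new extractor code below.
template = 'http://stream%d.example-cdn.com/cloudcasts/originals/track.mp3'
song_url = first_working_url(template % i for i in range(30))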
- else: - if req_format not in formats: - raise ExtractorError(u'Format is not available') - - url_list = self.get_urls(formats, req_format) - file_url = self.check_urls(url_list) - format_param = req_format - return [{ - 'id': file_id.decode('utf-8'), - 'url': file_url.decode('utf-8'), - 'uploader': uploader.decode('utf-8'), - 'upload_date': None, - 'title': json_data['name'], - 'ext': file_url.split('.')[-1].decode('utf-8'), - 'format': (format_param is None and u'NA' or format_param.decode('utf-8')), - 'thumbnail': json_data['thumbnail_url'], - 'description': json_data['description'], - 'player_url': player_url.decode('utf-8'), - }] + uploader = mobj.group(1) + cloudcast_name = mobj.group(2) + track_id = '-'.join((uploader, cloudcast_name)) + api_url = 'http://api.mixcloud.com/%s/%s/' % (uploader, cloudcast_name) + webpage = self._download_webpage(url, track_id) + json_data = self._download_webpage(api_url, track_id, + u'Downloading cloudcast info') + info = json.loads(json_data) + + preview_url = self._search_regex(r'data-preview-url="(.+?)"', webpage, u'preview url') + song_url = preview_url.replace('/previews/', '/cloudcasts/originals/') + template_url = re.sub(r'(stream\d*)', 'stream%d', song_url) + final_song_url = self.check_urls(template_url % i for i in range(30)) + + return { + 'id': track_id, + 'title': info['name'], + 'url': final_song_url, + 'ext': 'mp3', + 'description': info['description'], + 'thumbnail': info['pictures'].get('extra_large'), + 'uploader': info['user']['name'], + 'uploader_id': info['user']['username'], + 'upload_date': unified_strdate(info['created_time']), + 'view_count': info['play_count'], + } diff --git a/youtube_dl/extractor/mtv.py b/youtube_dl/extractor/mtv.py index 969db7113..e520e2bb4 100644 --- a/youtube_dl/extractor/mtv.py +++ b/youtube_dl/extractor/mtv.py @@ -1,28 +1,119 @@ import re -import socket import xml.etree.ElementTree from .common import InfoExtractor from ..utils import ( - compat_http_client, - compat_str, - compat_urllib_error, - compat_urllib_request, - + compat_urllib_parse, ExtractorError, ) +def _media_xml_tag(tag): + return '{http://search.yahoo.com/mrss/}%s' % tag class MTVIE(InfoExtractor): - _VALID_URL = r'^(?P<proto>https?://)?(?:www\.)?mtv\.com/videos/[^/]+/(?P<videoid>[0-9]+)/[^/]+$' - _WORKING = False + _VALID_URL = r'^https?://(?:www\.)?mtv\.com/videos/.+?/(?P<videoid>[0-9]+)/[^/]+$' + + _FEED_URL = 'http://www.mtv.com/player/embed/AS3/rss/' + + _TESTS = [ + { + u'url': u'http://www.mtv.com/videos/misc/853555/ours-vh1-storytellers.jhtml', + u'file': u'853555.mp4', + u'md5': u'850f3f143316b1e71fa56a4edfd6e0f8', + u'info_dict': { + u'title': u'Taylor Swift - "Ours (VH1 Storytellers)"', + u'description': u'Album: Taylor Swift performs "Ours" for VH1 Storytellers at Harvey Mudd College.', + }, + }, + { + u'url': u'http://www.mtv.com/videos/taylor-swift/916187/everything-has-changed-ft-ed-sheeran.jhtml', + u'file': u'USCJY1331283.mp4', + u'md5': u'73b4e7fcadd88929292fe52c3ced8caf', + u'info_dict': { + u'title': u'Everything Has Changed', + u'upload_date': u'20130606', + u'uploader': u'Taylor Swift', + }, + u'skip': u'VEVO is only available in some countries', + }, + ] + + @staticmethod + def _id_from_uri(uri): + return uri.split(':')[-1] + + # This was originally implemented for ComedyCentral, but it also works here + @staticmethod + def _transform_rtmp_url(rtmp_video_url): + m = re.match(r'^rtmpe?://.*?/(?P<finalid>gsp\..+?/.*)$', rtmp_video_url) + if not m: + raise ExtractorError(u'Cannot transform RTMP url') + base = 
'http://mtvnmobile.vo.llnwd.net/kip0/_pxn=1+_pxI0=Ripod-h264+_pxL0=undefined+_pxM0=+_pxK=18639+_pxE=mp4/44620/mtvnorigin/' + return base + m.group('finalid') + + def _get_thumbnail_url(self, uri, itemdoc): + return 'http://mtv.mtvnimages.com/uri/' + uri + + def _extract_video_formats(self, metadataXml): + if '/error_country_block.swf' in metadataXml: + raise ExtractorError(u'This video is not available from your country.', expected=True) + mdoc = xml.etree.ElementTree.fromstring(metadataXml.encode('utf-8')) + + formats = [] + for rendition in mdoc.findall('.//rendition'): + try: + _, _, ext = rendition.attrib['type'].partition('/') + rtmp_video_url = rendition.find('./src').text + formats.append({'ext': ext, + 'url': self._transform_rtmp_url(rtmp_video_url), + 'format_id': rendition.get('bitrate'), + 'width': int(rendition.get('width')), + 'height': int(rendition.get('height')), + }) + except (KeyError, TypeError): + raise ExtractorError('Invalid rendition field.') + return formats + + def _get_video_info(self, itemdoc): + uri = itemdoc.find('guid').text + video_id = self._id_from_uri(uri) + self.report_extraction(video_id) + mediagen_url = itemdoc.find('%s/%s' % (_media_xml_tag('group'), _media_xml_tag('content'))).attrib['url'] + if 'acceptMethods' not in mediagen_url: + mediagen_url += '&acceptMethods=fms' + mediagen_page = self._download_webpage(mediagen_url, video_id, + u'Downloading video urls') + + description_node = itemdoc.find('description') + if description_node is not None: + description = description_node.text.strip() + else: + description = None + + info = { + 'title': itemdoc.find('title').text, + 'formats': self._extract_video_formats(mediagen_page), + 'id': video_id, + 'thumbnail': self._get_thumbnail_url(uri, itemdoc), + 'description': description, + } + + # TODO: Remove when #980 has been merged + info.update(info['formats'][-1]) + + return info
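With _get_video_info handling a single RSS item, fetching a video page reduces to one feed round-trip: urlencode the mgid uri onto _FEED_URL, download the RSS, and map every <item> through _get_video_info, as _get_videos_info does next. A toy version of that traversal over an inline feed; the XML here is invented, the real document comes from the feed service:

import xml.etree.ElementTree as ET

SAMPLE_FEED = '''<rss><channel>
<item><guid>mgid:uma:video:mtv.com:853555</guid><title>Clip one</title></item>
<item><guid>mgid:uma:video:mtv.com:853556</guid><title>Clip two</title></item>
</channel></rss>'''

idoc = ET.fromstring(SAMPLE_FEED)
for item in idoc.findall('.//item'):
    # Same trick as MTVIE._id_from_uri: the video id is the last ':' field.
    video_id = item.find('guid').text.split(':')[-1]
    print(video_id, item.find('title').text)

+ + def _get_videos_info(self, uri): + video_id = self._id_from_uri(uri) + data = compat_urllib_parse.urlencode({'uri': uri}) + infoXml = self._download_webpage(self._FEED_URL + '?' 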
+ data, video_id, + u'Downloading info') + idoc = xml.etree.ElementTree.fromstring(infoXml.encode('utf-8')) + return [self._get_video_info(item) for item in idoc.findall('.//item')] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) - if mobj is None: - raise ExtractorError(u'Invalid URL: %s' % url) - if not mobj.group('proto'): - url = 'http://' + url video_id = mobj.group('videoid') webpage = self._download_webpage(url, video_id) @@ -35,46 +126,5 @@ class MTVIE(InfoExtractor): self.to_screen(u'Vevo video detected: %s' % vevo_id) return self.url_result('vevo:%s' % vevo_id, ie='Vevo') - #song_name = self._html_search_regex(r'<meta name="mtv_vt" content="([^"]+)"/>', - # webpage, u'song name', fatal=False) - - video_title = self._html_search_regex(r'<meta name="mtv_an" content="([^"]+)"/>', - webpage, u'title') - - mtvn_uri = self._html_search_regex(r'<meta name="mtvn_uri" content="([^"]+)"/>', - webpage, u'mtvn_uri', fatal=False) - - content_id = self._search_regex(r'MTVN.Player.defaultPlaylistId = ([0-9]+);', - webpage, u'content id', fatal=False) - - videogen_url = 'http://www.mtv.com/player/includes/mediaGen.jhtml?uri=' + mtvn_uri + '&id=' + content_id + '&vid=' + video_id + '&ref=www.mtvn.com&viewUri=' + mtvn_uri - self.report_extraction(video_id) - request = compat_urllib_request.Request(videogen_url) - try: - metadataXml = compat_urllib_request.urlopen(request).read() - except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: - raise ExtractorError(u'Unable to download video metadata: %s' % compat_str(err)) - - mdoc = xml.etree.ElementTree.fromstring(metadataXml) - renditions = mdoc.findall('.//rendition') - - # For now, always pick the highest quality. - rendition = renditions[-1] - - try: - _,_,ext = rendition.attrib['type'].partition('/') - format = ext + '-' + rendition.attrib['width'] + 'x' + rendition.attrib['height'] + '_' + rendition.attrib['bitrate'] - video_url = rendition.find('./src').text - except KeyError: - raise ExtractorError('Invalid rendition field.') - - info = { - 'id': video_id, - 'url': video_url, - 'upload_date': None, - 'title': video_title, - 'ext': ext, - 'format': format, - } - - return [info] + uri = self._html_search_regex(r'/uri/(.*?)\?', webpage, u'uri') + return self._get_videos_info(uri) diff --git a/youtube_dl/extractor/muzu.py b/youtube_dl/extractor/muzu.py new file mode 100644 index 000000000..03e31ea1c --- /dev/null +++ b/youtube_dl/extractor/muzu.py @@ -0,0 +1,64 @@ +import re +import json + +from .common import InfoExtractor +from ..utils import ( + compat_urllib_parse, + determine_ext, +) + + +class MuzuTVIE(InfoExtractor): + _VALID_URL = r'https?://www.muzu.tv/(.+?)/(.+?)/(?P<id>\d+)' + IE_NAME = u'muzu.tv' + + _TEST = { + u'url': u'http://www.muzu.tv/defected/marcashken-featuring-sos-cat-walk-original-mix-music-video/1981454/', + u'file': u'1981454.mp4', + u'md5': u'98f8b2c7bc50578d6a0364fff2bfb000', + u'info_dict': { + u'title': u'Cat Walk (Original Mix)', + u'description': u'md5:90e868994de201b2570e4e5854e19420', + u'uploader': u'MarcAshken featuring SOS', + }, + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + + info_data = compat_urllib_parse.urlencode({'format': 'json', + 'url': url, + }) + video_info_page = self._download_webpage('http://www.muzu.tv/api/oembed/?%s' % info_data, + video_id, u'Downloading video info') + info = json.loads(video_info_page) + + player_info_page = 
self._download_webpage('http://player.muzu.tv/player/playerInit?ai=%s' % video_id, + video_id, u'Downloading player info') + video_info = json.loads(player_info_page)['videos'][0] + for quality in ['1080' , '720', '480', '360']: + if video_info.get('v%s' % quality): + break + + data = compat_urllib_parse.urlencode({'ai': video_id, + # Even if each time you watch a video the hash changes, + # it seems to work for different videos, and it will work + # even if you use any non empty string as a hash + 'viewhash': 'VBNff6djeV4HV5TRPW5kOHub2k', + 'device': 'web', + 'qv': quality, + }) + video_url_page = self._download_webpage('http://player.muzu.tv/player/requestVideo?%s' % data, + video_id, u'Downloading video url') + video_url_info = json.loads(video_url_page) + video_url = video_url_info['url'] + + return {'id': video_id, + 'title': info['title'], + 'url': video_url, + 'ext': determine_ext(video_url), + 'thumbnail': info['thumbnail_url'], + 'description': info['description'], + 'uploader': info['author_name'], + } diff --git a/youtube_dl/extractor/myvideo.py b/youtube_dl/extractor/myvideo.py index b2a7b1df0..0404e6e43 100644 --- a/youtube_dl/extractor/myvideo.py +++ b/youtube_dl/extractor/myvideo.py @@ -2,11 +2,13 @@ import binascii import base64 import hashlib import re +import json from .common import InfoExtractor from ..utils import ( compat_ord, compat_urllib_parse, + compat_urllib_request, ExtractorError, ) @@ -16,7 +18,7 @@ from ..utils import ( class MyVideoIE(InfoExtractor): """Information Extractor for myvideo.de.""" - _VALID_URL = r'(?:http://)?(?:www\.)?myvideo\.de/watch/([0-9]+)/([^?/]+).*' + _VALID_URL = r'(?:http://)?(?:www\.)?myvideo\.de/(?:[^/]+/)?watch/([0-9]+)/([^?/]+).*' IE_NAME = u'myvideo' _TEST = { u'url': u'http://www.myvideo.de/watch/8229274/bowling_fail_or_win', @@ -85,6 +87,20 @@ class MyVideoIE(InfoExtractor): 'ext': video_ext, }] + mobj = re.search(r'data-video-service="/service/data/video/%s/config' % video_id, webpage) + if mobj is not None: + request = compat_urllib_request.Request('http://www.myvideo.de/service/data/video/%s/config' % video_id, '') + response = self._download_webpage(request, video_id, + u'Downloading video info') + info = json.loads(base64.b64decode(response).decode('utf-8')) + return {'id': video_id, + 'title': info['title'], + 'url': info['streaming_url'].replace('rtmpe', 'rtmpt'), + 'play_path': info['filename'], + 'ext': 'flv', + 'thumbnail': info['thumbnail'][0]['url'], + } + # try encxml mobj = re.search('var flashvars={(.+?)}', webpage) if mobj is None: diff --git a/youtube_dl/extractor/naver.py b/youtube_dl/extractor/naver.py new file mode 100644 index 000000000..9df236d69 --- /dev/null +++ b/youtube_dl/extractor/naver.py @@ -0,0 +1,73 @@ +# encoding: utf-8 +import re +import xml.etree.ElementTree + +from .common import InfoExtractor +from ..utils import ( + compat_urllib_parse, + ExtractorError, +) + + +class NaverIE(InfoExtractor): + _VALID_URL = r'https?://tvcast\.naver\.com/v/(?P<id>\d+)' + + _TEST = { + u'url': u'http://tvcast.naver.com/v/81652', + u'file': u'81652.mp4', + u'info_dict': { + u'title': u'[9월 모의고사 해설강의][수학_김상희] 수학 A형 16~20번', + u'description': u'합격불변의 법칙 메가스터디 | 메가스터디 수학 김상희 선생님이 9월 모의고사 수학A형 16번에서 20번까지 해설강의를 공개합니다.', + u'upload_date': u'20130903', + }, + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group(1) + webpage = self._download_webpage(url, video_id) + m_id = re.search(r'var rmcPlayer = new nhn.rmcnmv.RMCVideoPlayer\("(.+?)", "(.+?)"', + webpage) + if m_id is 
None: + raise ExtractorError(u'couldn\'t extract vid and key') + vid = m_id.group(1) + key = m_id.group(2) + query = compat_urllib_parse.urlencode({'vid': vid, 'inKey': key,}) + query_urls = compat_urllib_parse.urlencode({ + 'masterVid': vid, + 'protocol': 'p2p', + 'inKey': key, + }) + info_xml = self._download_webpage( + 'http://serviceapi.rmcnmv.naver.com/flash/videoInfo.nhn?' + query, + video_id, u'Downloading video info') + urls_xml = self._download_webpage( + 'http://serviceapi.rmcnmv.naver.com/flash/playableEncodingOption.nhn?' + query_urls, + video_id, u'Downloading video formats info') + info = xml.etree.ElementTree.fromstring(info_xml.encode('utf-8')) + urls = xml.etree.ElementTree.fromstring(urls_xml.encode('utf-8')) + + formats = [] + for format_el in urls.findall('EncodingOptions/EncodingOption'): + domain = format_el.find('Domain').text + if domain.startswith('rtmp'): + continue + formats.append({ + 'url': domain + format_el.find('uri').text, + 'ext': 'mp4', + 'width': int(format_el.find('width').text), + 'height': int(format_el.find('height').text), + }) + + info = { + 'id': video_id, + 'title': info.find('Subject').text, + 'formats': formats, + 'description': self._og_search_description(webpage), + 'thumbnail': self._og_search_thumbnail(webpage), + 'upload_date': info.find('WriteDate').text.replace('.', ''), + 'view_count': int(info.find('PlayCount').text), + } + # TODO: Remove when #980 has been merged + info.update(formats[-1]) + return info diff --git a/youtube_dl/extractor/nbc.py b/youtube_dl/extractor/nbc.py new file mode 100644 index 000000000..3bc9dae6d --- /dev/null +++ b/youtube_dl/extractor/nbc.py @@ -0,0 +1,33 @@ +import re +import xml.etree.ElementTree + +from .common import InfoExtractor +from ..utils import find_xpath_attr, compat_str + + +class NBCNewsIE(InfoExtractor): + _VALID_URL = r'https?://www\.nbcnews\.com/video/.+?/(?P<id>\d+)' + + _TEST = { + u'url': u'http://www.nbcnews.com/video/nbc-news/52753292', + u'file': u'52753292.flv', + u'md5': u'47abaac93c6eaf9ad37ee6c4463a5179', + u'info_dict': { + u'title': u'Crew emerges after four-month Mars food study', + u'description': u'md5:24e632ffac72b35f8b67a12d1b6ddfc1', + }, + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + info_xml = self._download_webpage('http://www.nbcnews.com/id/%s/displaymode/1219' % video_id, video_id) + info = xml.etree.ElementTree.fromstring(info_xml.encode('utf-8')).find('video') + + return {'id': video_id, + 'title': info.find('headline').text, + 'ext': 'flv', + 'url': find_xpath_attr(info, 'media', 'type', 'flashVideo').text, + 'description': compat_str(info.find('caption').text), + 'thumbnail': find_xpath_attr(info, 'media', 'type', 'thumbnail').text, + } diff --git a/youtube_dl/extractor/newgrounds.py b/youtube_dl/extractor/newgrounds.py new file mode 100644 index 000000000..2ef80bce0 --- /dev/null +++ b/youtube_dl/extractor/newgrounds.py @@ -0,0 +1,38 @@ +import json +import re + +from .common import InfoExtractor +from ..utils import determine_ext + + +class NewgroundsIE(InfoExtractor): + _VALID_URL = r'(?:https?://)?(?:www\.)?newgrounds\.com/audio/listen/(?P<id>\d+)' + _TEST = { + u'url': u'http://www.newgrounds.com/audio/listen/549479', + u'file': u'549479.mp3', + u'md5': u'fe6033d297591288fa1c1f780386f07a', + u'info_dict': { + u"title": u"B7 - BusMode", + u"uploader": u"Burn7", + } + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + music_id = mobj.group('id') + webpage = 
self._download_webpage(url, music_id) + + title = self._html_search_regex(r',"name":"([^"]+)",', webpage, u'music title') + uploader = self._html_search_regex(r',"artist":"([^"]+)",', webpage, u'music uploader') + + music_url_json_string = self._html_search_regex(r'({"url":"[^"]+"),', webpage, u'music url') + '}' + music_url_json = json.loads(music_url_json_string) + music_url = music_url_json['url'] + + return { + 'id': music_id, + 'title': title, + 'url': music_url, + 'uploader': uploader, + 'ext': determine_ext(music_url), + } diff --git a/youtube_dl/extractor/nhl.py b/youtube_dl/extractor/nhl.py new file mode 100644 index 000000000..e8d43dd13 --- /dev/null +++ b/youtube_dl/extractor/nhl.py @@ -0,0 +1,120 @@ +import re +import json +import xml.etree.ElementTree + +from .common import InfoExtractor +from ..utils import ( + compat_urlparse, + compat_urllib_parse, + determine_ext, + unified_strdate, +) + + +class NHLBaseInfoExtractor(InfoExtractor): + @staticmethod + def _fix_json(json_string): + return json_string.replace('\\\'', '\'') + + def _extract_video(self, info): + video_id = info['id'] + self.report_extraction(video_id) + + initial_video_url = info['publishPoint'] + data = compat_urllib_parse.urlencode({ + 'type': 'fvod', + 'path': initial_video_url.replace('.mp4', '_sd.mp4'), + }) + path_url = 'http://video.nhl.com/videocenter/servlets/encryptvideopath?' + data + path_response = self._download_webpage(path_url, video_id, + u'Downloading final video url') + path_doc = xml.etree.ElementTree.fromstring(path_response) + video_url = path_doc.find('path').text + + join = compat_urlparse.urljoin + return { + 'id': video_id, + 'title': info['name'], + 'url': video_url, + 'ext': determine_ext(video_url), + 'description': info['description'], + 'duration': int(info['duration']), + 'thumbnail': join(join(video_url, '/u/'), info['bigImage']), + 'upload_date': unified_strdate(info['releaseDate'].split('.')[0]), + } + + +class NHLIE(NHLBaseInfoExtractor): + IE_NAME = u'nhl.com' + _VALID_URL = r'https?://video(?P<team>\.[^.]*)?\.nhl\.com/videocenter/console\?.*?(?<=[?&])id=(?P<id>\d+)' + + _TEST = { + u'url': u'http://video.canucks.nhl.com/videocenter/console?catid=6?id=453614', + u'file': u'453614.mp4', + u'info_dict': { + u'title': u'Quick clip: Weise 4-3 goal vs Flames', + u'description': u'Dale Weise scores his first of the season to put the Canucks up 4-3.', + u'duration': 18, + u'upload_date': u'20131006', + }, + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + json_url = 'http://video.nhl.com/videocenter/servlets/playlist?ids=%s&format=json' % video_id + info_json = self._download_webpage(json_url, video_id, + u'Downloading info json') + info_json = self._fix_json(info_json) + info = json.loads(info_json)[0] + return self._extract_video(info) + + +class NHLVideocenterIE(NHLBaseInfoExtractor): + IE_NAME = u'nhl.com:videocenter' + IE_DESC = u'Download the first 12 videos from a videocenter category' + _VALID_URL = r'https?://video\.(?P<team>[^.]*)\.nhl\.com/videocenter/(console\?.*?catid=(?P<catid>[^&]+))?' 
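Because this pattern also matches plain console URLs that carry an id parameter, the class overrides suitable() below so that single videos keep going to NHLIE and only what NHLIE rejects is handled as a category. The same precedence trick in isolation, with shortened stand-in patterns for the two _VALID_URLs (the real override simply consults NHLIE.suitable()):

import re

# Simplified stand-ins for the real _VALID_URL regexes, for illustration.
SINGLE_VIDEO = re.compile(r'(?<=[?&])id=\d+')
VIDEOCENTER = re.compile(r'nhl\.com/videocenter/')

def videocenter_suitable(url):
    if SINGLE_VIDEO.search(url):  # single videos belong to the video IE
        return False
    return VIDEOCENTER.search(url) is not None

assert not videocenter_suitable('http://video.nhl.com/videocenter/console?id=453614')
assert videocenter_suitable('http://video.canucks.nhl.com/videocenter/console?catid=6')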
+ + @classmethod + def suitable(cls, url): + if NHLIE.suitable(url): + return False + return super(NHLVideocenterIE, cls).suitable(url) + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + team = mobj.group('team') + webpage = self._download_webpage(url, team) + cat_id = self._search_regex( + [r'var defaultCatId = "(.+?)";', + r'{statusIndex:0,index:0,.*?id:(.*?),'], + webpage, u'category id') + playlist_title = self._html_search_regex( + r'\?catid=%s">(.*?)</a>' % cat_id, + webpage, u'playlist title', flags=re.DOTALL) + + data = compat_urllib_parse.urlencode({ + 'cid': cat_id, + # This is the default value + 'count': 12, + 'ptrs': 3, + 'format': 'json', + }) + path = '/videocenter/servlets/browse?' + data + request_url = compat_urlparse.urljoin(url, path) + response = self._download_webpage(request_url, playlist_title) + response = self._fix_json(response) + if not response.strip(): + self._downloader.report_warning(u'Got an empty response, trying ' + u'adding the "newvideos" parameter') + response = self._download_webpage(request_url + '&newvideos=true', + playlist_title) + response = self._fix_json(response) + videos = json.loads(response) + + return { + '_type': 'playlist', + 'title': playlist_title, + 'id': cat_id, + 'entries': [self._extract_video(i) for i in videos], + } diff --git a/youtube_dl/extractor/nowvideo.py new file mode 100644 index 000000000..ab52ad401 --- /dev/null +++ b/youtube_dl/extractor/nowvideo.py @@ -0,0 +1,43 @@ +import re + +from .common import InfoExtractor +from ..utils import compat_urlparse + + +class NowVideoIE(InfoExtractor): + _VALID_URL = r'(?:https?://)?(?:www\.)?nowvideo\.ch/video/(?P<id>\w+)' + _TEST = { + u'url': u'http://www.nowvideo.ch/video/0mw0yow7b6dxa', + u'file': u'0mw0yow7b6dxa.flv', + u'md5': u'f8fbbc8add72bd95b7850c6a02fc8817', + u'info_dict': { + u"title": u"youtubedl test video _BaW_jenozKc.mp4" + } + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + + video_id = mobj.group('id') + webpage_url = 'http://www.nowvideo.ch/video/' + video_id + webpage = self._download_webpage(webpage_url, video_id) + + self.report_extraction(video_id) + + video_title = self._html_search_regex(r'<h4>(.*)</h4>', + webpage, u'video title') + + video_key = self._search_regex(r'var fkzd="(.*)";', + webpage, u'video key') + + api_call = "http://www.nowvideo.ch/api/player.api.php?file={0}&numOfErrors=0&cid=1&key={1}".format(video_id, video_key) + api_response = self._download_webpage(api_call, video_id, + u'Downloading API page') + video_url = compat_urlparse.parse_qs(api_response)[u'url'][0] + + return [{ + 'id': video_id, + 'url': video_url, + 'ext': 'flv', + 'title': video_title, + }] diff --git a/youtube_dl/extractor/ooyala.py new file mode 100644 index 000000000..1f7b4d2e7 --- /dev/null +++ b/youtube_dl/extractor/ooyala.py @@ -0,0 +1,58 @@ +import re +import json + +from .common import InfoExtractor +from ..utils import unescapeHTML + +class OoyalaIE(InfoExtractor): + _VALID_URL = r'https?://.+?\.ooyala\.com/.*?embedCode=(?P<id>.+?)(&|$)' + + _TEST = { + # From http://it.slashdot.org/story/13/04/25/178216/recovering-data-from-broken-hard-drives-and-ssds-video + u'url': u'http://player.ooyala.com/player.js?embedCode=pxczE2YjpfHfn1f3M-ykG_AmJRRn0PD8', + u'file': u'pxczE2YjpfHfn1f3M-ykG_AmJRRn0PD8.mp4', + u'md5': u'3f5cceb3a7bf461d6c29dc466cf8033c', + u'info_dict': { + u'title': u'Explaining Data Recovery from Hard Drives and SSDs', 
u'description': u'How badly damaged does a drive have to be to defeat Russell and his crew? Apparently, smashed to bits.', + }, + } + + @staticmethod + def _url_for_embed_code(embed_code): + return 'http://player.ooyala.com/player.js?embedCode=%s' % embed_code + + def _extract_result(self, info, more_info): + return {'id': info['embedCode'], + 'ext': 'mp4', + 'title': unescapeHTML(info['title']), + 'url': info.get('ipad_url') or info['url'], + 'description': unescapeHTML(more_info['description']), + 'thumbnail': more_info['promo'], + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + embedCode = mobj.group('id') + player_url = 'http://player.ooyala.com/player.js?embedCode=%s' % embedCode + player = self._download_webpage(player_url, embedCode) + mobile_url = self._search_regex(r'mobile_player_url="(.+?)&device="', + player, u'mobile player url') + mobile_player = self._download_webpage(mobile_url, embedCode) + videos_info = self._search_regex( + r'var streams=window.oo_testEnv\?\[\]:eval\("\((\[{.*?}\])\)"\);', + mobile_player, u'info').replace('\\"','"') + videos_more_info = self._search_regex(r'eval\("\(({.*?\\"promo\\".*?})\)"', mobile_player, u'more info').replace('\\"','"') + videos_info = json.loads(videos_info) + videos_more_info =json.loads(videos_more_info) + + if videos_more_info.get('lineup'): + videos = [self._extract_result(info, more_info) for (info, more_info) in zip(videos_info, videos_more_info['lineup'])] + return {'_type': 'playlist', + 'id': embedCode, + 'title': unescapeHTML(videos_more_info['title']), + 'entries': videos, + } + else: + return self._extract_result(videos_info[0], videos_more_info) + diff --git a/youtube_dl/extractor/orf.py b/youtube_dl/extractor/orf.py new file mode 100644 index 000000000..cfca2a063 --- /dev/null +++ b/youtube_dl/extractor/orf.py @@ -0,0 +1,54 @@ +# coding: utf-8 + +import re +import xml.etree.ElementTree +import json + +from .common import InfoExtractor +from ..utils import ( + compat_urlparse, + ExtractorError, + find_xpath_attr, +) + +class ORFIE(InfoExtractor): + _VALID_URL = r'https?://tvthek.orf.at/(programs/.+?/episodes|topics/.+?)/(?P<id>\d+)' + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + playlist_id = mobj.group('id') + webpage = self._download_webpage(url, playlist_id) + + flash_xml = self._search_regex('ORF.flashXML = \'(.+?)\'', webpage, u'flash xml') + flash_xml = compat_urlparse.parse_qs('xml='+flash_xml)['xml'][0] + flash_config = xml.etree.ElementTree.fromstring(flash_xml.encode('utf-8')) + playlist_json = self._search_regex(r'playlist\': \'(\[.*?\])\'', webpage, u'playlist').replace(r'\"','"') + playlist = json.loads(playlist_json) + + videos = [] + ns = '{http://tempuri.org/XMLSchema.xsd}' + xpath = '%(ns)sPlaylist/%(ns)sItems/%(ns)sItem' % {'ns': ns} + webpage_description = self._og_search_description(webpage) + for (i, (item, info)) in enumerate(zip(flash_config.findall(xpath), playlist), 1): + # Get best quality url + rtmp_url = None + for q in ['Q6A', 'Q4A', 'Q1A']: + video_url = find_xpath_attr(item, '%sVideoUrl' % ns, 'quality', q) + if video_url is not None: + rtmp_url = video_url.text + break + if rtmp_url is None: + raise ExtractorError(u'Couldn\'t get video url: %s' % info['id']) + description = self._html_search_regex( + r'id="playlist_entry_%s".*?<p>(.*?)</p>' % i, webpage, + u'description', default=webpage_description, flags=re.DOTALL) + videos.append({ + '_type': 'video', + 'id': info['id'], + 'title': info['title'], + 'url': rtmp_url, + 'ext': 
'flv', + 'description': description, + }) + + return videos diff --git a/youtube_dl/extractor/pbs.py new file mode 100644 index 000000000..65462d867 --- /dev/null +++ b/youtube_dl/extractor/pbs.py @@ -0,0 +1,34 @@ +import re +import json + +from .common import InfoExtractor + + +class PBSIE(InfoExtractor): + _VALID_URL = r'https?://video\.pbs\.org/video/(?P<id>\d+)/?' + + _TEST = { + u'url': u'http://video.pbs.org/video/2365006249/', + u'file': u'2365006249.mp4', + u'md5': u'ce1888486f0908d555a8093cac9a7362', + u'info_dict': { + u'title': u'A More Perfect Union', + u'description': u'md5:ba0c207295339c8d6eced00b7c363c6a', + u'duration': 3190, + }, + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + info_url = 'http://video.pbs.org/videoInfo/%s?format=json' % video_id + info_page = self._download_webpage(info_url, video_id) + info = json.loads(info_page) + return {'id': video_id, + 'title': info['title'], + 'url': info['alternate_encoding']['url'], + 'ext': 'mp4', + 'description': info['program'].get('description'), + 'thumbnail': info.get('image_url'), + 'duration': info.get('duration'), + } diff --git a/youtube_dl/extractor/pornotube.py b/youtube_dl/extractor/pornotube.py index add76a11e..5d770ec28 100644 --- a/youtube_dl/extractor/pornotube.py +++ b/youtube_dl/extractor/pornotube.py @@ -38,6 +38,7 @@ class PornotubeIE(InfoExtractor): VIDEO_UPLOADED_RE = r'<div class="video_added_by">Added (?P<date>[0-9\/]+) by' upload_date = self._html_search_regex(VIDEO_UPLOADED_RE, webpage, u'upload date', fatal=False) if upload_date: upload_date = unified_strdate(upload_date) + age_limit = self._rta_search(webpage) info = {'id': video_id, 'url': video_url, @@ -45,6 +46,7 @@ class PornotubeIE(InfoExtractor): 'upload_date': upload_date, 'title': video_title, 'ext': 'flv', - 'format': 'flv'} + 'format': 'flv', + 'age_limit': age_limit} return [info] diff --git a/youtube_dl/extractor/redtube.py b/youtube_dl/extractor/redtube.py index 1d2cf1f56..365aade56 100644 --- a/youtube_dl/extractor/redtube.py +++ b/youtube_dl/extractor/redtube.py @@ -14,24 +14,30 @@ class RedTubeIE(InfoExtractor): } } - def _real_extract(self,url): + def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') - video_extension = 'mp4' + video_extension = 'mp4' webpage = self._download_webpage(url, video_id) self.report_extraction(video_id) - video_url = self._html_search_regex(r'<source src="(.+?)" type="video/mp4">', - webpage, u'video URL') + video_url = self._html_search_regex( + r'<source src="(.+?)" type="video/mp4">', webpage, u'video URL') - video_title = self._html_search_regex('<h1 class="videoTitle slidePanelMovable">(.+?)</h1>', + video_title = self._html_search_regex( + r'<h1 class="videoTitle slidePanelMovable">(.+?)</h1>', webpage, u'title') - return [{ - 'id': video_id, - 'url': video_url, - 'ext': video_extension, - 'title': video_title, - }] + # No self-labeling, but they describe themselves as + # "Home of Videos Porno" + age_limit = 18 + + return { + 'id': video_id, + 'url': video_url, + 'ext': video_extension, + 'title': video_title, + 'age_limit': age_limit, + }
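These two hunks wire in the new age_limit field: PornotubeIE derives it from _rta_search, which looks for the site's RTA self-label, while RedTubeIE carries no label and hardcodes 18 with a comment. A rough sketch of what such an RTA check amounts to; the tag below is an assumption modelled on the rtalabel.org meta tag, not a copy of the helper in common.py:

import re

def rta_age_limit(webpage):
    # Pages carrying the RTA ("Restricted To Adults") rating meta tag are
    # flagged as 18+; anything else falls through to 0 (no restriction).
    if re.search(r'(?i)<meta\s+name="rating"\s+content="RTA-5042-1996-1400-1577-RTA"', webpage):
        return 18
    return 0

assert rta_age_limit('<meta name="rating" content="RTA-5042-1996-1400-1577-RTA"/>') == 18

diff --git a/youtube_dl/extractor/ro220.py new file mode 100644 index 000000000..c32f64d99 --- /dev/null +++ b/youtube_dl/extractor/ro220.py @@ -0,0 +1,42 @@ +import re + +from .common import InfoExtractor +from ..utils import ( + clean_html, + compat_parse_qs, +) + + +class Ro220IE(InfoExtractor): + 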
IE_NAME = '220.ro' + _VALID_URL = r'(?x)(?:https?://)?(?:www\.)?220\.ro/(?P<category>[^/]+)/(?P<shorttitle>[^/]+)/(?P<video_id>[^/]+)' + _TEST = { + u"url": u"http://www.220.ro/sport/Luati-Le-Banii-Sez-4-Ep-1/LYV6doKo7f/", + u'file': u'LYV6doKo7f.mp4', + u'md5': u'03af18b73a07b4088753930db7a34add', + u'info_dict': { + u"title": u"Luati-le Banii sez 4 ep 1", + u"description": u"Iata-ne reveniti dupa o binemeritata vacanta. Va astept si pe Facebook cu pareri si comentarii.", + } + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('video_id') + + webpage = self._download_webpage(url, video_id) + flashVars_str = self._search_regex( + r'<param name="flashVars" value="([^"]+)"', + webpage, u'flashVars') + flashVars = compat_parse_qs(flashVars_str) + + info = { + '_type': 'video', + 'id': video_id, + 'ext': 'mp4', + 'url': flashVars['videoURL'][0], + 'title': flashVars['title'][0], + 'description': clean_html(flashVars['desc'][0]), + 'thumbnail': flashVars['preview'][0], + } + return info diff --git a/youtube_dl/extractor/rottentomatoes.py b/youtube_dl/extractor/rottentomatoes.py new file mode 100644 index 000000000..c79c39413 --- /dev/null +++ b/youtube_dl/extractor/rottentomatoes.py @@ -0,0 +1,16 @@ +from .videodetective import VideoDetectiveIE + + +# It just uses the same method as videodetective.com, +# the internetvideoarchive.com is extracted from the og:video property +class RottenTomatoesIE(VideoDetectiveIE): + _VALID_URL = r'https?://www\.rottentomatoes\.com/m/[^/]+/trailers/(?P<id>\d+)' + + _TEST = { + u'url': u'http://www.rottentomatoes.com/m/toy_story_3/trailers/11028566/', + u'file': '613340.mp4', + u'info_dict': { + u'title': u'TOY STORY 3', + u'description': u'From the creators of the beloved TOY STORY films, comes a story that will reunite the gang in a whole new way.', + }, + } diff --git a/youtube_dl/extractor/roxwel.py b/youtube_dl/extractor/roxwel.py new file mode 100644 index 000000000..d339e6cb5 --- /dev/null +++ b/youtube_dl/extractor/roxwel.py @@ -0,0 +1,49 @@ +import re +import json + +from .common import InfoExtractor +from ..utils import unified_strdate, determine_ext + + +class RoxwelIE(InfoExtractor): + _VALID_URL = r'https?://www\.roxwel\.com/player/(?P<filename>.+?)(\.|\?|$)' + + _TEST = { + u'url': u'http://www.roxwel.com/player/passionpittakeawalklive.html', + u'file': u'passionpittakeawalklive.flv', + u'md5': u'd9dea8360a1e7d485d2206db7fe13035', + u'info_dict': { + u'title': u'Take A Walk (live)', + u'uploader': u'Passion Pit', + u'description': u'Passion Pit performs "Take A Walk\" live at The Backyard in Austin, Texas. 
', + }, + u'skip': u'Requires rtmpdump', + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + filename = mobj.group('filename') + info_url = 'http://www.roxwel.com/api/videos/%s' % filename + info_page = self._download_webpage(info_url, filename, + u'Downloading video info') + + self.report_extraction(filename) + info = json.loads(info_page) + rtmp_rates = sorted([int(r.replace('flv_', '')) for r in info['media_rates'] if r.startswith('flv_')]) + best_rate = rtmp_rates[-1] + url_page_url = 'http://roxwel.com/pl_one_time.php?filename=%s&quality=%s' % (filename, best_rate) + rtmp_url = self._download_webpage(url_page_url, filename, u'Downloading video url') + ext = determine_ext(rtmp_url) + if ext == 'f4v': + rtmp_url = rtmp_url.replace(filename, 'mp4:%s' % filename) + + return {'id': filename, + 'title': info['title'], + 'url': rtmp_url, + 'ext': 'flv', + 'description': info['description'], + 'thumbnail': info.get('player_image_url') or info.get('image_url_large'), + 'uploader': info['artist'], + 'uploader_id': info['artistname'], + 'upload_date': unified_strdate(info['dbdate']), + } diff --git a/youtube_dl/extractor/rtlnow.py b/youtube_dl/extractor/rtlnow.py new file mode 100644 index 000000000..d1b08c9bc --- /dev/null +++ b/youtube_dl/extractor/rtlnow.py @@ -0,0 +1,156 @@ +# encoding: utf-8 +import re + +from .common import InfoExtractor +from ..utils import ( + clean_html, + ExtractorError, +) + +class RTLnowIE(InfoExtractor): + """Information Extractor for RTL NOW, RTL2 NOW, RTL NITRO, SUPER RTL NOW, VOX NOW and n-tv NOW""" + _VALID_URL = r'(?:http://)?(?P<url>(?P<base_url>rtl-now\.rtl\.de/|rtl2now\.rtl2\.de/|(?:www\.)?voxnow\.de/|(?:www\.)?rtlnitronow\.de/|(?:www\.)?superrtlnow\.de/|(?:www\.)?n-tvnow\.de/)[a-zA-Z0-9-]+/[a-zA-Z0-9-]+\.php\?(?:container_id|film_id)=(?P<video_id>[0-9]+)&player=1(?:&season=[0-9]+)?(?:&.*)?)' + _TESTS = [{ + u'url': u'http://rtl-now.rtl.de/ahornallee/folge-1.php?film_id=90419&player=1&season=1', + u'file': u'90419.flv', + u'info_dict': { + u'upload_date': u'20070416', + u'title': u'Ahornallee - Folge 1 - Der Einzug', + u'description': u'Folge 1 - Der Einzug', + }, + u'params': { + u'skip_download': True, + }, + u'skip': u'Only works from Germany', + }, + { + u'url': u'http://rtl2now.rtl2.de/aerger-im-revier/episode-15-teil-1.php?film_id=69756&player=1&season=2&index=5', + u'file': u'69756.flv', + u'info_dict': { + u'upload_date': u'20120519', + u'title': u'Ärger im Revier - Ein junger Ladendieb, ein handfester Streit...', + u'description': u'Ärger im Revier - Ein junger Ladendieb, ein handfester Streit u.a.', + u'thumbnail': u'http://autoimg.static-fra.de/rtl2now/219850/1500x1500/image2.jpg', + }, + u'params': { + u'skip_download': True, + }, + u'skip': u'Only works from Germany', + }, + { + u'url': u'www.voxnow.de/voxtours/suedafrika-reporter-ii.php?film_id=13883&player=1&season=17', + u'file': u'13883.flv', + u'info_dict': { + u'upload_date': u'20090627', + u'title': u'Voxtours - Südafrika-Reporter II', + u'description': u'Südafrika-Reporter II', + }, + u'params': { + u'skip_download': True, + }, + }, + { + u'url': u'http://superrtlnow.de/medicopter-117/angst.php?film_id=99205&player=1', + u'file': u'99205.flv', + u'info_dict': { + u'upload_date': u'20080928', + u'title': u'Medicopter 117 - Angst!', + u'description': u'Angst!', + u'thumbnail': u'http://autoimg.static-fra.de/superrtlnow/287529/1500x1500/image2.jpg' + }, + u'params': { + u'skip_download': True, + }, + }, + { + u'url': 
u'http://www.rtlnitronow.de/recht-ordnung/lebensmittelkontrolle-erlangenordnungsamt-berlin.php?film_id=127367&player=1&season=1', + u'file': u'127367.flv', + u'info_dict': { + u'upload_date': u'20130926', + u'title': u'Recht & Ordnung - Lebensmittelkontrolle Erlangen/Ordnungsamt...', + u'description': u'Lebensmittelkontrolle Erlangen/Ordnungsamt Berlin', + u'thumbnail': u'http://autoimg.static-fra.de/nitronow/344787/1500x1500/image2.jpg', + }, + u'params': { + u'skip_download': True, + }, + }, + { + u'url': u'http://www.n-tvnow.de/top-gear/episode-1-2013-01-01-00-00-00.php?film_id=124903&player=1&season=10', + u'file': u'124903.flv', + u'info_dict': { + u'upload_date': u'20130101', + u'title': u'Top Gear vom 01.01.2013', + u'description': u'Episode 1', + }, + u'params': { + u'skip_download': True, + }, + u'skip': u'Only works from Germany', + }] + + + def _real_extract(self,url): + mobj = re.match(self._VALID_URL, url) + + webpage_url = u'http://' + mobj.group('url') + video_page_url = u'http://' + mobj.group('base_url') + video_id = mobj.group(u'video_id') + + webpage = self._download_webpage(webpage_url, video_id) + + note_m = re.search(r'''(?sx) + <div[ ]style="margin-left:[ ]20px;[ ]font-size:[ ]13px;">(.*?) + <div[ ]id="playerteaser">''', webpage) + if note_m: + msg = clean_html(note_m.group(1)) + raise ExtractorError(msg) + + video_title = self._html_search_regex(r'<title>(?P<title>[^<]+?)( \| [^<]*)?</title>', + webpage, u'title') + playerdata_url = self._html_search_regex(r'\'playerdata\': \'(?P<playerdata_url>[^\']+)\'', + webpage, u'playerdata_url') + + playerdata = self._download_webpage(playerdata_url, video_id) + mobj = re.search(r'<title><!\[CDATA\[(?P<description>.+?)(?:\s+- (?:Sendung )?vom (?P<upload_date_d>[0-9]{2})\.(?P<upload_date_m>[0-9]{2})\.(?:(?P<upload_date_Y>[0-9]{4})|(?P<upload_date_y>[0-9]{2})) [0-9]{2}:[0-9]{2} Uhr)?\]\]></title>', playerdata) + if mobj: + video_description = mobj.group(u'description') + if mobj.group('upload_date_Y'): + video_upload_date = mobj.group('upload_date_Y') + elif mobj.group('upload_date_y'): + video_upload_date = u'20' + mobj.group('upload_date_y') + else: + video_upload_date = None + if video_upload_date: + video_upload_date += mobj.group('upload_date_m')+mobj.group('upload_date_d') + else: + video_description = None + video_upload_date = None + self._downloader.report_warning(u'Unable to extract description and upload date') + + # Thumbnail: not every video has an thumbnail + mobj = re.search(r'<meta property="og:image" content="(?P<thumbnail>[^"]+)">', webpage) + if mobj: + video_thumbnail = mobj.group(u'thumbnail') + else: + video_thumbnail = None + + mobj = re.search(r'<filename [^>]+><!\[CDATA\[(?P<url>rtmpe://(?:[^/]+/){2})(?P<play_path>[^\]]+)\]\]></filename>', playerdata) + if mobj is None: + raise ExtractorError(u'Unable to extract media URL') + video_url = mobj.group(u'url') + video_play_path = u'mp4:' + mobj.group(u'play_path') + video_player_url = video_page_url + u'includes/vodplayer.swf' + + return [{ + 'id': video_id, + 'url': video_url, + 'play_path': video_play_path, + 'page_url': video_page_url, + 'player_url': video_player_url, + 'ext': 'flv', + 'title': video_title, + 'description': video_description, + 'upload_date': video_upload_date, + 'thumbnail': video_thumbnail, + }] diff --git a/youtube_dl/extractor/sina.py b/youtube_dl/extractor/sina.py new file mode 100644 index 000000000..14b1c656c --- /dev/null +++ b/youtube_dl/extractor/sina.py @@ -0,0 +1,67 @@ +# coding: utf-8 + +import re +import 
xml.etree.ElementTree + +from .common import InfoExtractor +from ..utils import ( + compat_urllib_request, + compat_urllib_parse, +) + + +class SinaIE(InfoExtractor): + _VALID_URL = r'''https?://(.*?\.)?video\.sina\.com\.cn/ + ( + (.+?/(((?P<pseudo_id>\d+).html)|(.*?(\#|(vid=))(?P<id>\d+?)($|&)))) + | + # This is used by external sites like Weibo + (api/sinawebApi/outplay.php/(?P<token>.+?)\.swf) + ) + ''' + + _TEST = { + u'url': u'http://video.sina.com.cn/news/vlist/zt/chczlj2013/?opsubject_id=top12#110028898', + u'file': u'110028898.flv', + u'md5': u'd65dd22ddcf44e38ce2bf58a10c3e71f', + u'info_dict': { + u'title': u'《中国新闻》 朝鲜要求巴拿马立即释放被扣船员', + } + } + + @classmethod + def suitable(cls, url): + return re.match(cls._VALID_URL, url, flags=re.VERBOSE) is not None + + def _extract_video(self, video_id): + data = compat_urllib_parse.urlencode({'vid': video_id}) + url_page = self._download_webpage('http://v.iask.com/v_play.php?%s' % data, + video_id, u'Downloading video url') + image_page = self._download_webpage( + 'http://interface.video.sina.com.cn/interface/common/getVideoImage.php?%s' % data, + video_id, u'Downloading thumbnail info') + url_doc = xml.etree.ElementTree.fromstring(url_page.encode('utf-8')) + + return {'id': video_id, + 'url': url_doc.find('./durl/url').text, + 'ext': 'flv', + 'title': url_doc.find('./vname').text, + 'thumbnail': image_page.split('=')[1], + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url, flags=re.VERBOSE) + video_id = mobj.group('id') + if mobj.group('token') is not None: + # The video id is in the redirected url + self.to_screen(u'Getting video id') + request = compat_urllib_request.Request(url) + request.get_method = lambda: 'HEAD' + (_, urlh) = self._download_webpage_handle(request, 'NA', False) + return self._real_extract(urlh.geturl()) + elif video_id is None: + pseudo_id = mobj.group('pseudo_id') + webpage = self._download_webpage(url, pseudo_id) + video_id = self._search_regex(r'vid:\'(\d+?)\'', webpage, u'video id') + + return self._extract_video(video_id) diff --git a/youtube_dl/extractor/slashdot.py b/youtube_dl/extractor/slashdot.py new file mode 100644 index 000000000..2cba53076 --- /dev/null +++ b/youtube_dl/extractor/slashdot.py @@ -0,0 +1,23 @@ +import re + +from .common import InfoExtractor + + +class SlashdotIE(InfoExtractor): + _VALID_URL = r'https?://tv.slashdot.org/video/\?embed=(?P<id>.*?)(&|$)' + + _TEST = { + u'url': u'http://tv.slashdot.org/video/?embed=JscHMzZDplD0p-yNLOzTfzC3Q3xzJaUz', + u'file': u'JscHMzZDplD0p-yNLOzTfzC3Q3xzJaUz.mp4', + u'md5': u'd2222e7a4a4c1541b3e0cf732fb26735', + u'info_dict': { + u'title': u' Meet the Stampede Supercomputing Cluster\'s Administrator', + }, + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + webpage = self._download_webpage(url, video_id) + ooyala_url = self._search_regex(r'<script src="(.*?)"', webpage, 'ooyala url') + return self.url_result(ooyala_url, 'Ooyala') diff --git a/youtube_dl/extractor/slideshare.py b/youtube_dl/extractor/slideshare.py new file mode 100644 index 000000000..afc3001b5 --- /dev/null +++ b/youtube_dl/extractor/slideshare.py @@ -0,0 +1,47 @@ +import re +import json + +from .common import InfoExtractor +from ..utils import ( + compat_urlparse, + ExtractorError, +) + + +class SlideshareIE(InfoExtractor): + _VALID_URL = r'https?://www\.slideshare\.net/[^/]+?/(?P<title>.+?)($|\?)' + + _TEST = { + u'url': 
u'http://www.slideshare.net/Dataversity/keynote-presentation-managing-scale-and-complexity', + u'file': u'25665706.mp4', + u'info_dict': { + u'title': u'Managing Scale and Complexity', + u'description': u'This was a keynote presentation at the NoSQL Now! 2013 Conference & Expo (http://www.nosqlnow.com). This presentation was given by Adrian Cockcroft from Netflix', + }, + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + page_title = mobj.group('title') + webpage = self._download_webpage(url, page_title) + slideshare_obj = self._search_regex( + r'var slideshare_object = ({.*?}); var user_info =', + webpage, u'slideshare object') + info = json.loads(slideshare_obj) + if info['slideshow']['type'] != u'video': + raise ExtractorError(u'Webpage type is "%s": only video extraction is supported for Slideshare' % info['slideshow']['type'], expected=True) + + doc = info['doc'] + bucket = info['jsplayer']['video_bucket'] + ext = info['jsplayer']['video_extension'] + video_url = compat_urlparse.urljoin(bucket, doc + '-SD.' + ext) + + return { + '_type': 'video', + 'id': info['slideshow']['id'], + 'title': info['slideshow']['title'], + 'ext': ext, + 'url': video_url, + 'thumbnail': info['slideshow']['pin_image_url'], + 'description': self._og_search_description(webpage), + } diff --git a/youtube_dl/extractor/sohu.py b/youtube_dl/extractor/sohu.py new file mode 100644 index 000000000..2b9bf0cb7 --- /dev/null +++ b/youtube_dl/extractor/sohu.py @@ -0,0 +1,94 @@ +# encoding: utf-8 + +import json +import re + +from .common import InfoExtractor +from ..utils import ExtractorError + + +class SohuIE(InfoExtractor): + _VALID_URL = r'https?://(?P<mytv>my\.)?tv\.sohu\.com/.+?/(?(mytv)|n)(?P<id>\d+)\.shtml.*?' + + _TEST = { + u'url': u'http://tv.sohu.com/20130724/n382479172.shtml#super', + u'file': u'382479172.mp4', + u'md5': u'bde8d9a6ffd82c63a1eefaef4eeefec7', + u'info_dict': { + u'title': u'MV:Far East Movement《The Illest》', + }, + } + + def _real_extract(self, url): + + def _fetch_data(vid_id, mytv=False): + if mytv: + base_data_url = 'http://my.tv.sohu.com/play/videonew.do?vid=' + else: + base_data_url = u'http://hot.vrs.sohu.com/vrs_flash.action?vid=' + data_url = base_data_url + str(vid_id) + data_json = self._download_webpage( + data_url, video_id, + note=u'Downloading JSON data for ' + str(vid_id)) + return json.loads(data_json) + + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + mytv = mobj.group('mytv') is not None + + webpage = self._download_webpage(url, video_id) + raw_title = self._html_search_regex(r'(?s)<title>(.+?)</title>', + webpage, u'video title') + title = raw_title.partition('-')[0].strip() + + vid = self._html_search_regex(r'var vid ?= ?["\'](\d+)["\']', webpage, + u'video path') + data = _fetch_data(vid, mytv) + + QUALITIES = ('ori', 'super', 'high', 'nor') + vid_ids = [data['data'][q + 'Vid'] + for q in QUALITIES + if data['data'][q + 'Vid'] != 0] + if not vid_ids: + raise ExtractorError(u'No formats available for this video') + + # For now, we just pick the highest available quality + vid_id = vid_ids[-1] + + format_data = data if vid == vid_id else _fetch_data(vid_id, mytv) + part_count = format_data['data']['totalBlocks'] + allot = format_data['allot'] + prot = format_data['prot'] + clipsURL = format_data['data']['clipsURL'] + su = format_data['data']['su'] + + playlist = [] + for i in range(part_count): + part_url = ('http://%s/?prot=%s&file=%s&new=%s' % + (allot, prot, clipsURL[i], su[i])) + part_str = self._download_webpage( + 
part_url, video_id, + note=u'Downloading part %d of %d' % (i+1, part_count)) + + part_info = part_str.split('|') + video_url = '%s%s?key=%s' % (part_info[0], su[i], part_info[3]) + + video_info = { + 'id': '%s_part%02d' % (video_id, i + 1), + 'title': title, + 'url': video_url, + 'ext': 'mp4', + } + playlist.append(video_info) + + if len(playlist) == 1: + info = playlist[0] + info['id'] = video_id + else: + info = { + '_type': 'playlist', + 'entries': playlist, + 'id': video_id, + } + + return info diff --git a/youtube_dl/extractor/soundcloud.py b/youtube_dl/extractor/soundcloud.py index d47c49c03..29cd5617c 100644 --- a/youtube_dl/extractor/soundcloud.py +++ b/youtube_dl/extractor/soundcloud.py @@ -1,9 +1,12 @@ import json import re +import itertools from .common import InfoExtractor from ..utils import ( compat_str, + compat_urlparse, + compat_urllib_parse, ExtractorError, unified_strdate, @@ -19,7 +22,12 @@ class SoundcloudIE(InfoExtractor): of the stream token and uid """ - _VALID_URL = r'^(?:https?://)?(?:www\.)?soundcloud\.com/([\w\d-]+)/([\w\d-]+)(?:[?].*)?$' + _VALID_URL = r'''^(?:https?://)? + (?:(?:(?:www\.)?soundcloud\.com/([\w\d-]+)/([\w\d-]+)/?(?:[?].*)?$) + |(?:api\.soundcloud\.com/tracks/(?P<track_id>\d+)) + |(?P<widget>w.soundcloud.com/player/?.*?url=.*) + ) + ''' IE_NAME = u'soundcloud' _TEST = { u'url': u'http://soundcloud.com/ethmusic/lostin-powers-she-so-heavy', @@ -33,59 +41,69 @@ class SoundcloudIE(InfoExtractor): } } + _CLIENT_ID = 'b45b1aa10f1ac2941910a7f0d10f8e28' + + @classmethod + def suitable(cls, url): + return re.match(cls._VALID_URL, url, flags=re.VERBOSE) is not None + def report_resolve(self, video_id): """Report information extraction.""" self.to_screen(u'%s: Resolving id' % video_id) - def _real_extract(self, url): - mobj = re.match(self._VALID_URL, url) - if mobj is None: - raise ExtractorError(u'Invalid URL: %s' % url) - - # extract uploader (which is in the url) - uploader = mobj.group(1) - # extract simple title (uploader + slug of song title) - slug_title = mobj.group(2) - full_title = '%s/%s' % (uploader, slug_title) - - self.report_resolve(full_title) - - url = 'http://soundcloud.com/%s/%s' % (uploader, slug_title) - resolv_url = 'http://api.soundcloud.com/resolve.json?url=' + url + '&client_id=b45b1aa10f1ac2941910a7f0d10f8e28' - info_json = self._download_webpage(resolv_url, full_title, u'Downloading info JSON') + @classmethod + def _resolv_url(cls, url): + return 'http://api.soundcloud.com/resolve.json?url=' + url + '&client_id=' + cls._CLIENT_ID - info = json.loads(info_json) + def _extract_info_dict(self, info, full_title=None, quiet=False): video_id = info['id'] - self.report_extraction(full_title) - - streams_url = 'https://api.sndcdn.com/i1/tracks/' + str(video_id) + '/streams?client_id=b45b1aa10f1ac2941910a7f0d10f8e28' - stream_json = self._download_webpage(streams_url, full_title, - u'Downloading stream definitions', - u'unable to download stream definitions') - - streams = json.loads(stream_json) - mediaURL = streams['http_mp3_128_url'] - upload_date = unified_strdate(info['created_at']) - - return [{ + name = full_title or video_id + if quiet == False: + self.report_extraction(name) + + thumbnail = info['artwork_url'] + if thumbnail is not None: + thumbnail = thumbnail.replace('-large', '-t500x500') + return { 'id': info['id'], - 'url': mediaURL, + 'url': info['stream_url'] + '?client_id=' + self._CLIENT_ID, 'uploader': info['user']['username'], - 'upload_date': upload_date, + 'upload_date': unified_strdate(info['created_at']), 'title': 
info['title'], 'ext': u'mp3', 'description': info['description'], - }] + 'thumbnail': thumbnail, + } -class SoundcloudSetIE(InfoExtractor): - """Information extractor for soundcloud.com sets - To access the media, the uid of the song and a stream token - must be extracted from the page source and the script must make - a request to media.soundcloud.com/crossdomain.xml. Then - the media can be grabbed by requesting from an url composed - of the stream token and uid - """ + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url, flags=re.VERBOSE) + if mobj is None: + raise ExtractorError(u'Invalid URL: %s' % url) + + track_id = mobj.group('track_id') + if track_id is not None: + info_json_url = 'http://api.soundcloud.com/tracks/' + track_id + '.json?client_id=' + self._CLIENT_ID + full_title = track_id + elif mobj.group('widget'): + query = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query) + return self.url_result(query['url'][0], ie='Soundcloud') + else: + # extract uploader (which is in the url) + uploader = mobj.group(1) + # extract simple title (uploader + slug of song title) + slug_title = mobj.group(2) + full_title = '%s/%s' % (uploader, slug_title) + + self.report_resolve(full_title) + + url = 'http://soundcloud.com/%s/%s' % (uploader, slug_title) + info_json_url = self._resolv_url(url) + info_json = self._download_webpage(info_json_url, full_title, u'Downloading info JSON') + info = json.loads(info_json) + return self._extract_info_dict(info, full_title) + +class SoundcloudSetIE(SoundcloudIE): _VALID_URL = r'^(?:https?://)?(?:www\.)?soundcloud\.com/([\w\d-]+)/sets/([\w\d-]+)(?:[?].*)?$' IE_NAME = u'soundcloud:set' _TEST = { @@ -153,10 +171,6 @@ class SoundcloudSetIE(InfoExtractor): ] } - def report_resolve(self, video_id): - """Report information extraction.""" - self.to_screen(u'%s: Resolving id' % video_id) - def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) if mobj is None: @@ -171,7 +185,7 @@ class SoundcloudSetIE(InfoExtractor): self.report_resolve(full_title) url = 'http://soundcloud.com/%s/sets/%s' % (uploader, slug_title) - resolv_url = 'http://api.soundcloud.com/resolve.json?url=' + url + '&client_id=b45b1aa10f1ac2941910a7f0d10f8e28' + resolv_url = self._resolv_url(url) info_json = self._download_webpage(resolv_url, full_title) videos = [] @@ -182,23 +196,46 @@ class SoundcloudSetIE(InfoExtractor): return self.report_extraction(full_title) - for track in info['tracks']: - video_id = track['id'] - - streams_url = 'https://api.sndcdn.com/i1/tracks/' + str(video_id) + '/streams?client_id=b45b1aa10f1ac2941910a7f0d10f8e28' - stream_json = self._download_webpage(streams_url, video_id, u'Downloading track info JSON') - - self.report_extraction(video_id) - streams = json.loads(stream_json) - mediaURL = streams['http_mp3_128_url'] - - videos.append({ - 'id': video_id, - 'url': mediaURL, - 'uploader': track['user']['username'], - 'upload_date': unified_strdate(track['created_at']), - 'title': track['title'], - 'ext': u'mp3', - 'description': track['description'], - }) - return videos + return {'_type': 'playlist', + 'entries': [self._extract_info_dict(track) for track in info['tracks']], + 'id': info['id'], + 'title': info['title'], + } + + +class SoundcloudUserIE(SoundcloudIE): + _VALID_URL = r'https?://(www\.)?soundcloud.com/(?P<user>[^/]+)(/?(tracks/)?)?(\?.*)?$' + IE_NAME = u'soundcloud:user' + + # it's in tests/test_playlists.py + _TEST = None + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + uploader = 
mobj.group('user') + + url = 'http://soundcloud.com/%s/' % uploader + resolv_url = self._resolv_url(url) + user_json = self._download_webpage(resolv_url, uploader, + u'Downloading user info') + user = json.loads(user_json) + + tracks = [] + for i in itertools.count(): + data = compat_urllib_parse.urlencode({'offset': i*50, + 'client_id': self._CLIENT_ID, + }) + tracks_url = 'http://api.soundcloud.com/users/%s/tracks.json?' % user['id'] + data + response = self._download_webpage(tracks_url, uploader, + u'Downloading tracks page %s' % (i+1)) + new_tracks = json.loads(response) + tracks.extend(self._extract_info_dict(track, quiet=True) for track in new_tracks) + if len(new_tracks) < 50: + break + + return { + '_type': 'playlist', + 'id': compat_str(user['id']), + 'title': user['username'], + 'entries': tracks, + } diff --git a/youtube_dl/extractor/southparkstudios.py b/youtube_dl/extractor/southparkstudios.py new file mode 100644 index 000000000..b1e96b679 --- /dev/null +++ b/youtube_dl/extractor/southparkstudios.py @@ -0,0 +1,38 @@ +import re + +from .mtv import MTVIE, _media_xml_tag + + +class SouthParkStudiosIE(MTVIE): + IE_NAME = u'southparkstudios.com' + _VALID_URL = r'https?://www\.southparkstudios\.com/(clips|full-episodes)/(?P<id>.+?)(\?|#|$)' + + _FEED_URL = 'http://www.southparkstudios.com/feeds/video-player/mrss' + + _TEST = { + u'url': u'http://www.southparkstudios.com/clips/104437/bat-daded#tab=featured', + u'file': u'a7bff6c2-ed00-11e0-aca6-0026b9414f30.mp4', + u'info_dict': { + u'title': u'Bat Daded', + u'description': u'Randy disqualifies South Park by getting into a fight with Bat Dad.', + }, + } + + # Overwrite MTVIE properties we don't want + _TESTS = [] + + def _get_thumbnail_url(self, uri, itemdoc): + search_path = '%s/%s' % (_media_xml_tag('group'), _media_xml_tag('thumbnail')) + thumb_node = itemdoc.find(search_path) + if thumb_node is None: + return None + else: + return thumb_node.attrib['url'] + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + webpage = self._download_webpage(url, video_id) + mgid = self._search_regex(r'swfobject.embedSWF\(".*?(mgid:.*?)"', + webpage, u'mgid') + return self._get_videos_info(mgid) diff --git a/youtube_dl/extractor/statigram.py b/youtube_dl/extractor/statigram.py index b8e6b3bf9..1ea4a9f2f 100644 --- a/youtube_dl/extractor/statigram.py +++ b/youtube_dl/extractor/statigram.py @@ -5,13 +5,13 @@ from .common import InfoExtractor class StatigramIE(InfoExtractor): _VALID_URL = r'(?:http://)?(?:www\.)?statigr\.am/p/([^/]+)' _TEST = { - u'url': u'http://statigr.am/p/484091715184808010_284179915', - u'file': u'484091715184808010_284179915.mp4', - u'md5': u'deda4ff333abe2e118740321e992605b', + u'url': u'http://statigr.am/p/522207370455279102_24101272', + u'file': u'522207370455279102_24101272.mp4', + u'md5': u'6eb93b882a3ded7c378ee1d6884b1814', u'info_dict': { - u"uploader_id": u"videoseconds", - u"title": u"Instagram photo by @videoseconds" - } + u'uploader_id': u'aguynamedpatrick', + u'title': u'Instagram photo by @aguynamedpatrick (Patrick Janelle)', + }, } def _real_extract(self, url): diff --git a/youtube_dl/extractor/subtitles.py b/youtube_dl/extractor/subtitles.py new file mode 100644 index 000000000..90de7de3a --- /dev/null +++ b/youtube_dl/extractor/subtitles.py @@ -0,0 +1,91 @@ +from .common import InfoExtractor + +from ..utils import ( + compat_str, + ExtractorError, +) + + +class SubtitlesInfoExtractor(InfoExtractor): + @property + def _have_to_download_any_subtitles(self): + 
return any([self._downloader.params.get('writesubtitles', False), + self._downloader.params.get('writeautomaticsub')]) + + def _list_available_subtitles(self, video_id, webpage=None): + """ outputs the available subtitles for the video """ + sub_lang_list = self._get_available_subtitles(video_id) + auto_captions_list = self._get_available_automatic_caption(video_id, webpage) + sub_lang = ",".join(list(sub_lang_list.keys())) + self.to_screen(u'%s: Available subtitles for video: %s' % + (video_id, sub_lang)) + auto_lang = ",".join(auto_captions_list.keys()) + self.to_screen(u'%s: Available automatic captions for video: %s' % + (video_id, auto_lang)) + + def extract_subtitles(self, video_id, video_webpage=None): + """ + returns {sub_lang: sub} ,{} if subtitles not found or None if the + subtitles aren't requested. + """ + if not self._have_to_download_any_subtitles: + return None + available_subs_list = {} + if self._downloader.params.get('writeautomaticsub', False): + available_subs_list.update(self._get_available_automatic_caption(video_id, video_webpage)) + if self._downloader.params.get('writesubtitles', False): + available_subs_list.update(self._get_available_subtitles(video_id)) + + if not available_subs_list: # error, it didn't get the available subtitles + return {} + if self._downloader.params.get('allsubtitles', False): + sub_lang_list = available_subs_list + else: + if self._downloader.params.get('subtitleslangs', False): + requested_langs = self._downloader.params.get('subtitleslangs') + elif 'en' in available_subs_list: + requested_langs = ['en'] + else: + requested_langs = [list(available_subs_list.keys())[0]] + + sub_lang_list = {} + for sub_lang in requested_langs: + if not sub_lang in available_subs_list: + self._downloader.report_warning(u'no closed captions found in the specified language "%s"' % sub_lang) + continue + sub_lang_list[sub_lang] = available_subs_list[sub_lang] + + subtitles = {} + for sub_lang, url in sub_lang_list.items(): + subtitle = self._request_subtitle_url(sub_lang, url) + if subtitle: + subtitles[sub_lang] = subtitle + return subtitles + + def _request_subtitle_url(self, sub_lang, url): + """ makes the http request for the subtitle """ + try: + sub = self._download_webpage(url, None, note=False) + except ExtractorError as err: + self._downloader.report_warning(u'unable to download video subtitles for %s: %s' % (sub_lang, compat_str(err))) + return + if not sub: + self._downloader.report_warning(u'Did not fetch video subtitles') + return + return sub + + def _get_available_subtitles(self, video_id): + """ + returns {sub_lang: url} or {} if not available + Must be redefined by the subclasses + """ + pass + + def _get_available_automatic_caption(self, video_id, webpage): + """ + returns {sub_lang: url} or {} if not available + Must be redefined by the subclasses that support automatic captions, + otherwise it will return {} + """ + self._downloader.report_warning(u'Automatic Captions not supported by this server') + return {} diff --git a/youtube_dl/extractor/sztvhu.py b/youtube_dl/extractor/sztvhu.py new file mode 100644 index 000000000..cd3e203e6 --- /dev/null +++ b/youtube_dl/extractor/sztvhu.py @@ -0,0 +1,44 @@ +# -*- coding: utf-8 -*- + +import re + +from .common import InfoExtractor +from ..utils import determine_ext + + +class SztvHuIE(InfoExtractor): + _VALID_URL = r'(?:http://)?(?:(?:www\.)?sztv\.hu|www\.tvszombathely\.hu)/(?:[^/]+)/.+-(?P<id>[0-9]+)' + _TEST = { + u'url': 
u'http://sztv.hu/hirek/cserkeszek-nepszerusitettek-a-kornyezettudatos-eletmodot-a-savaria-teren-20130909', + u'file': u'20130909.mp4', + u'md5': u'a6df607b11fb07d0e9f2ad94613375cb', + u'info_dict': { + u"title": u"Cserkészek népszerűsítették a környezettudatos életmódot a Savaria téren", + u"description": u'A zöld nap játékos ismeretterjesztő programjait a Magyar Cserkész Szövetség szervezte, akik az ország nyolc városában adják át tudásukat az érdeklődőknek. A PET...', + } + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + webpage = self._download_webpage(url, video_id) + video_file = self._search_regex( + r'file: "...:(.*?)",', webpage, 'video file') + title = self._html_search_regex( + r'<meta name="title" content="([^"]*) - [^-]*"', + webpage, 'video title') + description = self._html_search_regex( + r'<meta name="description" content="([^"]*)"/>', + webpage, 'video description', fatal=False) + thumbnail = self._og_search_thumbnail(webpage) + + video_url = 'http://media.sztv.hu/vod/' + video_file + + return { + 'id': video_id, + 'url': video_url, + 'title': title, + 'ext': determine_ext(video_url), + 'description': description, + 'thumbnail': thumbnail, + } diff --git a/youtube_dl/extractor/teamcoco.py b/youtube_dl/extractor/teamcoco.py index ec92e589a..c910110ca 100644 --- a/youtube_dl/extractor/teamcoco.py +++ b/youtube_dl/extractor/teamcoco.py @@ -33,7 +33,7 @@ class TeamcocoIE(InfoExtractor): data_url = 'http://teamcoco.com/cvp/2.0/%s.xml' % video_id data = self._download_webpage(data_url, video_id, 'Downloading data webpage') - video_url = self._html_search_regex(r'<file type="high".*?>(.*?)</file>', + video_url = self._html_search_regex(r'<file [^>]*type="high".*?>(.*?)</file>', data, u'video URL') return [{ diff --git a/youtube_dl/extractor/ted.py b/youtube_dl/extractor/ted.py index 8b73b8340..dfa1176a3 100644 --- a/youtube_dl/extractor/ted.py +++ b/youtube_dl/extractor/ted.py @@ -67,7 +67,7 @@ class TEDIE(InfoExtractor): webpage = self._download_webpage(url, video_id, 'Downloading \"%s\" page' % video_name) self.report_extraction(video_name) # If the url includes the language we get the title translated - title = self._html_search_regex(r'<span id="altHeadline" >(?P<title>.*)</span>', + title = self._html_search_regex(r'<span .*?id="altHeadline".+?>(?P<title>.*)</span>', webpage, 'title') json_data = self._search_regex(r'<script.*?>var talkDetails = ({.*?})</script>', webpage, 'json data') @@ -77,12 +77,20 @@ class TEDIE(InfoExtractor): thumbnail = self._search_regex(r'</span>[\s.]*</div>[\s.]*<img src="(.*?)"', webpage, 'thumbnail') + formats = [{ + 'ext': 'mp4', + 'url': stream['file'], + 'format': stream['id'] + } for stream in info['htmlStreams']] info = { - 'id': info['id'], - 'url': info['htmlStreams'][-1]['file'], - 'ext': 'mp4', - 'title': title, - 'thumbnail': thumbnail, - 'description': desc, - } + 'id': info['id'], + 'title': title, + 'thumbnail': thumbnail, + 'description': desc, + 'formats': formats, + } + + # TODO: Remove when #980 has been merged + info.update(info['formats'][-1]) + return info diff --git a/youtube_dl/extractor/tf1.py b/youtube_dl/extractor/tf1.py index e0ffeced5..772134a12 100644 --- a/youtube_dl/extractor/tf1.py +++ b/youtube_dl/extractor/tf1.py @@ -6,19 +6,17 @@ import re from .common import InfoExtractor class TF1IE(InfoExtractor): - """ - TF1 uses the wat.tv player, currently it can only download videos with the - html5 player enabled, it cannot download HD videos. 
- """ + """TF1 uses the wat.tv player.""" _VALID_URL = r'http://videos.tf1.fr/.*-(.*?).html' _TEST = { u'url': u'http://videos.tf1.fr/auto-moto/citroen-grand-c4-picasso-2013-presentation-officielle-8062060.html', u'file': u'10635995.mp4', - u'md5': u'66789d3e91278d332f75e1feb7aea327', + u'md5': u'2e378cc28b9957607d5e88f274e637d8', u'info_dict': { u'title': u'Citroën Grand C4 Picasso 2013 : présentation officielle', u'description': u'Vidéo officielle du nouveau Citroën Grand C4 Picasso, lancé à l\'automne 2013.', - } + }, + u'skip': u'Sometimes wat serves the whole file with the --test option', } def _real_extract(self, url): diff --git a/youtube_dl/extractor/thisav.py b/youtube_dl/extractor/thisav.py new file mode 100644 index 000000000..9dcfc28b3 --- /dev/null +++ b/youtube_dl/extractor/thisav.py @@ -0,0 +1,47 @@ +#coding: utf-8 + +import re + +from .common import InfoExtractor +from ..utils import ( + determine_ext, +) + +class ThisAVIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?thisav\.com/video/(?P<id>[0-9]+)/.*' + _TEST = { + u"url": u"http://www.thisav.com/video/47734/%98%26sup1%3B%83%9E%83%82---just-fit.html", + u"file": u"47734.flv", + u"md5": u"0480f1ef3932d901f0e0e719f188f19b", + u"info_dict": { + u"title": u"高樹マリア - Just fit", + u"uploader": u"dj7970", + u"uploader_id": u"dj7970" + } + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + + video_id = mobj.group('id') + webpage = self._download_webpage(url, video_id) + title = self._html_search_regex(r'<h1>([^<]*)</h1>', webpage, u'title') + video_url = self._html_search_regex( + r"addVariable\('file','([^']+)'\);", webpage, u'video url') + uploader = self._html_search_regex( + r': <a href="http://www.thisav.com/user/[0-9]+/(?:[^"]+)">([^<]+)</a>', + webpage, u'uploader name', fatal=False) + uploader_id = self._html_search_regex( + r': <a href="http://www.thisav.com/user/[0-9]+/([^"]+)">(?:[^<]+)</a>', + webpage, u'uploader id', fatal=False) + ext = determine_ext(video_url) + + return { + '_type': 'video', + 'id': video_id, + 'url': video_url, + 'uploader': uploader, + 'uploader_id': uploader_id, + 'title': title, + 'ext': ext, + } diff --git a/youtube_dl/extractor/traileraddict.py b/youtube_dl/extractor/traileraddict.py index 324bb6231..35f89e9ee 100644 --- a/youtube_dl/extractor/traileraddict.py +++ b/youtube_dl/extractor/traileraddict.py @@ -4,11 +4,11 @@ from .common import InfoExtractor class TrailerAddictIE(InfoExtractor): - _VALID_URL = r'(?:http://)?(?:www\.)?traileraddict\.com/trailer/([^/]+)/(?:trailer|feature-trailer)' + _VALID_URL = r'(?:http://)?(?:www\.)?traileraddict\.com/(?:trailer|clip)/(?P<movie>.+?)/(?P<trailer_name>.+)' _TEST = { u'url': u'http://www.traileraddict.com/trailer/prince-avalanche/trailer', u'file': u'76184.mp4', - u'md5': u'41365557f3c8c397d091da510e73ceb4', + u'md5': u'57e39dbcf4142ceb8e1f242ff423fd71', u'info_dict': { u"title": u"Prince Avalanche Trailer", u"description": u"Trailer for Prince Avalanche.Two highway road workers spend the summer of 1988 away from their city lives. The isolated landscape becomes a place of misadventure as the men find themselves at odds with each other and the women they left behind." 
@@ -17,24 +17,30 @@ class TrailerAddictIE(InfoExtractor): def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) - video_id = mobj.group(1) - webpage = self._download_webpage(url, video_id) - + name = mobj.group('movie') + '/' + mobj.group('trailer_name') + webpage = self._download_webpage(url, name) + title = self._search_regex(r'<title>(.+?)</title>', webpage, 'video title').replace(' - Trailer Addict','') view_count = self._search_regex(r'Views: (.+?)<br />', webpage, 'Views Count') video_id = self._og_search_property('video', webpage, 'Video id').split('=')[1] - info_url = "http://www.traileraddict.com/fvar.php?tid=%s" %(str(video_id)) + # Presence of (no)watchplus function indicates HD quality is available + if re.search(r'function (no)?watchplus()', webpage): + fvar = "fvarhd" + else: + fvar = "fvar" + + info_url = "http://www.traileraddict.com/%s.php?tid=%s" % (fvar, str(video_id)) info_webpage = self._download_webpage(info_url, video_id , "Downloading the info webpage") - + final_url = self._search_regex(r'&fileurl=(.+)', info_webpage, 'Download url').replace('%3F','?') thumbnail_url = self._search_regex(r'&image=(.+?)&', info_webpage, 'thumbnail url') ext = final_url.split('.')[-1].split('?')[0] - + return [{ 'id' : video_id, 'url' : final_url, diff --git a/youtube_dl/extractor/trilulilu.py b/youtube_dl/extractor/trilulilu.py new file mode 100644 index 000000000..0bf028f61 --- /dev/null +++ b/youtube_dl/extractor/trilulilu.py @@ -0,0 +1,73 @@ +import json +import re +import xml.etree.ElementTree + +from .common import InfoExtractor + + +class TriluliluIE(InfoExtractor): + _VALID_URL = r'(?x)(?:https?://)?(?:www\.)?trilulilu\.ro/video-(?P<category>[^/]+)/(?P<video_id>[^/]+)' + _TEST = { + u"url": u"http://www.trilulilu.ro/video-animatie/big-buck-bunny-1", + u'file': u"big-buck-bunny-1.mp4", + u'info_dict': { + u"title": u"Big Buck Bunny", + u"description": u":) pentru copilul din noi", + }, + # Server ignores Range headers (--test) + u"params": { + u"skip_download": True + } + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('video_id') + + webpage = self._download_webpage(url, video_id) + + title = self._og_search_title(webpage) + thumbnail = self._og_search_thumbnail(webpage) + description = self._og_search_description(webpage) + + log_str = self._search_regex( + r'block_flash_vars[ ]=[ ]({[^}]+})', webpage, u'log info') + log = json.loads(log_str) + + format_url = (u'http://fs%(server)s.trilulilu.ro/%(hash)s/' + u'video-formats2' % log) + format_str = self._download_webpage( + format_url, video_id, + note=u'Downloading formats', + errnote=u'Error while downloading formats') + + format_doc = xml.etree.ElementTree.fromstring(format_str) + + video_url_template = ( + u'http://fs%(server)s.trilulilu.ro/stream.php?type=video' + u'&source=site&hash=%(hash)s&username=%(userid)s&' + u'key=ministhebest&format=%%s&sig=&exp=' % + log) + formats = [ + { + 'format': fnode.text, + 'url': video_url_template % fnode.text, + 'ext': fnode.text.partition('-')[0] + } + + for fnode in format_doc.findall('./formats/format') + ] + + info = { + '_type': 'video', + 'id': video_id, + 'formats': formats, + 'title': title, + 'description': description, + 'thumbnail': thumbnail, + } + + # TODO: Remove when #980 has been merged + info.update(formats[-1]) + + return info diff --git a/youtube_dl/extractor/tudou.py b/youtube_dl/extractor/tudou.py index 1405b73f7..79679a14a 100644 --- a/youtube_dl/extractor/tudou.py +++ 
b/youtube_dl/extractor/tudou.py @@ -7,15 +7,25 @@ from .common import InfoExtractor class TudouIE(InfoExtractor): - _VALID_URL = r'(?:http://)?(?:www\.)?tudou\.com/(?:listplay|programs)/(?:view|(.+?))/(?:([^/]+)|([^/]+))(?:\.html)?' - _TEST = { + _VALID_URL = r'(?:http://)?(?:www\.)?tudou\.com/(?:listplay|programs|albumplay)/(?:view|(.+?))/(?:([^/]+)|([^/]+))(?:\.html)?' + _TESTS = [{ u'url': u'http://www.tudou.com/listplay/zzdE77v6Mmo/2xN2duXMxmw.html', u'file': u'159448201.f4v', u'md5': u'140a49ed444bd22f93330985d8475fcb', u'info_dict': { u"title": u"卡马乔国足开大脚长传冲吊集锦" } - } + }, + { + u'url': u'http://www.tudou.com/albumplay/TenTw_JgiPM/PzsAs5usU9A.html', + u'file': u'todo.mp4', + u'md5': u'todo.mp4', + u'info_dict': { + u'title': u'todo.mp4', + }, + u'add_ie': [u'Youku'], + u'skip': u'Only works from China' + }] def _url_for_id(self, id, quality = None): info_url = "http://v2.tudou.com/f?id="+str(id) @@ -29,14 +39,18 @@ class TudouIE(InfoExtractor): mobj = re.match(self._VALID_URL, url) video_id = mobj.group(2) webpage = self._download_webpage(url, video_id) - title = re.search(",kw:\"(.+)\"",webpage) - if title is None: - title = re.search(",kw: \'(.+)\'",webpage) - title = title.group(1) - thumbnail_url = re.search(",pic: \'(.+?)\'",webpage) - if thumbnail_url is None: - thumbnail_url = re.search(",pic:\"(.+?)\"",webpage) - thumbnail_url = thumbnail_url.group(1) + + m = re.search(r'vcode:\s*[\'"](.+?)[\'"]', webpage) + if m and m.group(1): + return { + '_type': 'url', + 'url': u'youku:' + m.group(1), + 'ie_key': 'Youku' + } + + title = self._search_regex(r",kw:['\"](.+?)[\"']", webpage, u'title') + thumbnail_url = self._search_regex( + r",pic:\s*[\"'](.+?)[\"']", webpage, u'thumbnail URL', fatal=False) segs_json = self._search_regex(r'segs: \'(.*)\'', webpage, 'segments') segments = json.loads(segs_json) diff --git a/youtube_dl/extractor/unistra.py b/youtube_dl/extractor/unistra.py new file mode 100644 index 000000000..516e18914 --- /dev/null +++ b/youtube_dl/extractor/unistra.py @@ -0,0 +1,32 @@ +import re + +from .common import InfoExtractor + +class UnistraIE(InfoExtractor): + _VALID_URL = r'http://utv.unistra.fr/(?:index|video).php\?id_video\=(\d+)' + + _TEST = { + u'url': u'http://utv.unistra.fr/video.php?id_video=154', + u'file': u'154.mp4', + u'md5': u'736f605cfdc96724d55bb543ab3ced24', + u'info_dict': { + u'title': u'M!ss Yella', + u'description': u'md5:104892c71bd48e55d70b902736b81bbf', + }, + } + + def _real_extract(self, url): + id = re.match(self._VALID_URL, url).group(1) + webpage = self._download_webpage(url, id) + file = re.search(r'file: "(.*?)",', webpage).group(1) + title = self._html_search_regex(r'<title>UTV - (.*?)</', webpage, u'title') + + video_url = 'http://vod-flash.u-strasbg.fr:8080/' + file + + return {'id': id, + 'title': title, + 'ext': 'mp4', + 'url': video_url, + 'description': self._html_search_regex(r'<meta name="Description" content="(.*?)"', webpage, u'description', flags=re.DOTALL), + 'thumbnail': self._search_regex(r'image: "(.*?)"', webpage, u'thumbnail'), + } diff --git a/youtube_dl/extractor/ustream.py b/youtube_dl/extractor/ustream.py index 5f423870a..74c82587f 100644 --- a/youtube_dl/extractor/ustream.py +++ b/youtube_dl/extractor/ustream.py @@ -1,6 +1,11 @@ +import json import re from .common import InfoExtractor +from ..utils import ( + compat_urlparse, + get_meta_content, +) class UstreamIE(InfoExtractor): @@ -43,3 +48,25 @@ class UstreamIE(InfoExtractor): 'thumbnail': thumbnail, } return info + +class UstreamChannelIE(InfoExtractor): + 
_VALID_URL = r'https?://www\.ustream\.tv/channel/(?P<slug>.+)' + IE_NAME = u'ustream:channel' + + def _real_extract(self, url): + m = re.match(self._VALID_URL, url) + slug = m.group('slug') + webpage = self._download_webpage(url, slug) + channel_id = get_meta_content('ustream:channel_id', webpage) + + BASE = 'http://www.ustream.tv' + next_url = '/ajax/socialstream/videos/%s/1.json' % channel_id + video_ids = [] + while next_url: + reply = json.loads(self._download_webpage(compat_urlparse.urljoin(BASE, next_url), channel_id)) + video_ids.extend(re.findall(r'data-content-id="(\d.*)"', reply['data'])) + next_url = reply['nextUrl'] + + urls = ['http://www.ustream.tv/recorded/' + vid for vid in video_ids] + url_entries = [self.url_result(eurl, 'Ustream') for eurl in urls] + return self.playlist_result(url_entries, channel_id) diff --git a/youtube_dl/extractor/veehd.py b/youtube_dl/extractor/veehd.py new file mode 100644 index 000000000..3a99a29c6 --- /dev/null +++ b/youtube_dl/extractor/veehd.py @@ -0,0 +1,56 @@ +import re +import json + +from .common import InfoExtractor +from ..utils import ( + compat_urlparse, + get_element_by_id, + clean_html, +) + +class VeeHDIE(InfoExtractor): + _VALID_URL = r'https?://veehd.com/video/(?P<id>\d+)' + + _TEST = { + u'url': u'http://veehd.com/video/4686958', + u'file': u'4686958.mp4', + u'info_dict': { + u'title': u'Time Lapse View from Space ( ISS)', + u'uploader_id': u'spotted', + u'description': u'md5:f0094c4cf3a72e22bc4e4239ef767ad7', + }, + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + + webpage = self._download_webpage(url, video_id) + player_path = self._search_regex(r'\$\("#playeriframe"\).attr\({src : "(.+?)"', + webpage, u'player path') + player_url = compat_urlparse.urljoin(url, player_path) + player_page = self._download_webpage(player_url, video_id, + u'Downloading player page') + config_json = self._search_regex(r'value=\'config=({.+?})\'', + player_page, u'config json') + config = json.loads(config_json) + + video_url = compat_urlparse.unquote(config['clip']['url']) + title = clean_html(get_element_by_id('videoName', webpage).rpartition('|')[0]) + uploader_id = self._html_search_regex(r'<a href="/profile/\d+">(.+?)</a>', + webpage, u'uploader') + thumbnail = self._search_regex(r'<img id="veehdpreview" src="(.+?)"', + webpage, u'thumbnail') + description = self._html_search_regex(r'<td class="infodropdown".*?<div>(.*?)<ul', + webpage, u'description', flags=re.DOTALL) + + return { + '_type': 'video', + 'id': video_id, + 'title': title, + 'url': video_url, + 'ext': 'mp4', + 'uploader_id': uploader_id, + 'thumbnail': thumbnail, + 'description': description, + } diff --git a/youtube_dl/extractor/vevo.py b/youtube_dl/extractor/vevo.py index 3b16dcfbc..1c1cc418d 100644 --- a/youtube_dl/extractor/vevo.py +++ b/youtube_dl/extractor/vevo.py @@ -1,25 +1,29 @@ import re import json +import xml.etree.ElementTree +import datetime from .common import InfoExtractor from ..utils import ( + determine_ext, ExtractorError, ) + class VevoIE(InfoExtractor): """ - Accecps urls from vevo.com or in the format 'vevo:{id}' + Accepts urls from vevo.com or in the format 'vevo:{id}' (currently used by MTVIE) """ - _VALID_URL = r'((http://www.vevo.com/watch/.*?/.*?/)|(vevo:))(?P<id>.*)$' + _VALID_URL = r'((http://www.vevo.com/watch/.*?/.*?/)|(vevo:))(?P<id>.*?)(\?|$)' _TEST = { u'url': u'http://www.vevo.com/watch/hurts/somebody-to-die-for/GB1101300280', u'file': u'GB1101300280.mp4', - u'md5': 
u'06bea460acb744eab74a9d7dcb4bfd61', u'info_dict': { - u"upload_date": u"20130624", - u"uploader": u"Hurts", - u"title": u"Somebody To Die For" + u"upload_date": u"20130624", + u"uploader": u"Hurts", + u"title": u"Somebody to Die For", + u'duration': 230, } } @@ -27,27 +31,47 @@ class VevoIE(InfoExtractor): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') - json_url = 'http://www.vevo.com/data/video/%s' % video_id - base_url = 'http://smil.lvl3.vevo.com' - videos_url = '%s/Video/V2/VFILE/%s/%sr.smil' % (base_url, video_id, video_id.lower()) + json_url = 'http://videoplayer.vevo.com/VideoService/AuthenticateVideo?isrc=%s' % video_id info_json = self._download_webpage(json_url, video_id, u'Downloading json info') - links_webpage = self._download_webpage(videos_url, video_id, u'Downloading videos urls') self.report_extraction(video_id) - video_info = json.loads(info_json) - m_urls = list(re.finditer(r'<video src="(?P<ext>.*?):(?P<url>.*?)"', links_webpage)) - if m_urls is None or len(m_urls) == 0: - raise ExtractorError(u'Unable to extract video url') - # They are sorted from worst to best quality - m_url = m_urls[-1] - video_url = base_url + m_url.group('url') - ext = m_url.group('ext') - - return {'url': video_url, - 'ext': ext, - 'id': video_id, - 'title': video_info['title'], - 'thumbnail': video_info['img'], - 'upload_date': video_info['launchDate'].replace('/',''), - 'uploader': video_info['Artists'][0]['title'], - } + video_info = json.loads(info_json)['video'] + last_version = {'version': -1} + for version in video_info['videoVersions']: + # These are the HTTP downloads, other types are for different manifests + if version['sourceType'] == 2: + if version['version'] > last_version['version']: + last_version = version + if last_version['version'] == -1: + raise ExtractorError(u'Unable to extract last version of the video') + + renditions = xml.etree.ElementTree.fromstring(last_version['data']) + formats = [] + # Already sorted from worst to best quality + for rend in renditions.findall('rendition'): + attr = rend.attrib + f_url = attr['url'] + formats.append({ + 'url': f_url, + 'ext': determine_ext(f_url), + 'height': int(attr['frameheight']), + 'width': int(attr['frameWidth']), + }) + + date_epoch = int(self._search_regex( + r'/Date\((\d+)\)/', video_info['launchDate'], u'launch date'))/1000 + upload_date = datetime.datetime.fromtimestamp(date_epoch) + info = { + 'id': video_id, + 'title': video_info['title'], + 'formats': formats, + 'thumbnail': video_info['imageUrl'], + 'upload_date': upload_date.strftime('%Y%m%d'), + 'uploader': video_info['mainArtists'][0]['artistName'], + 'duration': video_info['duration'], + } + + # TODO: Remove when #980 has been merged + info.update(formats[-1]) + + return info diff --git a/youtube_dl/extractor/vice.py b/youtube_dl/extractor/vice.py new file mode 100644 index 000000000..6b93afa50 --- /dev/null +++ b/youtube_dl/extractor/vice.py @@ -0,0 +1,38 @@ +import re + +from .common import InfoExtractor +from .ooyala import OoyalaIE +from ..utils import ExtractorError + + +class ViceIE(InfoExtractor): + _VALID_URL = r'http://www.vice.com/.*?/(?P<name>.+)' + + _TEST = { + u'url': u'http://www.vice.com/Fringes/cowboy-capitalists-part-1', + u'file': u'43cW1mYzpia9IlestBjVpd23Yu3afAfp.mp4', + u'info_dict': { + u'title': u'VICE_COWBOYCAPITALISTS_PART01_v1_VICE_WM_1080p.mov', + }, + u'params': { + # Requires ffmpeg (m3u8 manifest) + u'skip_download': True, + }, + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + name 
= mobj.group('name') + webpage = self._download_webpage(url, name) + try: + ooyala_url = self._og_search_video_url(webpage) + except ExtractorError: + try: + embed_code = self._search_regex( + r'OO.Player.create\(\'ooyalaplayer\', \'(.+?)\'', webpage, + u'ooyala embed code') + ooyala_url = OoyalaIE._url_for_embed_code(embed_code) + except ExtractorError: + raise ExtractorError(u'The page doesn\'t contain a video', expected=True) + return self.url_result(ooyala_url, ie='Ooyala') + diff --git a/youtube_dl/extractor/viddler.py b/youtube_dl/extractor/viddler.py new file mode 100644 index 000000000..12c84a985 --- /dev/null +++ b/youtube_dl/extractor/viddler.py @@ -0,0 +1,64 @@ +import json +import re + +from .common import InfoExtractor +from ..utils import ( + determine_ext, +) + + +class ViddlerIE(InfoExtractor): + _VALID_URL = r'(?P<domain>https?://(?:www\.)?viddler.com)/(?:v|embed|player)/(?P<id>[0-9]+)' + _TEST = { + u"url": u"http://www.viddler.com/v/43903784", + u'file': u'43903784.mp4', + u'md5': u'fbbaedf7813e514eb7ca30410f439ac9', + u'info_dict': { + u"title": u"Video Made Easy", + u"uploader": u"viddler", + u"duration": 100.89, + } + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + + embed_url = mobj.group('domain') + u'/embed/' + video_id + webpage = self._download_webpage(embed_url, video_id) + + video_sources_code = self._search_regex( + r"(?ms)sources\s*:\s*(\{.*?\})", webpage, u'video URLs') + video_sources = json.loads(video_sources_code.replace("'", '"')) + + formats = [{ + 'url': video_url, + 'format': format_id, + } for video_url, format_id in video_sources.items()] + + title = self._html_search_regex( + r"title\s*:\s*'([^']*)'", webpage, u'title') + uploader = self._html_search_regex( + r"authorName\s*:\s*'([^']*)'", webpage, u'uploader', fatal=False) + duration_s = self._html_search_regex( + r"duration\s*:\s*([0-9.]*)", webpage, u'duration', fatal=False) + duration = float(duration_s) if duration_s else None + thumbnail = self._html_search_regex( + r"thumbnail\s*:\s*'([^']*)'", + webpage, u'thumbnail', fatal=False) + + info = { + '_type': 'video', + 'id': video_id, + 'title': title, + 'thumbnail': thumbnail, + 'uploader': uploader, + 'duration': duration, + 'formats': formats, + } + + # TODO: Remove when #980 has been merged + info['formats'][-1]['ext'] = determine_ext(info['formats'][-1]['url']) + info.update(info['formats'][-1]) + + return info diff --git a/youtube_dl/extractor/videodetective.py b/youtube_dl/extractor/videodetective.py new file mode 100644 index 000000000..d89f84094 --- /dev/null +++ b/youtube_dl/extractor/videodetective.py @@ -0,0 +1,30 @@ +import re + +from .common import InfoExtractor +from .internetvideoarchive import InternetVideoArchiveIE +from ..utils import ( + compat_urlparse, +) + + +class VideoDetectiveIE(InfoExtractor): + _VALID_URL = r'https?://www\.videodetective\.com/[^/]+/[^/]+/(?P<id>\d+)' + + _TEST = { + u'url': u'http://www.videodetective.com/movies/kick-ass-2/194487', + u'file': u'194487.mp4', + u'info_dict': { + u'title': u'KICK-ASS 2', + u'description': u'md5:65ba37ad619165afac7d432eaded6013', + u'duration': 138, + }, + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + webpage = self._download_webpage(url, video_id) + og_video = self._og_search_video_url(webpage) + query = compat_urlparse.urlparse(og_video).query + return self.url_result(InternetVideoArchiveIE._build_url(query), + ie=InternetVideoArchiveIE.ie_key()) 
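Several of the new extractors in this commit (Vice, VideoDetective, and later Weibo, Tudou and WorldStarHipHop) do not download media themselves: they locate a URL that some other extractor understands and hand the work off through InfoExtractor.url_result(). A minimal sketch of that delegation pattern follows; the class name, domain and page regex are hypothetical, while url_result(), _search_regex() and _download_webpage() are the helpers used throughout this diff:

    import re

    from .common import InfoExtractor


    class ExampleEmbedIE(InfoExtractor):
        # Hypothetical extractor: it only finds the embedded video URL and
        # re-dispatches it, so the target site's logic is not duplicated.
        _VALID_URL = r'https?://(?:www\.)?example\.com/watch/(?P<id>\d+)'

        def _real_extract(self, url):
            video_id = re.match(self._VALID_URL, url).group('id')
            webpage = self._download_webpage(url, video_id)
            # Pull out the URL that the other extractor's _VALID_URL matches
            # (for Vice this is an Ooyala URL found in the page source).
            embed_url = self._search_regex(
                r'data-embed-url="([^"]+)"', webpage, u'embed url')
            # url_result() returns a {'_type': 'url'} dict; the core resolves
            # it by running the named extractor on the new URL.
            return self.url_result(embed_url, ie='Ooyala')

The ie argument is optional; when it is omitted the new URL is matched against every extractor's _VALID_URL, which is how the Weibo extractor below hands off its Sina player links.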
diff --git a/youtube_dl/extractor/videofyme.py b/youtube_dl/extractor/videofyme.py new file mode 100644 index 000000000..94f64ffa5 --- /dev/null +++ b/youtube_dl/extractor/videofyme.py @@ -0,0 +1,48 @@ +import re +import xml.etree.ElementTree + +from .common import InfoExtractor +from ..utils import ( + find_xpath_attr, + determine_ext, +) + +class VideofyMeIE(InfoExtractor): + _VALID_URL = r'https?://(www.videofy.me/.+?|p.videofy.me/v)/(?P<id>\d+)(&|#|$)' + IE_NAME = u'videofy.me' + + _TEST = { + u'url': u'http://www.videofy.me/thisisvideofyme/1100701', + u'file': u'1100701.mp4', + u'md5': u'c77d700bdc16ae2e9f3c26019bd96143', + u'info_dict': { + u'title': u'This is VideofyMe', + u'description': None, + u'uploader': u'VideofyMe', + u'uploader_id': u'thisisvideofyme', + }, + + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + config_xml = self._download_webpage('http://sunshine.videofy.me/?videoId=%s' % video_id, + video_id) + config = xml.etree.ElementTree.fromstring(config_xml.encode('utf-8')) + video = config.find('video') + sources = video.find('sources') + url_node = next(node for node in [find_xpath_attr(sources, 'source', 'id', 'HQ %s' % key) + for key in ['on', 'av', 'off']] if node is not None) + video_url = url_node.find('url').text + + return {'id': video_id, + 'title': video.find('title').text, + 'url': video_url, + 'ext': determine_ext(video_url), + 'thumbnail': video.find('thumb').text, + 'description': video.find('description').text, + 'uploader': config.find('blog/name').text, + 'uploader_id': video.find('identifier').text, + 'view_count': re.search(r'\d+', video.find('views').text).group(), + } diff --git a/youtube_dl/extractor/videopremium.py b/youtube_dl/extractor/videopremium.py new file mode 100644 index 000000000..65f39b982 --- /dev/null +++ b/youtube_dl/extractor/videopremium.py @@ -0,0 +1,40 @@ +import re +import random + +from .common import InfoExtractor + + +class VideoPremiumIE(InfoExtractor): + _VALID_URL = r'(?:https?://)?(?:www\.)?videopremium\.tv/(?P<id>\w+)(?:/.*)?' 
+ _TEST = { + u'url': u'http://videopremium.tv/4w7oadjsf156', + u'file': u'4w7oadjsf156.f4v', + u'info_dict': { + u"title": u"youtube-dl_test_video____a_________-BaW_jenozKc.mp4.mp4" + }, + u'params': { + u'skip_download': True, + }, + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + + video_id = mobj.group('id') + webpage_url = 'http://videopremium.tv/' + video_id + webpage = self._download_webpage(webpage_url, video_id) + + self.report_extraction(video_id) + + video_title = self._html_search_regex(r'<h2(?:.*?)>\s*(.+?)\s*<', + webpage, u'video title') + + return [{ + 'id': video_id, + 'url': "rtmp://e%d.md.iplay.md/play" % random.randint(1, 16), + 'play_path': "mp4:%s.f4v" % video_id, + 'page_url': "http://videopremium.tv/" + video_id, + 'player_url': "http://videopremium.tv/uplayer/uppod.swf", + 'ext': 'f4v', + 'title': video_title, + }] diff --git a/youtube_dl/extractor/vimeo.py b/youtube_dl/extractor/vimeo.py index ac32043c1..cea29f035 100644 --- a/youtube_dl/extractor/vimeo.py +++ b/youtube_dl/extractor/vimeo.py @@ -1,5 +1,6 @@ import json import re +import itertools from .common import InfoExtractor from ..utils import ( @@ -16,21 +17,44 @@ class VimeoIE(InfoExtractor): """Information extractor for vimeo.com.""" # _VALID_URL matches Vimeo URLs - _VALID_URL = r'(?P<proto>https?://)?(?:(?:www|player)\.)?vimeo(?P<pro>pro)?\.com/(?:(?:(?:groups|album)/[^/]+)|(?:.*?)/)?(?P<direct_link>play_redirect_hls\?clip_id=)?(?:videos?/)?(?P<id>[0-9]+)(?:[?].*)?$' + _VALID_URL = r'(?P<proto>https?://)?(?:(?:www|player)\.)?vimeo(?P<pro>pro)?\.com/(?:(?:(?:groups|album)/[^/]+)|(?:.*?)/)?(?P<direct_link>play_redirect_hls\?clip_id=)?(?:videos?/)?(?P<id>[0-9]+)/?(?:[?].*)?$' _NETRC_MACHINE = 'vimeo' IE_NAME = u'vimeo' - _TEST = { - u'url': u'http://vimeo.com/56015672', - u'file': u'56015672.mp4', - u'md5': u'8879b6cc097e987f02484baf890129e5', - u'info_dict': { - u"upload_date": u"20121220", - u"description": u"This is a test case for youtube-dl.\nFor more information, see github.com/rg3/youtube-dl\nTest chars: \u2605 \" ' \u5e78 / \\ \u00e4 \u21ad \U0001d550", - u"uploader_id": u"user7108434", - u"uploader": u"Filippo Valsorda", - u"title": u"youtube-dl test video - \u2605 \" ' \u5e78 / \\ \u00e4 \u21ad \U0001d550" - } - } + _TESTS = [ + { + u'url': u'http://vimeo.com/56015672', + u'file': u'56015672.mp4', + u'md5': u'8879b6cc097e987f02484baf890129e5', + u'info_dict': { + u"upload_date": u"20121220", + u"description": u"This is a test case for youtube-dl.\nFor more information, see github.com/rg3/youtube-dl\nTest chars: \u2605 \" ' \u5e78 / \\ \u00e4 \u21ad \U0001d550", + u"uploader_id": u"user7108434", + u"uploader": u"Filippo Valsorda", + u"title": u"youtube-dl test video - \u2605 \" ' \u5e78 / \\ \u00e4 \u21ad \U0001d550", + }, + }, + { + u'url': u'http://vimeopro.com/openstreetmapus/state-of-the-map-us-2013/video/68093876', + u'file': u'68093876.mp4', + u'md5': u'3b5ca6aa22b60dfeeadf50b72e44ed82', + u'note': u'Vimeo Pro video (#1197)', + u'info_dict': { + u'uploader_id': u'openstreetmapus', + u'uploader': u'OpenStreetMap US', + u'title': u'Andy Allan - Putting the Carto into OpenStreetMap Cartography', + }, + }, + { + u'url': u'http://player.vimeo.com/video/54469442', + u'file': u'54469442.mp4', + u'md5': u'619b811a4417aa4abe78dc653becf511', + u'note': u'Videos that embed the url in the player page', + u'info_dict': { + u'title': u'Kathy Sierra: Building the minimum Badass User, Business of Software', + u'uploader': u'The BLN & Business of Software', + }, + }, + ] def 
_login(self): (username, password) = self._get_login_info() @@ -82,7 +106,9 @@ class VimeoIE(InfoExtractor): video_id = mobj.group('id') if not mobj.group('proto'): url = 'https://' + url - if mobj.group('direct_link') or mobj.group('pro'): + elif mobj.group('pro'): + url = 'http://player.vimeo.com/video/' + video_id + elif mobj.group('direct_link'): url = 'https://vimeo.com/' + video_id # Retrieve video webpage to extract further information @@ -96,7 +122,8 @@ class VimeoIE(InfoExtractor): # Extract the config JSON try: - config = webpage.split(' = {config:')[1].split(',assets:')[0] + config = self._search_regex([r' = {config:({.+?}),assets:', r'c=({.+?);'], + webpage, u'info section', flags=re.DOTALL) config = json.loads(config) except: if re.search('The creator of this video has not given you permission to embed it on this domain.', webpage): @@ -116,12 +143,22 @@ class VimeoIE(InfoExtractor): video_uploader_id = config["video"]["owner"]["url"].split('/')[-1] if config["video"]["owner"]["url"] else None # Extract video thumbnail - video_thumbnail = config["video"]["thumbnail"] + video_thumbnail = config["video"].get("thumbnail") + if video_thumbnail is None: + _, video_thumbnail = sorted((int(width), t_url) for (width, t_url) in config["video"]["thumbs"].items())[-1] # Extract video description - video_description = get_element_by_attribute("itemprop", "description", webpage) - if video_description: video_description = clean_html(video_description) - else: video_description = u'' + video_description = None + try: + video_description = get_element_by_attribute("itemprop", "description", webpage) + if video_description: video_description = clean_html(video_description) + except AssertionError as err: + # On some pages like (http://player.vimeo.com/video/54469442) the + # html tags are not closed, python 2.6 cannot handle it + if err.args[0] == 'we should not get here!': + pass + else: + raise # Extract upload date video_upload_date = None @@ -138,14 +175,15 @@ class VimeoIE(InfoExtractor): # TODO bind to format param codecs = [('h264', 'mp4'), ('vp8', 'flv'), ('vp6', 'flv')] files = { 'hd': [], 'sd': [], 'other': []} + config_files = config["video"].get("files") or config["request"].get("files") for codec_name, codec_extension in codecs: - if codec_name in config["video"]["files"]: - if 'hd' in config["video"]["files"][codec_name]: + if codec_name in config_files: + if 'hd' in config_files[codec_name]: files['hd'].append((codec_name, codec_extension, 'hd')) - elif 'sd' in config["video"]["files"][codec_name]: + elif 'sd' in config_files[codec_name]: files['sd'].append((codec_name, codec_extension, 'sd')) else: - files['other'].append((codec_name, codec_extension, config["video"]["files"][codec_name][0])) + files['other'].append((codec_name, codec_extension, config_files[codec_name][0])) for quality in ('hd', 'sd', 'other'): if len(files[quality]) > 0: @@ -157,8 +195,12 @@ class VimeoIE(InfoExtractor): else: raise ExtractorError(u'No known codec found') - video_url = "http://player.vimeo.com/play_redirect?clip_id=%s&sig=%s&time=%s&quality=%s&codecs=%s&type=moogaloop_local&embed_location=" \ - %(video_id, sig, timestamp, video_quality, video_codec.upper()) + video_url = None + if isinstance(config_files[video_codec], dict): + video_url = config_files[video_codec][video_quality].get("url") + if video_url is None: + video_url = "http://player.vimeo.com/play_redirect?clip_id=%s&sig=%s&time=%s&quality=%s&codecs=%s&type=moogaloop_local&embed_location=" \ + %(video_id, sig, timestamp, 
video_quality, video_codec.upper()) return [{ 'id': video_id, @@ -171,3 +213,31 @@ class VimeoIE(InfoExtractor): 'thumbnail': video_thumbnail, 'description': video_description, }] + + +class VimeoChannelIE(InfoExtractor): + IE_NAME = u'vimeo:channel' + _VALID_URL = r'(?:https?://)?vimeo.\com/channels/(?P<id>[^/]+)' + _MORE_PAGES_INDICATOR = r'<a.+?rel="next"' + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + channel_id = mobj.group('id') + video_ids = [] + + for pagenum in itertools.count(1): + webpage = self._download_webpage('http://vimeo.com/channels/%s/videos/page:%d' % (channel_id, pagenum), + channel_id, u'Downloading page %s' % pagenum) + video_ids.extend(re.findall(r'id="clip_(\d+?)"', webpage)) + if re.search(self._MORE_PAGES_INDICATOR, webpage, re.DOTALL) is None: + break + + entries = [self.url_result('http://vimeo.com/%s' % video_id, 'Vimeo') + for video_id in video_ids] + channel_title = self._html_search_regex(r'<a href="/channels/%s">(.*?)</a>' % channel_id, + webpage, u'channel title') + return {'_type': 'playlist', + 'id': channel_id, + 'title': channel_title, + 'entries': entries, + } diff --git a/youtube_dl/extractor/wat.py b/youtube_dl/extractor/wat.py index 0d1302cd2..29c25f0e3 100644 --- a/youtube_dl/extractor/wat.py +++ b/youtube_dl/extractor/wat.py @@ -6,7 +6,6 @@ import re from .common import InfoExtractor from ..utils import ( - compat_urllib_parse, unified_strdate, ) @@ -17,11 +16,12 @@ class WatIE(InfoExtractor): _TEST = { u'url': u'http://www.wat.tv/video/world-war-philadelphia-vost-6bv55_2fjr7_.html', u'file': u'10631273.mp4', - u'md5': u'0a4fe7870f31eaeabb5e25fd8da8414a', + u'md5': u'd8b2231e1e333acd12aad94b80937e19', u'info_dict': { u'title': u'World War Z - Philadelphia VOST', u'description': u'La menace est partout. Que se passe-t-il à Philadelphia ?\r\nWORLD WAR Z, avec Brad Pitt, au cinéma le 3 juillet.\r\nhttp://www.worldwarz.fr', - } + }, + u'skip': u'Sometimes wat serves the whole file with the --test option', } def download_video_info(self, real_id): @@ -58,20 +58,8 @@ class WatIE(InfoExtractor): # Otherwise we can continue and extract just one part, we have to use # the short id for getting the video url - player_data = compat_urllib_parse.urlencode({'shortVideoId': short_id, - 'html5': '1'}) - player_info = self._download_webpage('http://www.wat.tv/player?' 
+ player_data, - real_id, u'Downloading player info') - player = json.loads(player_info)['player'] - html5_player = self._html_search_regex(r'iframe src="(.*?)"', player, - 'html5 player') - player_webpage = self._download_webpage(html5_player, real_id, - u'Downloading player webpage') - - video_url = self._search_regex(r'urlhtml5 : "(.*?)"', player_webpage, - 'video url') info = {'id': real_id, - 'url': video_url, + 'url': 'http://wat.tv/get/android5/%s.mp4' % real_id, 'ext': 'mp4', 'title': first_chapter['title'], 'thumbnail': first_chapter['preview'], diff --git a/youtube_dl/extractor/websurg.py b/youtube_dl/extractor/websurg.py new file mode 100644 index 000000000..7d335d444 --- /dev/null +++ b/youtube_dl/extractor/websurg.py @@ -0,0 +1,58 @@ +# coding: utf-8 + +import re + +from ..utils import ( + compat_urllib_request, + compat_urllib_parse +) + +from .common import InfoExtractor + +class WeBSurgIE(InfoExtractor): + IE_NAME = u'websurg.com' + _VALID_URL = r'http://.*?\.websurg\.com/MEDIA/\?noheader=1&doi=(.*)' + + _TEST = { + u'url': u'http://www.websurg.com/MEDIA/?noheader=1&doi=vd01en4012', + u'file': u'vd01en4012.mp4', + u'params': { + u'skip_download': True, + } + } + + _LOGIN_URL = 'http://www.websurg.com/inc/login/login_div.ajax.php?login=1' + + def _real_initialize(self): + + login_form = { + 'username': self._downloader.params['username'], + 'password': self._downloader.params['password'], + 'Submit': 1 + } + + request = compat_urllib_request.Request( + self._LOGIN_URL, compat_urllib_parse.urlencode(login_form)) + request.add_header( + 'Content-Type', 'application/x-www-form-urlencoded;charset=utf-8') + compat_urllib_request.urlopen(request).info() + webpage = self._download_webpage(self._LOGIN_URL, '', 'Logging in') + + if webpage != 'OK': + self._downloader.report_error( + u'Unable to log in: bad username/password') + + def _real_extract(self, url): + video_id = re.match(self._VALID_URL, url).group(1) + + webpage = self._download_webpage(url, video_id) + + url_info = re.search(r'streamer="(.*?)" src="(.*?)"', webpage) + + return {'id': video_id, + 'title': self._og_search_title(webpage), + 'description': self._og_search_description(webpage), + 'ext' : 'mp4', + 'url' : url_info.group(1) + '/' + url_info.group(2), + 'thumbnail': self._og_search_thumbnail(webpage) + } diff --git a/youtube_dl/extractor/weibo.py b/youtube_dl/extractor/weibo.py new file mode 100644 index 000000000..0757495bd --- /dev/null +++ b/youtube_dl/extractor/weibo.py @@ -0,0 +1,48 @@ +# coding: utf-8 + +import re +import json + +from .common import InfoExtractor + +class WeiboIE(InfoExtractor): + """ + The videos in Weibo come from different sites, this IE just finds the link + to the external video and returns it. 
+ """ + _VALID_URL = r'https?://video\.weibo\.com/v/weishipin/t_(?P<id>.+?)\.htm' + + _TEST = { + u'url': u'http://video.weibo.com/v/weishipin/t_zjUw2kZ.htm', + u'file': u'98322879.flv', + u'info_dict': { + u'title': u'魔声耳机最新广告“All Eyes On Us”', + }, + u'note': u'Sina video', + u'params': { + u'skip_download': True, + }, + } + + # Additional example videos from different sites + # Youku: http://video.weibo.com/v/weishipin/t_zQGDWQ8.htm + # 56.com: http://video.weibo.com/v/weishipin/t_zQ44HxN.htm + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url, flags=re.VERBOSE) + video_id = mobj.group('id') + info_url = 'http://video.weibo.com/?s=v&a=play_list&format=json&mix_video_id=t_%s' % video_id + info_page = self._download_webpage(info_url, video_id) + info = json.loads(info_page) + + videos_urls = map(lambda v: v['play_page_url'], info['result']['data']) + #Prefer sina video since they have thumbnails + videos_urls = sorted(videos_urls, key=lambda u: u'video.sina.com' in u) + player_url = videos_urls[-1] + m_sina = re.match(r'https?://video.sina.com.cn/v/b/(\d+)-\d+.html', player_url) + if m_sina is not None: + self.to_screen('Sina video detected') + sina_id = m_sina.group(1) + player_url = 'http://you.video.sina.com.cn/swf/quotePlayer.swf?vid=%s' % sina_id + return self.url_result(player_url) + diff --git a/youtube_dl/extractor/worldstarhiphop.py b/youtube_dl/extractor/worldstarhiphop.py index 5b9779c05..3237596a3 100644 --- a/youtube_dl/extractor/worldstarhiphop.py +++ b/youtube_dl/extractor/worldstarhiphop.py @@ -21,6 +21,13 @@ class WorldStarHipHopIE(InfoExtractor): webpage_src = self._download_webpage(url, video_id) + m_vevo_id = re.search(r'videoId=(.*?)&?', + webpage_src) + + if m_vevo_id is not None: + self.to_screen(u'Vevo video detected:') + return self.url_result('vevo:%s' % m_vevo_id.group(1), ie='Vevo') + video_url = self._search_regex(r'so\.addVariable\("file","(.*?)"\)', webpage_src, u'video URL') diff --git a/youtube_dl/extractor/xhamster.py b/youtube_dl/extractor/xhamster.py index 0f1feeffd..361619694 100644 --- a/youtube_dl/extractor/xhamster.py +++ b/youtube_dl/extractor/xhamster.py @@ -3,15 +3,16 @@ import re from .common import InfoExtractor from ..utils import ( compat_urllib_parse, - + unescapeHTML, + determine_ext, ExtractorError, ) class XHamsterIE(InfoExtractor): """Information Extractor for xHamster""" - _VALID_URL = r'(?:http://)?(?:www.)?xhamster\.com/movies/(?P<id>[0-9]+)/.*\.html' - _TEST = { + _VALID_URL = r'(?:http://)?(?:www\.)?xhamster\.com/movies/(?P<id>[0-9]+)/(?P<seo>.+?)\.html(?:\?.*)?' 
+ _TESTS = [{ u'url': u'http://xhamster.com/movies/1509445/femaleagent_shy_beauty_takes_the_bait.html', u'file': u'1509445.flv', u'md5': u'9f48e0e8d58e3076bb236ff412ab62fa', @@ -20,13 +21,24 @@ class XHamsterIE(InfoExtractor): u"uploader_id": u"Ruseful2011", u"title": u"FemaleAgent Shy beauty takes the bait" } - } + }, + { + u'url': u'http://xhamster.com/movies/2221348/britney_spears_sexy_booty.html?hd', + u'file': u'2221348.flv', + u'md5': u'e767b9475de189320f691f49c679c4c7', + u'info_dict': { + u"upload_date": u"20130914", + u"uploader_id": u"jojo747400", + u"title": u"Britney Spears Sexy Booty" + } + }] def _real_extract(self,url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') - mrss_url = 'http://xhamster.com/movies/%s/.html' % video_id + seo = mobj.group('seo') + mrss_url = 'http://xhamster.com/movies/%s/%s.html?hd' % (video_id, seo) webpage = self._download_webpage(mrss_url, video_id) mobj = re.search(r'\'srv\': \'(?P<server>[^\']*)\',\s*\'file\': \'(?P<file>[^\']+)\',', webpage) @@ -36,15 +48,16 @@ class XHamsterIE(InfoExtractor): video_url = compat_urllib_parse.unquote(mobj.group('file')) else: video_url = mobj.group('server')+'/key='+mobj.group('file') - video_extension = video_url.split('.')[-1] video_title = self._html_search_regex(r'<title>(?P<title>.+?) - xHamster\.com</title>', webpage, u'title') - # Can't see the description anywhere in the UI - # video_description = self._html_search_regex(r'<span>Description: </span>(?P<description>[^<]+)', - # webpage, u'description', fatal=False) - # if video_description: video_description = unescapeHTML(video_description) + # Only a few videos have a description + mobj = re.search('<span>Description: </span>(?P<description>[^<]+)', webpage) + if mobj: + video_description = unescapeHTML(mobj.group('description')) + else: + video_description = None mobj = re.search(r'hint=\'(?P<upload_date_Y>[0-9]{4})-(?P<upload_date_m>[0-9]{2})-(?P<upload_date_d>[0-9]{2}) [0-9]{2}:[0-9]{2}:[0-9]{2} [A-Z]{3,4}\'', webpage) if mobj: @@ -62,9 +75,9 @@ class XHamsterIE(InfoExtractor): return [{ 'id': video_id, 'url': video_url, - 'ext': video_extension, + 'ext': determine_ext(video_url), 'title': video_title, - # 'description': video_description, + 'description': video_description, 'upload_date': video_upload_date, 'uploader_id': video_uploader_id, 'thumbnail': video_thumbnail diff --git a/youtube_dl/extractor/yahoo.py b/youtube_dl/extractor/yahoo.py index 32d5b9477..464b498f5 100644 --- a/youtube_dl/extractor/yahoo.py +++ b/youtube_dl/extractor/yahoo.py @@ -1,4 +1,3 @@ -import datetime import itertools import json import re @@ -6,86 +5,104 @@ import re from .common import InfoExtractor, SearchInfoExtractor from ..utils import ( compat_urllib_parse, - - ExtractorError, + compat_urlparse, + determine_ext, + clean_html, ) + class YahooIE(InfoExtractor): IE_DESC = u'Yahoo screen' _VALID_URL = r'http://screen\.yahoo\.com/.*?-(?P<id>\d*?)\.html' - _TEST = { - u'url': u'http://screen.yahoo.com/julian-smith-travis-legg-watch-214727115.html', - u'file': u'214727115.flv', - u'md5': u'2e717f169c1be93d84d3794a00d4a325', - u'info_dict': { - u"title": u"Julian Smith & Travis Legg Watch Julian Smith" + _TESTS = [ + { + u'url': u'http://screen.yahoo.com/julian-smith-travis-legg-watch-214727115.html', + u'file': u'214727115.flv', + u'info_dict': { + u'title': u'Julian Smith & Travis Legg Watch Julian Smith', + u'description': u'Julian and Travis watch Julian Smith', + }, + u'params': { + # Requires rtmpdump + u'skip_download': True, + }, }, - u'skip': 
u'Requires rtmpdump' - } + { + u'url': u'http://screen.yahoo.com/wired/codefellas-s1-ep12-cougar-lies-103000935.html', + u'file': u'103000935.flv', + u'info_dict': { + u'title': u'Codefellas - The Cougar Lies with Spanish Moss', + u'description': u'Agent Topple\'s mustache does its dirty work, and Nicole brokers a deal for peace. But why is the NSA collecting millions of Instagram brunch photos? And if your waffles have nothing to hide, what are they so worried about?', + }, + u'params': { + # Requires rtmpdump + u'skip_download': True, + }, + }, + ] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) - if mobj is None: - raise ExtractorError(u'Invalid URL: %s' % url) video_id = mobj.group('id') webpage = self._download_webpage(url, video_id) - m_id = re.search(r'YUI\.namespace\("Media"\)\.CONTENT_ID = "(?P<new_id>.+?)";', webpage) - if m_id is None: - # TODO: Check which url parameters are required - info_url = 'http://cosmos.bcst.yahoo.com/rest/v2/pops;lmsoverride=1;outputformat=mrss;cb=974419660;id=%s;rd=news.yahoo.com;datacontext=mdb;lg=KCa2IihxG3qE60vQ7HtyUy' % video_id - webpage = self._download_webpage(info_url, video_id, u'Downloading info webpage') - info_re = r'''<title><!\[CDATA\[(?P<title>.*?)\]\]></title>.* - <description><!\[CDATA\[(?P<description>.*?)\]\]></description>.* - <media:pubStart><!\[CDATA\[(?P<date>.*?)\ .*\]\]></media:pubStart>.* - <media:content\ medium="image"\ url="(?P<thumb>.*?)"\ name="LARGETHUMB" - ''' - self.report_extraction(video_id) - m_info = re.search(info_re, webpage, re.VERBOSE|re.DOTALL) - if m_info is None: - raise ExtractorError(u'Unable to extract video info') - video_title = m_info.group('title') - video_description = m_info.group('description') - video_thumb = m_info.group('thumb') - video_date = m_info.group('date') - video_date = datetime.datetime.strptime(video_date, '%m/%d/%Y').strftime('%Y%m%d') - - # TODO: Find a way to get mp4 videos - rest_url = 'http://cosmos.bcst.yahoo.com/rest/v2/pops;element=stream;outputformat=mrss;id=%s;lmsoverride=1;bw=375;dynamicstream=1;cb=83521105;tech=flv,mp4;rd=news.yahoo.com;datacontext=mdb;lg=KCa2IihxG3qE60vQ7HtyUy' % video_id - webpage = self._download_webpage(rest_url, video_id, u'Downloading video url webpage') - m_rest = re.search(r'<media:content url="(?P<url>.*?)" path="(?P<path>.*?)"', webpage) - video_url = m_rest.group('url') - video_path = m_rest.group('path') - if m_rest is None: - raise ExtractorError(u'Unable to extract video url') + items_json = self._search_regex(r'YVIDEO_INIT_ITEMS = ({.*?});$', + webpage, u'items', flags=re.MULTILINE) + items = json.loads(items_json) + info = items['mediaItems']['query']['results']['mediaObj'][0] + # The 'meta' field is not always in the video webpage, we request it + # from another page + long_id = info['id'] + query = ('SELECT * FROM yahoo.media.video.streams WHERE id="%s"' + ' AND plrs="86Gj0vCaSzV_Iuf6hNylf2"' % long_id) + data = compat_urllib_parse.urlencode({ + 'q': query, + 'env': 'prod', + 'format': 'json', + }) + query_result_json = self._download_webpage( + 'http://video.query.yahoo.com/v1/public/yql?' 
+ data, + video_id, u'Downloading video info') + query_result = json.loads(query_result_json) + info = query_result['query']['results']['mediaObj'][0] + meta = info['meta'] + + formats = [] + for s in info['streams']: + format_info = { + 'width': s.get('width'), + 'height': s.get('height'), + 'bitrate': s.get('bitrate'), + } + + host = s['host'] + path = s['path'] + if host.startswith('rtmp'): + format_info.update({ + 'url': host, + 'play_path': path, + 'ext': 'flv', + }) + else: + format_url = compat_urlparse.urljoin(host, path) + format_info['url'] = format_url + format_info['ext'] = determine_ext(format_url) + + formats.append(format_info) + formats = sorted(formats, key=lambda f:(f['height'], f['width'])) + + info = { + 'id': video_id, + 'title': meta['title'], + 'formats': formats, + 'description': clean_html(meta['description']), + 'thumbnail': meta['thumbnail'], + } + # TODO: Remove when #980 has been merged + info.update(formats[-1]) - else: # We have to use a different method if another id is defined - long_id = m_id.group('new_id') - info_url = 'http://video.query.yahoo.com/v1/public/yql?q=SELECT%20*%20FROM%20yahoo.media.video.streams%20WHERE%20id%3D%22' + long_id + '%22%20AND%20format%3D%22mp4%2Cflv%22%20AND%20protocol%3D%22rtmp%2Chttp%22%20AND%20plrs%3D%2286Gj0vCaSzV_Iuf6hNylf2%22%20AND%20acctid%3D%22389%22%20AND%20plidl%3D%22%22%20AND%20pspid%3D%22792700001%22%20AND%20offnetwork%3D%22false%22%20AND%20site%3D%22ivy%22%20AND%20lang%3D%22en-US%22%20AND%20region%3D%22US%22%20AND%20override%3D%22none%22%3B&env=prod&format=json&callback=YUI.Env.JSONP.yui_3_8_1_1_1368368376830_335' - webpage = self._download_webpage(info_url, video_id, u'Downloading info json') - json_str = re.search(r'YUI.Env.JSONP.yui.*?\((.*?)\);', webpage).group(1) - info = json.loads(json_str) - res = info[u'query'][u'results'][u'mediaObj'][0] - stream = res[u'streams'][0] - video_path = stream[u'path'] - video_url = stream[u'host'] - meta = res[u'meta'] - video_title = meta[u'title'] - video_description = meta[u'description'] - video_thumb = meta[u'thumbnail'] - video_date = None # I can't find it + return info - info_dict = { - 'id': video_id, - 'url': video_url, - 'play_path': video_path, - 'title':video_title, - 'description': video_description, - 'thumbnail': video_thumb, - 'upload_date': video_date, - 'ext': 'flv', - } - return info_dict class YahooSearchIE(SearchInfoExtractor): IE_DESC = u'Yahoo screen search' diff --git a/youtube_dl/extractor/youku.py b/youtube_dl/extractor/youku.py index eb9829801..9d88c17f5 100644 --- a/youtube_dl/extractor/youku.py +++ b/youtube_dl/extractor/youku.py @@ -13,7 +13,7 @@ from ..utils import ( class YoukuIE(InfoExtractor): - _VALID_URL = r'(?:http://)?v\.youku\.com/v_show/id_(?P<ID>[A-Za-z0-9]+)\.html' + _VALID_URL = r'(?:(?:http://)?(?:v|player)\.youku\.com/(?:v_show/id_|player\.php/sid/)|youku:)(?P<ID>[A-Za-z0-9]+)(?:\.html|/v\.swf|)' _TEST = { u"url": u"http://v.youku.com/v_show/id_XNDgyMDQ2NTQw.html", u"file": u"XNDgyMDQ2NTQw_part00.flv", @@ -66,6 +66,12 @@ class YoukuIE(InfoExtractor): self.report_extraction(video_id) try: config = json.loads(jsondata) + error_code = config['data'][0].get('error_code') + if error_code: + # -8 means blocked outside China. + error = config['data'][0].get('error') # Chinese and English, separated by newline. 
+ raise ExtractorError(error or u'Server reported error %i' % error_code,
+ expected=True)
video_title = config['data'][0]['title']
seed = config['data'][0]['seed']
@@ -89,6 +95,7 @@ class YoukuIE(InfoExtractor):
fileid = config['data'][0]['streamfileids'][format]
keys = [s['k'] for s in config['data'][0]['segs'][format]]
+ # segs is usually a dictionary, but an empty *list* if an error occurred.
except (UnicodeDecodeError, ValueError, KeyError):
raise ExtractorError(u'Unable to extract info section')
diff --git a/youtube_dl/extractor/youporn.py b/youtube_dl/extractor/youporn.py
index d1156bf42..b1f93dd1b 100644
--- a/youtube_dl/extractor/youporn.py
+++ b/youtube_dl/extractor/youporn.py
@@ -12,14 +12,16 @@ from ..utils import (
unescapeHTML,
unified_strdate,
)
-
+from ..aes import (
+ aes_decrypt_text
+)
class YouPornIE(InfoExtractor):
_VALID_URL = r'^(?:https?://)?(?:\w+\.)?youporn\.com/watch/(?P<videoid>[0-9]+)/(?P<title>[^/]+)'
_TEST = {
u'url': u'http://www.youporn.com/watch/505835/sex-ed-is-it-safe-to-masturbate-daily/',
u'file': u'505835.mp4',
- u'md5': u'c37ddbaaa39058c76a7e86c6813423c1',
+ u'md5': u'71ec5fcfddacf80f495efa8b6a8d9a89',
u'info_dict': {
u"upload_date": u"20101221",
u"description": u"Love & Sex Answers: http://bit.ly/DanAndJenn -- Is It Unhealthy To Masturbate Daily?",
@@ -49,6 +51,7 @@ class YouPornIE(InfoExtractor):
req = compat_urllib_request.Request(url)
req.add_header('Cookie', 'age_verified=1')
webpage = self._download_webpage(req, video_id)
+ age_limit = self._rta_search(webpage)
# Get JSON parameters
json_params = self._search_regex(r'var currentVideo = new Video\((.*)\);', webpage, u'JSON parameters')
@@ -75,7 +78,15 @@ class YouPornIE(InfoExtractor):
# Get all of the links from the page
LINK_RE = r'(?s)<a href="(?P<url>[^"]+)">'
links = re.findall(LINK_RE, download_list_html)
- if(len(links) == 0):
+
+ # Get the link to the HD video, if available
+ mobj = re.search(r'var encryptedQuality720URL = \'(?P<encrypted_video_url>[a-zA-Z0-9+/]+={0,2})\';', webpage)
+ if mobj is not None:
+ encrypted_video_url = mobj.group(u'encrypted_video_url')
+ video_url = aes_decrypt_text(encrypted_video_url, video_title, 32).decode('utf-8')
+ links = [video_url] + links
+
+ if not links:
raise ExtractorError(u'ERROR: no known formats available for video')
self.to_screen(u'Links found: %d' % len(links))
@@ -105,14 +116,15 @@ class YouPornIE(InfoExtractor):
'ext': extension,
'format': format,
'thumbnail': thumbnail,
- 'description': video_description
+ 'description': video_description,
+ 'age_limit': age_limit,
})
if self._downloader.params.get('listformats', None):
self._print_formats(formats)
return
- req_format = self._downloader.params.get('format', None)
+ req_format = self._downloader.params.get('format', 'best')
self.to_screen(u'Format: %s' % req_format)
if req_format is None or req_format == 'best':
diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py
index 91cd2192f..4347651d7 100644
--- a/youtube_dl/extractor/youtube.py
+++ b/youtube_dl/extractor/youtube.py
@@ -1,69 +1,256 @@
# coding: utf-8
+import collections
+import errno
+import io
+import itertools
import json
-import netrc
+import os.path
import re
import socket
-import itertools
+import string
+import struct
+import traceback
+import xml.etree.ElementTree
+import zlib
from .common import InfoExtractor, SearchInfoExtractor
+from .subtitles import SubtitlesInfoExtractor
from ..utils import (
+ compat_chr,
compat_http_client,
compat_parse_qs,
compat_urllib_error,
compat_urllib_parse,
compat_urllib_request,
+ compat_urlparse, compat_str, clean_html, + get_cachedir, get_element_by_id, ExtractorError, unescapeHTML, unified_strdate, orderedSet, + write_json_file, ) +class YoutubeBaseInfoExtractor(InfoExtractor): + """Provide base functions for Youtube extractors""" + _LOGIN_URL = 'https://accounts.google.com/ServiceLogin' + _LANG_URL = r'https://www.youtube.com/?hl=en&persist_hl=1&gl=US&persist_gl=1&opt_out_ackd=1' + _AGE_URL = 'http://www.youtube.com/verify_age?next_url=/&gl=US&hl=en' + _NETRC_MACHINE = 'youtube' + # If True it will raise an error if no login info is provided + _LOGIN_REQUIRED = False + + def report_lang(self): + """Report attempt to set language.""" + self.to_screen(u'Setting language') + + def _set_language(self): + request = compat_urllib_request.Request(self._LANG_URL) + try: + self.report_lang() + compat_urllib_request.urlopen(request).read() + except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: + self._downloader.report_warning(u'unable to set language: %s' % compat_str(err)) + return False + return True + + def _login(self): + (username, password) = self._get_login_info() + # No authentication to be performed + if username is None: + if self._LOGIN_REQUIRED: + raise ExtractorError(u'No login info available, needed for using %s.' % self.IE_NAME, expected=True) + return False + + request = compat_urllib_request.Request(self._LOGIN_URL) + try: + login_page = compat_urllib_request.urlopen(request).read().decode('utf-8') + except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: + self._downloader.report_warning(u'unable to fetch login page: %s' % compat_str(err)) + return False + + galx = None + dsh = None + match = re.search(re.compile(r'<input.+?name="GALX".+?value="(.+?)"', re.DOTALL), login_page) + if match: + galx = match.group(1) + match = re.search(re.compile(r'<input.+?name="dsh".+?value="(.+?)"', re.DOTALL), login_page) + if match: + dsh = match.group(1) + + # Log in + login_form_strs = { + u'continue': u'https://www.youtube.com/signin?action_handle_signin=true&feature=sign_in_button&hl=en_US&nomobiletemp=1', + u'Email': username, + u'GALX': galx, + u'Passwd': password, + u'PersistentCookie': u'yes', + u'_utf8': u'霱', + u'bgresponse': u'js_disabled', + u'checkConnection': u'', + u'checkedDomains': u'youtube', + u'dnConn': u'', + u'dsh': dsh, + u'pstMsg': u'0', + u'rmShown': u'1', + u'secTok': u'', + u'signIn': u'Sign in', + u'timeStmp': u'', + u'service': u'youtube', + u'uilel': u'3', + u'hl': u'en_US', + } + # Convert to UTF-8 *before* urlencode because Python 2.x's urlencode + # chokes on unicode + login_form = dict((k.encode('utf-8'), v.encode('utf-8')) for k,v in login_form_strs.items()) + login_data = compat_urllib_parse.urlencode(login_form).encode('ascii') + request = compat_urllib_request.Request(self._LOGIN_URL, login_data) + try: + self.report_login() + login_results = compat_urllib_request.urlopen(request).read().decode('utf-8') + if re.search(r'(?i)<form[^>]* id="gaia_loginform"', login_results) is not None: + self._downloader.report_warning(u'unable to log in: bad username or password') + return False + except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: + self._downloader.report_warning(u'unable to log in: %s' % compat_str(err)) + return False + return True -class YoutubeIE(InfoExtractor): + def _confirm_age(self): + age_form = { + 'next_url': '/', + 'action_confirm': 'Confirm', + } + request = 
compat_urllib_request.Request(self._AGE_URL, compat_urllib_parse.urlencode(age_form)) + try: + self.report_age_confirmation() + compat_urllib_request.urlopen(request).read().decode('utf-8') + except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: + raise ExtractorError(u'Unable to confirm age: %s' % compat_str(err)) + return True + + def _real_initialize(self): + if self._downloader is None: + return + if not self._set_language(): + return + if not self._login(): + return + self._confirm_age() + + +class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor): IE_DESC = u'YouTube.com' _VALID_URL = r"""^ ( (?:https?://)? # http(s):// (optional) - (?:youtu\.be/|(?:\w+\.)?youtube(?:-nocookie)?\.com/| - tube\.majestyc\.net/) # the various hostnames, with wildcard subdomains + (?:(?:(?:(?:\w+\.)?youtube(?:-nocookie)?\.com/| + tube\.majestyc\.net/| + youtube\.googleapis\.com/) # the various hostnames, with wildcard subdomains (?:.*?\#/)? # handle anchor (#/) redirect urls (?: # the various things that can precede the ID: (?:(?:v|embed|e)/) # v/ or embed/ or e/ |(?: # or the v= param in all its forms - (?:watch|movie(?:_popup)?(?:\.php)?)? # preceding watch(_popup|.php) or nothing (like /?v=xxxx) + (?:(?:watch|movie)(?:_popup)?(?:\.php)?)? # preceding watch(_popup|.php) or nothing (like /?v=xxxx) (?:\?|\#!?) # the params delimiter ? or # or #! (?:.*?&)? # any other preceding param (like /?s=tuff&v=xxxx) v= ) - )? # optional -> youtube.com/xxxx is OK + )) + |youtu\.be/ # just youtu.be/xxxx + ) )? # all until now is optional -> you can pass the naked ID - ([0-9A-Za-z_-]+) # here is it! the YouTube video ID + ([0-9A-Za-z_-]{11}) # here is it! the YouTube video ID (?(1).+)? # if we found the ID, everything can follow $""" - _LANG_URL = r'https://www.youtube.com/?hl=en&persist_hl=1&gl=US&persist_gl=1&opt_out_ackd=1' - _LOGIN_URL = 'https://accounts.google.com/ServiceLogin' - _AGE_URL = 'http://www.youtube.com/verify_age?next_url=/&gl=US&hl=en' _NEXT_URL_RE = r'[\?&]next_url=([^&]+)' - _NETRC_MACHINE = 'youtube' # Listed in order of quality - _available_formats = ['38', '37', '46', '22', '45', '35', '44', '34', '18', '43', '6', '5', '17', '13'] - _available_formats_prefer_free = ['38', '46', '37', '45', '22', '44', '35', '43', '34', '18', '6', '5', '17', '13'] + _available_formats = ['38', '37', '46', '22', '45', '35', '44', '34', '18', '43', '6', '5', '36', '17', '13', + # Apple HTTP Live Streaming + '96', '95', '94', '93', '92', '132', '151', + # 3D + '85', '84', '102', '83', '101', '82', '100', + # Dash video + '138', '137', '248', '136', '247', '135', '246', + '245', '244', '134', '243', '133', '242', '160', + # Dash audio + '141', '172', '140', '171', '139', + ] + _available_formats_prefer_free = ['38', '46', '37', '45', '22', '44', '35', '43', '34', '18', '6', '5', '36', '17', '13', + # Apple HTTP Live Streaming + '96', '95', '94', '93', '92', '132', '151', + # 3D + '85', '102', '84', '101', '83', '100', '82', + # Dash video + '138', '248', '137', '247', '136', '246', '245', + '244', '135', '243', '134', '242', '133', '160', + # Dash audio + '172', '141', '171', '140', '139', + ] + _video_formats_map = { + 'flv': ['35', '34', '6', '5'], + '3gp': ['36', '17', '13'], + 'mp4': ['38', '37', '22', '18'], + 'webm': ['46', '45', '44', '43'], + } _video_extensions = { '13': '3gp', - '17': 'mp4', + '17': '3gp', '18': 'mp4', '22': 'mp4', + '36': '3gp', '37': 'mp4', '38': 'mp4', '43': 'webm', '44': 'webm', '45': 'webm', '46': 'webm', + + # 3d videos + '82': 'mp4', 
+ '83': 'mp4', + '84': 'mp4', + '85': 'mp4', + '100': 'webm', + '101': 'webm', + '102': 'webm', + + # Apple HTTP Live Streaming + '92': 'mp4', + '93': 'mp4', + '94': 'mp4', + '95': 'mp4', + '96': 'mp4', + '132': 'mp4', + '151': 'mp4', + + # Dash mp4 + '133': 'mp4', + '134': 'mp4', + '135': 'mp4', + '136': 'mp4', + '137': 'mp4', + '138': 'mp4', + '139': 'mp4', + '140': 'mp4', + '141': 'mp4', + '160': 'mp4', + + # Dash webm + '171': 'webm', + '172': 'webm', + '242': 'webm', + '243': 'webm', + '244': 'webm', + '245': 'webm', + '246': 'webm', + '247': 'webm', + '248': 'webm', } _video_dimensions = { '5': '240x400', @@ -74,13 +261,76 @@ class YoutubeIE(InfoExtractor): '22': '720x1280', '34': '360x640', '35': '480x854', + '36': '240x320', '37': '1080x1920', '38': '3072x4096', '43': '360x640', '44': '480x854', '45': '720x1280', '46': '1080x1920', + '82': '360p', + '83': '480p', + '84': '720p', + '85': '1080p', + '92': '240p', + '93': '360p', + '94': '480p', + '95': '720p', + '96': '1080p', + '100': '360p', + '101': '480p', + '102': '720p', + '132': '240p', + '151': '72p', + '133': '240p', + '134': '360p', + '135': '480p', + '136': '720p', + '137': '1080p', + '138': '>1080p', + '139': '48k', + '140': '128k', + '141': '256k', + '160': '192p', + '171': '128k', + '172': '256k', + '242': '240p', + '243': '360p', + '244': '480p', + '245': '480p', + '246': '480p', + '247': '720p', + '248': '1080p', + } + _special_itags = { + '82': '3D', + '83': '3D', + '84': '3D', + '85': '3D', + '100': '3D', + '101': '3D', + '102': '3D', + '133': 'DASH Video', + '134': 'DASH Video', + '135': 'DASH Video', + '136': 'DASH Video', + '137': 'DASH Video', + '138': 'DASH Video', + '139': 'DASH Audio', + '140': 'DASH Audio', + '141': 'DASH Audio', + '160': 'DASH Video', + '171': 'DASH Audio', + '172': 'DASH Audio', + '242': 'DASH Video', + '243': 'DASH Video', + '244': 'DASH Video', + '245': 'DASH Video', + '246': 'DASH Video', + '247': 'DASH Video', + '248': 'DASH Video', } + IE_NAME = u'youtube' _TESTS = [ { @@ -113,8 +363,8 @@ class YoutubeIE(InfoExtractor): u"info_dict": { u"upload_date": u"20120506", u"title": u"Icona Pop - I Love It (feat. 
Charli XCX) [OFFICIAL VIDEO]", - u"description": u"md5:b085c9804f5ab69f4adea963a2dceb3c", - u"uploader": u"IconaPop", + u"description": u"md5:5b292926389560516e384ac437c0ec07", + u"uploader": u"Icona Pop", u"uploader_id": u"IconaPop" } }, @@ -136,12 +386,12 @@ class YoutubeIE(InfoExtractor): @classmethod def suitable(cls, url): """Receives a URL and returns True if suitable for this IE.""" - if YoutubePlaylistIE.suitable(url) or YoutubeSubscriptionsIE.suitable(url): return False + if YoutubePlaylistIE.suitable(url): return False return re.match(cls._VALID_URL, url, re.VERBOSE) is not None - def report_lang(self): - """Report attempt to set language.""" - self.to_screen(u'Setting language') + def __init__(self, *args, **kwargs): + super(YoutubeIE, self).__init__(*args, **kwargs) + self._player_cache = {} def report_video_webpage_download(self, video_id): """Report attempt to download video webpage.""" @@ -151,19 +401,6 @@ class YoutubeIE(InfoExtractor): """Report attempt to download video info webpage.""" self.to_screen(u'%s: Downloading video info webpage' % video_id) - def report_video_subtitles_download(self, video_id): - """Report attempt to download video info webpage.""" - self.to_screen(u'%s: Checking available subtitles' % video_id) - - def report_video_subtitles_request(self, video_id, sub_lang, format): - """Report attempt to download video info webpage.""" - self.to_screen(u'%s: Downloading video subtitles for %s.%s' % (video_id, sub_lang, format)) - - def report_video_subtitles_available(self, video_id, sub_lang_list): - """Report available subtitles.""" - sub_lang = ",".join(list(sub_lang_list.keys())) - self.to_screen(u'%s: Available subtitles for video: %s' % (video_id, sub_lang)) - def report_information_extraction(self, video_id): """Report attempt to extract video information.""" self.to_screen(u'%s: Extracting video information' % video_id) @@ -176,216 +413,772 @@ class YoutubeIE(InfoExtractor): """Indicate the download will use the RTMP protocol.""" self.to_screen(u'RTMP download detected') - def _decrypt_signature(self, s): + def _extract_signature_function(self, video_id, player_url, slen): + id_m = re.match(r'.*-(?P<id>[a-zA-Z0-9_-]+)\.(?P<ext>[a-z]+)$', + player_url) + player_type = id_m.group('ext') + player_id = id_m.group('id') + + # Read from filesystem cache + func_id = '%s_%s_%d' % (player_type, player_id, slen) + assert os.path.basename(func_id) == func_id + cache_dir = get_cachedir(self._downloader.params) + + cache_enabled = cache_dir is not None + if cache_enabled: + cache_fn = os.path.join(os.path.expanduser(cache_dir), + u'youtube-sigfuncs', + func_id + '.json') + try: + with io.open(cache_fn, 'r', encoding='utf-8') as cachef: + cache_spec = json.load(cachef) + return lambda s: u''.join(s[i] for i in cache_spec) + except IOError: + pass # No cache available + + if player_type == 'js': + code = self._download_webpage( + player_url, video_id, + note=u'Downloading %s player %s' % (player_type, player_id), + errnote=u'Download of %s failed' % player_url) + res = self._parse_sig_js(code) + elif player_type == 'swf': + urlh = self._request_webpage( + player_url, video_id, + note=u'Downloading %s player %s' % (player_type, player_id), + errnote=u'Download of %s failed' % player_url) + code = urlh.read() + res = self._parse_sig_swf(code) + else: + assert False, 'Invalid player type %r' % player_type + + if cache_enabled: + try: + test_string = u''.join(map(compat_chr, range(slen))) + cache_res = res(test_string) + cache_spec = [ord(c) for c in cache_res] + try: + 
os.makedirs(os.path.dirname(cache_fn)) + except OSError as ose: + if ose.errno != errno.EEXIST: + raise + write_json_file(cache_spec, cache_fn) + except Exception: + tb = traceback.format_exc() + self._downloader.report_warning( + u'Writing cache to %r failed: %s' % (cache_fn, tb)) + + return res + + def _print_sig_code(self, func, slen): + def gen_sig_code(idxs): + def _genslice(start, end, step): + starts = u'' if start == 0 else str(start) + ends = (u':%d' % (end+step)) if end + step >= 0 else u':' + steps = u'' if step == 1 else (u':%d' % step) + return u's[%s%s%s]' % (starts, ends, steps) + + step = None + start = '(Never used)' # Quelch pyflakes warnings - start will be + # set as soon as step is set + for i, prev in zip(idxs[1:], idxs[:-1]): + if step is not None: + if i - prev == step: + continue + yield _genslice(start, prev, step) + step = None + continue + if i - prev in [-1, 1]: + step = i - prev + start = prev + continue + else: + yield u's[%d]' % prev + if step is None: + yield u's[%d]' % i + else: + yield _genslice(start, i, step) + + test_string = u''.join(map(compat_chr, range(slen))) + cache_res = func(test_string) + cache_spec = [ord(c) for c in cache_res] + expr_code = u' + '.join(gen_sig_code(cache_spec)) + code = u'if len(s) == %d:\n return %s\n' % (slen, expr_code) + self.to_screen(u'Extracted signature function:\n' + code) + + def _parse_sig_js(self, jscode): + funcname = self._search_regex( + r'signature=([a-zA-Z]+)', jscode, + u'Initial JS player signature function name') + + functions = {} + + def argidx(varname): + return string.lowercase.index(varname) + + def interpret_statement(stmt, local_vars, allow_recursion=20): + if allow_recursion < 0: + raise ExtractorError(u'Recursion limit reached') + + if stmt.startswith(u'var '): + stmt = stmt[len(u'var '):] + ass_m = re.match(r'^(?P<out>[a-z]+)(?:\[(?P<index>[^\]]+)\])?' 
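+ # (matches both "a=<expr>" and "a[i]=<expr>"; the optional <index>
+ # group selects the indexed-assignment branch below)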
+ + r'=(?P<expr>.*)$', stmt) + if ass_m: + if ass_m.groupdict().get('index'): + def assign(val): + lvar = local_vars[ass_m.group('out')] + idx = interpret_expression(ass_m.group('index'), + local_vars, allow_recursion) + assert isinstance(idx, int) + lvar[idx] = val + return val + expr = ass_m.group('expr') + else: + def assign(val): + local_vars[ass_m.group('out')] = val + return val + expr = ass_m.group('expr') + elif stmt.startswith(u'return '): + assign = lambda v: v + expr = stmt[len(u'return '):] + else: + raise ExtractorError( + u'Cannot determine left side of statement in %r' % stmt) + + v = interpret_expression(expr, local_vars, allow_recursion) + return assign(v) + + def interpret_expression(expr, local_vars, allow_recursion): + if expr.isdigit(): + return int(expr) + + if expr.isalpha(): + return local_vars[expr] + + m = re.match(r'^(?P<in>[a-z]+)\.(?P<member>.*)$', expr) + if m: + member = m.group('member') + val = local_vars[m.group('in')] + if member == 'split("")': + return list(val) + if member == 'join("")': + return u''.join(val) + if member == 'length': + return len(val) + if member == 'reverse()': + return val[::-1] + slice_m = re.match(r'slice\((?P<idx>.*)\)', member) + if slice_m: + idx = interpret_expression( + slice_m.group('idx'), local_vars, allow_recursion-1) + return val[idx:] + + m = re.match( + r'^(?P<in>[a-z]+)\[(?P<idx>.+)\]$', expr) + if m: + val = local_vars[m.group('in')] + idx = interpret_expression(m.group('idx'), local_vars, + allow_recursion-1) + return val[idx] + + m = re.match(r'^(?P<a>.+?)(?P<op>[%])(?P<b>.+?)$', expr) + if m: + a = interpret_expression(m.group('a'), + local_vars, allow_recursion) + b = interpret_expression(m.group('b'), + local_vars, allow_recursion) + return a % b + + m = re.match( + r'^(?P<func>[a-zA-Z]+)\((?P<args>[a-z0-9,]+)\)$', expr) + if m: + fname = m.group('func') + if fname not in functions: + functions[fname] = extract_function(fname) + argvals = [int(v) if v.isdigit() else local_vars[v] + for v in m.group('args').split(',')] + return functions[fname](argvals) + raise ExtractorError(u'Unsupported JS expression %r' % expr) + + def extract_function(funcname): + func_m = re.search( + r'function ' + re.escape(funcname) + + r'\((?P<args>[a-z,]+)\){(?P<code>[^}]+)}', + jscode) + argnames = func_m.group('args').split(',') + + def resf(args): + local_vars = dict(zip(argnames, args)) + for stmt in func_m.group('code').split(';'): + res = interpret_statement(stmt, local_vars) + return res + return resf + + initial_function = extract_function(funcname) + return lambda s: initial_function([s]) + + def _parse_sig_swf(self, file_contents): + if file_contents[1:3] != b'WS': + raise ExtractorError( + u'Not an SWF file; header is %r' % file_contents[:3]) + if file_contents[:1] == b'C': + content = zlib.decompress(file_contents[8:]) + else: + raise NotImplementedError(u'Unsupported compression format %r' % + file_contents[:1]) + + def extract_tags(content): + pos = 0 + while pos < len(content): + header16 = struct.unpack('<H', content[pos:pos+2])[0] + pos += 2 + tag_code = header16 >> 6 + tag_len = header16 & 0x3f + if tag_len == 0x3f: + tag_len = struct.unpack('<I', content[pos:pos+4])[0] + pos += 4 + assert pos+tag_len <= len(content) + yield (tag_code, content[pos:pos+tag_len]) + pos += tag_len + + code_tag = next(tag + for tag_code, tag in extract_tags(content) + if tag_code == 82) + p = code_tag.index(b'\0', 4) + 1 + code_reader = io.BytesIO(code_tag[p:]) + + # Parse ABC (AVM2 ByteCode) + def read_int(reader=None): + if reader is 
None: + reader = code_reader + res = 0 + shift = 0 + for _ in range(5): + buf = reader.read(1) + assert len(buf) == 1 + b = struct.unpack('<B', buf)[0] + res = res | ((b & 0x7f) << shift) + if b & 0x80 == 0: + break + shift += 7 + return res + + def u30(reader=None): + res = read_int(reader) + assert res & 0xf0000000 == 0 + return res + u32 = read_int + + def s32(reader=None): + v = read_int(reader) + if v & 0x80000000 != 0: + v = - ((v ^ 0xffffffff) + 1) + return v + + def read_string(reader=None): + if reader is None: + reader = code_reader + slen = u30(reader) + resb = reader.read(slen) + assert len(resb) == slen + return resb.decode('utf-8') + + def read_bytes(count, reader=None): + if reader is None: + reader = code_reader + resb = reader.read(count) + assert len(resb) == count + return resb + + def read_byte(reader=None): + resb = read_bytes(1, reader=reader) + res = struct.unpack('<B', resb)[0] + return res + + # minor_version + major_version + read_bytes(2 + 2) + + # Constant pool + int_count = u30() + for _c in range(1, int_count): + s32() + uint_count = u30() + for _c in range(1, uint_count): + u32() + double_count = u30() + read_bytes((double_count-1) * 8) + string_count = u30() + constant_strings = [u''] + for _c in range(1, string_count): + s = read_string() + constant_strings.append(s) + namespace_count = u30() + for _c in range(1, namespace_count): + read_bytes(1) # kind + u30() # name + ns_set_count = u30() + for _c in range(1, ns_set_count): + count = u30() + for _c2 in range(count): + u30() + multiname_count = u30() + MULTINAME_SIZES = { + 0x07: 2, # QName + 0x0d: 2, # QNameA + 0x0f: 1, # RTQName + 0x10: 1, # RTQNameA + 0x11: 0, # RTQNameL + 0x12: 0, # RTQNameLA + 0x09: 2, # Multiname + 0x0e: 2, # MultinameA + 0x1b: 1, # MultinameL + 0x1c: 1, # MultinameLA + } + multinames = [u''] + for _c in range(1, multiname_count): + kind = u30() + assert kind in MULTINAME_SIZES, u'Invalid multiname kind %r' % kind + if kind == 0x07: + u30() # namespace_idx + name_idx = u30() + multinames.append(constant_strings[name_idx]) + else: + multinames.append('[MULTINAME kind: %d]' % kind) + for _c2 in range(MULTINAME_SIZES[kind]): + u30() + + # Methods + method_count = u30() + MethodInfo = collections.namedtuple( + 'MethodInfo', + ['NEED_ARGUMENTS', 'NEED_REST']) + method_infos = [] + for method_id in range(method_count): + param_count = u30() + u30() # return type + for _ in range(param_count): + u30() # param type + u30() # name index (always 0 for youtube) + flags = read_byte() + if flags & 0x08 != 0: + # Options present + option_count = u30() + for c in range(option_count): + u30() # val + read_bytes(1) # kind + if flags & 0x80 != 0: + # Param names present + for _ in range(param_count): + u30() # param name + mi = MethodInfo(flags & 0x01 != 0, flags & 0x04 != 0) + method_infos.append(mi) + + # Metadata + metadata_count = u30() + for _c in range(metadata_count): + u30() # name + item_count = u30() + for _c2 in range(item_count): + u30() # key + u30() # value + + def parse_traits_info(): + trait_name_idx = u30() + kind_full = read_byte() + kind = kind_full & 0x0f + attrs = kind_full >> 4 + methods = {} + if kind in [0x00, 0x06]: # Slot or Const + u30() # Slot id + u30() # type_name_idx + vindex = u30() + if vindex != 0: + read_byte() # vkind + elif kind in [0x01, 0x02, 0x03]: # Method / Getter / Setter + u30() # disp_id + method_idx = u30() + methods[multinames[trait_name_idx]] = method_idx + elif kind == 0x04: # Class + u30() # slot_id + u30() # classi + elif kind == 0x05: # Function + 
u30() # slot_id + function_idx = u30() + methods[function_idx] = multinames[trait_name_idx] + else: + raise ExtractorError(u'Unsupported trait kind %d' % kind) + + if attrs & 0x4 != 0: # Metadata present + metadata_count = u30() + for _c3 in range(metadata_count): + u30() # metadata index + + return methods + + # Classes + TARGET_CLASSNAME = u'SignatureDecipher' + searched_idx = multinames.index(TARGET_CLASSNAME) + searched_class_id = None + class_count = u30() + for class_id in range(class_count): + name_idx = u30() + if name_idx == searched_idx: + # We found the class we're looking for! + searched_class_id = class_id + u30() # super_name idx + flags = read_byte() + if flags & 0x08 != 0: # Protected namespace is present + u30() # protected_ns_idx + intrf_count = u30() + for _c2 in range(intrf_count): + u30() + u30() # iinit + trait_count = u30() + for _c2 in range(trait_count): + parse_traits_info() + + if searched_class_id is None: + raise ExtractorError(u'Target class %r not found' % + TARGET_CLASSNAME) + + method_names = {} + method_idxs = {} + for class_id in range(class_count): + u30() # cinit + trait_count = u30() + for _c2 in range(trait_count): + trait_methods = parse_traits_info() + if class_id == searched_class_id: + method_names.update(trait_methods.items()) + method_idxs.update(dict( + (idx, name) + for name, idx in trait_methods.items())) + + # Scripts + script_count = u30() + for _c in range(script_count): + u30() # init + trait_count = u30() + for _c2 in range(trait_count): + parse_traits_info() + + # Method bodies + method_body_count = u30() + Method = collections.namedtuple('Method', ['code', 'local_count']) + methods = {} + for _c in range(method_body_count): + method_idx = u30() + u30() # max_stack + local_count = u30() + u30() # init_scope_depth + u30() # max_scope_depth + code_length = u30() + code = read_bytes(code_length) + if method_idx in method_idxs: + m = Method(code, local_count) + methods[method_idxs[method_idx]] = m + exception_count = u30() + for _c2 in range(exception_count): + u30() # from + u30() # to + u30() # target + u30() # exc_type + u30() # var_name + trait_count = u30() + for _c2 in range(trait_count): + parse_traits_info() + + assert p + code_reader.tell() == len(code_tag) + assert len(methods) == len(method_idxs) + + method_pyfunctions = {} + + def extract_function(func_name): + if func_name in method_pyfunctions: + return method_pyfunctions[func_name] + if func_name not in methods: + raise ExtractorError(u'Cannot find function %r' % func_name) + m = methods[func_name] + + def resfunc(args): + registers = ['(this)'] + list(args) + [None] * m.local_count + stack = [] + coder = io.BytesIO(m.code) + while True: + opcode = struct.unpack('!B', coder.read(1))[0] + if opcode == 36: # pushbyte + v = struct.unpack('!B', coder.read(1))[0] + stack.append(v) + elif opcode == 44: # pushstring + idx = u30(coder) + stack.append(constant_strings[idx]) + elif opcode == 48: # pushscope + # We don't implement the scope register, so we'll just + # ignore the popped value + stack.pop() + elif opcode == 70: # callproperty + index = u30(coder) + mname = multinames[index] + arg_count = u30(coder) + args = list(reversed( + [stack.pop() for _ in range(arg_count)])) + obj = stack.pop() + if mname == u'split': + assert len(args) == 1 + assert isinstance(args[0], compat_str) + assert isinstance(obj, compat_str) + if args[0] == u'': + res = list(obj) + else: + res = obj.split(args[0]) + stack.append(res) + elif mname == u'slice': + assert len(args) == 1 + assert 
isinstance(args[0], int) + assert isinstance(obj, list) + res = obj[args[0]:] + stack.append(res) + elif mname == u'join': + assert len(args) == 1 + assert isinstance(args[0], compat_str) + assert isinstance(obj, list) + res = args[0].join(obj) + stack.append(res) + elif mname in method_pyfunctions: + stack.append(method_pyfunctions[mname](args)) + else: + raise NotImplementedError( + u'Unsupported property %r on %r' + % (mname, obj)) + elif opcode == 72: # returnvalue + res = stack.pop() + return res + elif opcode == 79: # callpropvoid + index = u30(coder) + mname = multinames[index] + arg_count = u30(coder) + args = list(reversed( + [stack.pop() for _ in range(arg_count)])) + obj = stack.pop() + if mname == u'reverse': + assert isinstance(obj, list) + obj.reverse() + else: + raise NotImplementedError( + u'Unsupported (void) property %r on %r' + % (mname, obj)) + elif opcode == 93: # findpropstrict + index = u30(coder) + mname = multinames[index] + res = extract_function(mname) + stack.append(res) + elif opcode == 97: # setproperty + index = u30(coder) + value = stack.pop() + idx = stack.pop() + obj = stack.pop() + assert isinstance(obj, list) + assert isinstance(idx, int) + obj[idx] = value + elif opcode == 98: # getlocal + index = u30(coder) + stack.append(registers[index]) + elif opcode == 99: # setlocal + index = u30(coder) + value = stack.pop() + registers[index] = value + elif opcode == 102: # getproperty + index = u30(coder) + pname = multinames[index] + if pname == u'length': + obj = stack.pop() + assert isinstance(obj, list) + stack.append(len(obj)) + else: # Assume attribute access + idx = stack.pop() + assert isinstance(idx, int) + obj = stack.pop() + assert isinstance(obj, list) + stack.append(obj[idx]) + elif opcode == 128: # coerce + u30(coder) + elif opcode == 133: # coerce_s + assert isinstance(stack[-1], (type(None), compat_str)) + elif opcode == 164: # modulo + value2 = stack.pop() + value1 = stack.pop() + res = value1 % value2 + stack.append(res) + elif opcode == 208: # getlocal_0 + stack.append(registers[0]) + elif opcode == 209: # getlocal_1 + stack.append(registers[1]) + elif opcode == 210: # getlocal_2 + stack.append(registers[2]) + elif opcode == 211: # getlocal_3 + stack.append(registers[3]) + elif opcode == 214: # setlocal_2 + registers[2] = stack.pop() + elif opcode == 215: # setlocal_3 + registers[3] = stack.pop() + else: + raise NotImplementedError( + u'Unsupported opcode %d' % opcode) + + method_pyfunctions[func_name] = resfunc + return resfunc + + initial_function = extract_function(u'decipher') + return lambda s: initial_function([s]) + + def _decrypt_signature(self, s, video_id, player_url, age_gate=False): """Turn the encrypted s field into a working signature""" - if len(s) == 88: - return s[48] + s[81:67:-1] + s[82] + s[66:62:-1] + s[85] + s[61:48:-1] + s[67] + s[47:12:-1] + s[3] + s[11:3:-1] + s[2] + s[12] + if player_url is not None: + try: + player_id = (player_url, len(s)) + if player_id not in self._player_cache: + func = self._extract_signature_function( + video_id, player_url, len(s) + ) + self._player_cache[player_id] = func + func = self._player_cache[player_id] + if self._downloader.params.get('youtube_print_sig_code'): + self._print_sig_code(func, len(s)) + return func(s) + except Exception: + tb = traceback.format_exc() + self._downloader.report_warning( + u'Automatic signature extraction failed: ' + tb) + + self._downloader.report_warning( + u'Warning: Falling back to static signature algorithm') + + return self._static_decrypt_signature( + 
s, video_id, player_url, age_gate) + + def _static_decrypt_signature(self, s, video_id, player_url, age_gate): + if age_gate: + # The videos with age protection use another player, so the + # algorithms can be different. + if len(s) == 86: + return s[2:63] + s[82] + s[64:82] + s[63] + + if len(s) == 93: + return s[86:29:-1] + s[88] + s[28:5:-1] + elif len(s) == 92: + return s[25] + s[3:25] + s[0] + s[26:42] + s[79] + s[43:79] + s[91] + s[80:83] + elif len(s) == 91: + return s[84:27:-1] + s[86] + s[26:5:-1] + elif len(s) == 90: + return s[25] + s[3:25] + s[2] + s[26:40] + s[77] + s[41:77] + s[89] + s[78:81] + elif len(s) == 89: + return s[84:78:-1] + s[87] + s[77:60:-1] + s[0] + s[59:3:-1] + elif len(s) == 88: + return s[7:28] + s[87] + s[29:45] + s[55] + s[46:55] + s[2] + s[56:87] + s[28] elif len(s) == 87: - return s[62] + s[82:62:-1] + s[83] + s[61:52:-1] + s[0] + s[51:2:-1] + return s[6:27] + s[4] + s[28:39] + s[27] + s[40:59] + s[2] + s[60:] elif len(s) == 86: - return s[2:63] + s[82] + s[64:82] + s[63] + return s[80:72:-1] + s[16] + s[71:39:-1] + s[72] + s[38:16:-1] + s[82] + s[15::-1] elif len(s) == 85: - return s[76] + s[82:76:-1] + s[83] + s[75:60:-1] + s[0] + s[59:50:-1] + s[1] + s[49:2:-1] + return s[3:11] + s[0] + s[12:55] + s[84] + s[56:84] elif len(s) == 84: - return s[83:36:-1] + s[2] + s[35:26:-1] + s[3] + s[25:3:-1] + s[26] + return s[78:70:-1] + s[14] + s[69:37:-1] + s[70] + s[36:14:-1] + s[80] + s[:14][::-1] elif len(s) == 83: - return s[6] + s[3:6] + s[33] + s[7:24] + s[0] + s[25:33] + s[53] + s[34:53] + s[24] + s[54:] + return s[80:63:-1] + s[0] + s[62:0:-1] + s[63] elif len(s) == 82: - return s[36] + s[79:67:-1] + s[81] + s[66:40:-1] + s[33] + s[39:36:-1] + s[40] + s[35] + s[0] + s[67] + s[32:0:-1] + s[34] + return s[80:37:-1] + s[7] + s[36:7:-1] + s[0] + s[6:0:-1] + s[37] elif len(s) == 81: - return s[6] + s[3:6] + s[33] + s[7:24] + s[0] + s[25:33] + s[2] + s[34:53] + s[24] + s[54:81] + return s[56] + s[79:56:-1] + s[41] + s[55:41:-1] + s[80] + s[40:34:-1] + s[0] + s[33:29:-1] + s[34] + s[28:9:-1] + s[29] + s[8:0:-1] + s[9] + elif len(s) == 80: + return s[1:19] + s[0] + s[20:68] + s[19] + s[69:80] + elif len(s) == 79: + return s[54] + s[77:54:-1] + s[39] + s[53:39:-1] + s[78] + s[38:34:-1] + s[0] + s[33:29:-1] + s[34] + s[28:9:-1] + s[29] + s[8:0:-1] + s[9] else: raise ExtractorError(u'Unable to decrypt signature, key length %d not supported; retrying might work' % (len(s))) def _get_available_subtitles(self, video_id): - self.report_video_subtitles_download(video_id) - request = compat_urllib_request.Request('http://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id) try: - sub_list = compat_urllib_request.urlopen(request).read().decode('utf-8') - except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: - return (u'unable to download video subtitles: %s' % compat_str(err), None) - sub_lang_list = re.findall(r'name="([^"]*)"[^>]+lang_code="([\w\-]+)"', sub_list) - sub_lang_list = dict((l[1], l[0]) for l in sub_lang_list) + sub_list = self._download_webpage( + 'http://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id, + video_id, note=False) + except ExtractorError as err: + self._downloader.report_warning(u'unable to download video subtitles: %s' % compat_str(err)) + return {} + lang_list = re.findall(r'name="([^"]*)"[^>]+lang_code="([\w\-]+)"', sub_list) + + sub_lang_list = {} + for l in lang_list: + lang = l[1] + params = compat_urllib_parse.urlencode({ + 'lang': lang, + 'v': video_id, + 'fmt': 
self._downloader.params.get('subtitlesformat'), + 'name': l[0], + }) + url = u'http://www.youtube.com/api/timedtext?' + params + sub_lang_list[lang] = url if not sub_lang_list: - return (u'video doesn\'t have subtitles', None) + self._downloader.report_warning(u'video doesn\'t have subtitles') + return {} return sub_lang_list - def _list_available_subtitles(self, video_id): - sub_lang_list = self._get_available_subtitles(video_id) - self.report_video_subtitles_available(video_id, sub_lang_list) - - def _request_subtitle(self, sub_lang, sub_name, video_id, format): - """ - Return tuple: - (error_message, sub_lang, sub) - """ - self.report_video_subtitles_request(video_id, sub_lang, format) - params = compat_urllib_parse.urlencode({ - 'lang': sub_lang, - 'name': sub_name, - 'v': video_id, - 'fmt': format, - }) - url = 'http://www.youtube.com/api/timedtext?' + params - try: - sub = compat_urllib_request.urlopen(url).read().decode('utf-8') - except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: - return (u'unable to download video subtitles: %s' % compat_str(err), None, None) - if not sub: - return (u'Did not fetch video subtitles', None, None) - return (None, sub_lang, sub) - - def _request_automatic_caption(self, video_id, webpage): + def _get_available_automatic_caption(self, video_id, webpage): """We need the webpage for getting the captions url, pass it as an argument to speed up the process.""" - sub_lang = self._downloader.params.get('subtitleslang') or 'en' sub_format = self._downloader.params.get('subtitlesformat') self.to_screen(u'%s: Looking for automatic captions' % video_id) mobj = re.search(r';ytplayer.config = ({.*?});', webpage) - err_msg = u'Couldn\'t find automatic captions for "%s"' % sub_lang + err_msg = u'Couldn\'t find automatic captions for %s' % video_id if mobj is None: - return [(err_msg, None, None)] + self._downloader.report_warning(err_msg) + return {} player_config = json.loads(mobj.group(1)) try: args = player_config[u'args'] caption_url = args[u'ttsurl'] timestamp = args[u'timestamp'] - params = compat_urllib_parse.urlencode({ - 'lang': 'en', - 'tlang': sub_lang, - 'fmt': sub_format, - 'ts': timestamp, - 'kind': 'asr', + # We get the available subtitles + list_params = compat_urllib_parse.urlencode({ + 'type': 'list', + 'tlangs': 1, + 'asrs': 1, }) - subtitles_url = caption_url + '&' + params - sub = self._download_webpage(subtitles_url, video_id, u'Downloading automatic captions') - return [(None, sub_lang, sub)] - except KeyError: - return [(err_msg, None, None)] - - def _extract_subtitle(self, video_id): - """ - Return a list with a tuple: - [(error_message, sub_lang, sub)] - """ - sub_lang_list = self._get_available_subtitles(video_id) - sub_format = self._downloader.params.get('subtitlesformat') - if isinstance(sub_lang_list,tuple): #There was some error, it didn't get the available subtitles - return [(sub_lang_list[0], None, None)] - if self._downloader.params.get('subtitleslang', False): - sub_lang = self._downloader.params.get('subtitleslang') - elif 'en' in sub_lang_list: - sub_lang = 'en' - else: - sub_lang = list(sub_lang_list.keys())[0] - if not sub_lang in sub_lang_list: - return [(u'no closed captions found in the specified language "%s"' % sub_lang, None, None)] - - subtitle = self._request_subtitle(sub_lang, sub_lang_list[sub_lang].encode('utf-8'), video_id, sub_format) - return [subtitle] - - def _extract_all_subtitles(self, video_id): - sub_lang_list = self._get_available_subtitles(video_id) - sub_format 
= self._downloader.params.get('subtitlesformat')
- if isinstance(sub_lang_list,tuple): #There was some error, it didn't get the available subtitles
- return [(sub_lang_list[0], None, None)]
- subtitles = []
- for sub_lang in sub_lang_list:
- subtitle = self._request_subtitle(sub_lang, sub_lang_list[sub_lang].encode('utf-8'), video_id, sub_format)
- subtitles.append(subtitle)
- return subtitles
+ list_url = caption_url + '&' + list_params
+ list_page = self._download_webpage(list_url, video_id)
+ caption_list = xml.etree.ElementTree.fromstring(list_page.encode('utf-8'))
+ original_lang_node = caption_list.find('track')
+ if original_lang_node.attrib.get('kind') != 'asr':
+ self._downloader.report_warning(u'Video doesn\'t have automatic captions')
+ return {}
+ original_lang = original_lang_node.attrib['lang_code']
+
+ sub_lang_list = {}
+ for lang_node in caption_list.findall('target'):
+ sub_lang = lang_node.attrib['lang_code']
+ params = compat_urllib_parse.urlencode({
+ 'lang': original_lang,
+ 'tlang': sub_lang,
+ 'fmt': sub_format,
+ 'ts': timestamp,
+ 'kind': 'asr',
+ })
+ sub_lang_list[sub_lang] = caption_url + '&' + params
+ return sub_lang_list
+ # An extractor error can be raised by the download process if there are
+ # no automatic captions but there are subtitles
except (KeyError, ExtractorError):
+ self._downloader.report_warning(err_msg)
+ return {}
def _print_formats(self, formats):
print('Available formats:')
for x in formats:
- print('%s\t:\t%s\t[%s]' %(x, self._video_extensions.get(x, 'flv'), self._video_dimensions.get(x, '???')))
-
- def _real_initialize(self):
- if self._downloader is None:
- return
-
- # Set language
- request = compat_urllib_request.Request(self._LANG_URL)
- try:
- self.report_lang()
- compat_urllib_request.urlopen(request).read()
- except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.report_warning(u'unable to set language: %s' % compat_str(err))
- return
-
- (username, password) = self._get_login_info()
-
- # No authentication to be performed
- if username is None:
- return
-
- request = compat_urllib_request.Request(self._LOGIN_URL)
- try:
- login_page = compat_urllib_request.urlopen(request).read().decode('utf-8')
- except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.report_warning(u'unable to fetch login page: %s' % compat_str(err))
- return
-
- galx = None
- dsh = None
- match = re.search(re.compile(r'<input.+?name="GALX".+?value="(.+?)"', re.DOTALL), login_page)
- if match:
- galx = match.group(1)
-
- match = re.search(re.compile(r'<input.+?name="dsh".+?value="(.+?)"', re.DOTALL), login_page)
- if match:
- dsh = match.group(1)
-
- # Log in
- login_form_strs = {
- u'continue': u'https://www.youtube.com/signin?action_handle_signin=true&feature=sign_in_button&hl=en_US&nomobiletemp=1',
- u'Email': username,
- u'GALX': galx,
- u'Passwd': password,
- u'PersistentCookie': u'yes',
- u'_utf8': u'霱',
- u'bgresponse': u'js_disabled',
- u'checkConnection': u'',
- u'checkedDomains': u'youtube',
- u'dnConn': u'',
- u'dsh': dsh,
- u'pstMsg': u'0',
- u'rmShown': u'1',
- u'secTok': u'',
- u'signIn': u'Sign in',
- u'timeStmp': u'',
- u'service': u'youtube',
- u'uilel': u'3',
- u'hl': u'en_US',
- }
- # Convert to UTF-8 *before* urlencode because Python 2.x's urlencode
- # chokes on unicode
- login_form = dict((k.encode('utf-8'), v.encode('utf-8')) for k,v in login_form_strs.items())
- login_data = compat_urllib_parse.urlencode(login_form).encode('ascii')
- request = compat_urllib_request.Request(self._LOGIN_URL, login_data)
- try:
- self.report_login()
- login_results = compat_urllib_request.urlopen(request).read().decode('utf-8')
- if re.search(r'(?i)<form[^>]* id="gaia_loginform"', login_results) is not None:
- self._downloader.report_warning(u'unable to log in: bad username or password')
- return
- except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- self._downloader.report_warning(u'unable to log in: %s' % compat_str(err))
- return
-
- # Confirm age
- age_form = {
- 'next_url': '/',
- 'action_confirm': 'Confirm',
- }
- request = compat_urllib_request.Request(self._AGE_URL, compat_urllib_parse.urlencode(age_form))
- try:
- self.report_age_confirmation()
- compat_urllib_request.urlopen(request).read().decode('utf-8')
- except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
- raise ExtractorError(u'Unable to confirm age: %s' % compat_str(err))
+ print('%s\t:\t%s\t[%s]%s' %(x, self._video_extensions.get(x, 'flv'),
+ self._video_dimensions.get(x, '???'),
+ ' ('+self._special_itags[x]+')' if x in self._special_itags else ''))
def _extract_id(self, url):
mobj = re.match(self._VALID_URL, url, re.VERBOSE)
@@ -394,10 +1187,74 @@ class YoutubeIE(InfoExtractor):
video_id = mobj.group(2)
return video_id
- def _real_extract(self, url):
- if re.match(r'(?:https?://)?[^/]+/watch\?feature=[a-z_]+$', url):
- self._downloader.report_warning(u'Did you forget to quote the URL? Remember that & is a meta-character in most shells, so you want to put the URL in quotes, like youtube-dl \'http://www.youtube.com/watch?feature=foo&v=BaW_jenozKc\' (or simply youtube-dl BaW_jenozKc ).')
+ def _get_video_url_list(self, url_map):
+ """
+ Transform a dictionary in the format {itag: url} into a list of (itag, url)
+ tuples with the requested formats.
+ """
+ req_format = self._downloader.params.get('format', None)
+ format_limit = self._downloader.params.get('format_limit', None)
+ available_formats = self._available_formats_prefer_free if self._downloader.params.get('prefer_free_formats', False) else self._available_formats
+ if format_limit is not None and format_limit in available_formats:
+ format_list = available_formats[available_formats.index(format_limit):]
+ else:
+ format_list = available_formats
+ existing_formats = [x for x in format_list if x in url_map]
+ if len(existing_formats) == 0:
+ raise ExtractorError(u'no known formats available for video')
+ if self._downloader.params.get('listformats', None):
+ self._print_formats(existing_formats)
+ return
+ if req_format is None or req_format == 'best':
+ video_url_list = [(existing_formats[0], url_map[existing_formats[0]])] # Best quality
+ elif req_format == 'worst':
+ video_url_list = [(existing_formats[-1], url_map[existing_formats[-1]])] # worst quality
+ elif req_format in ('-1', 'all'):
+ video_url_list = [(f, url_map[f]) for f in existing_formats] # All formats
+ else:
+ # Specific formats. We pick the first in a slash-delimited sequence.
+ # Format can be specified as itag or 'mp4' or 'flv' etc. We pick the highest quality
+ # available in the specified format. For example,
+ # if '1/2/3/4' is requested and '2' and '4' are available, we pick '2'.
+ # if '1/mp4/3/4' is requested and '1' and '5' (an mp4) are available, we pick '1'.
+ # if '1/mp4/3/4' is requested and '4' and '5' (an mp4) are available, we pick '5'.
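+ # Note the for/else below: the inner loop's "else: continue" runs only
+ # when no itag of the requested container matched, sending control back
+ # to the outer loop to try the next requested format.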
+ req_formats = req_format.split('/') + video_url_list = None + for rf in req_formats: + if rf in url_map: + video_url_list = [(rf, url_map[rf])] + break + if rf in self._video_formats_map: + for srf in self._video_formats_map[rf]: + if srf in url_map: + video_url_list = [(srf, url_map[srf])] + break + else: + continue + break + if video_url_list is None: + raise ExtractorError(u'requested format not available') + return video_url_list + + def _extract_from_m3u8(self, manifest_url, video_id): + url_map = {} + def _get_urls(_manifest): + lines = _manifest.split('\n') + urls = filter(lambda l: l and not l.startswith('#'), + lines) + return urls + manifest = self._download_webpage(manifest_url, video_id, u'Downloading formats manifest') + formats_urls = _get_urls(manifest) + for format_url in formats_urls: + itag = self._search_regex(r'itag/(\d+?)/', format_url, 'itag') + url_map[itag] = format_url + return url_map + + def _extract_annotations(self, video_id): + url = 'https://www.youtube.com/annotations_invideo?features=1&legacy=1&video_id=%s' % video_id + return self._download_webpage(url, video_id, note=u'Searching for annotations.', errnote=u'Unable to download video annotations.') + def _real_extract(self, url): # Extract original video URL from URL with redirection, like age verification, using next_url parameter mobj = re.search(self._NEXT_URL_RE, url) if mobj: @@ -416,7 +1273,7 @@ class YoutubeIE(InfoExtractor): video_webpage = video_webpage_bytes.decode('utf-8', 'ignore') # Attempt to extract SWF player URL - mobj = re.search(r'swfConfig.*?"(http:\\/\\/.*?watch.*?-.*?\.swf)"', video_webpage) + mobj = re.search(r'swfConfig.*?"(https?:\\/\\/.*?watch.*?-.*?\.swf)"', video_webpage) if mobj is not None: player_url = re.sub(r'\\(.)', r'\1', mobj.group(1)) else: @@ -480,9 +1337,11 @@ class YoutubeIE(InfoExtractor): self._downloader.report_warning(u'unable to extract uploader nickname') # title - if 'title' not in video_info: - raise ExtractorError(u'Unable to extract video title') - video_title = compat_urllib_parse.unquote_plus(video_info['title'][0]) + if 'title' in video_info: + video_title = compat_urllib_parse.unquote_plus(video_info['title'][0]) + else: + self._downloader.report_warning(u'Unable to extract video title') + video_title = u'_' # thumbnail image # We try first to get a high quality image: @@ -492,7 +1351,7 @@ class YoutubeIE(InfoExtractor): video_thumbnail = m_thumb.group(1) elif 'thumbnail_url' not in video_info: self._downloader.report_warning(u'unable to extract video thumbnail') - video_thumbnail = '' + video_thumbnail = None else: # don't panic if we can't find it video_thumbnail = compat_urllib_parse.unquote_plus(video_info['thumbnail_url'][0]) @@ -515,30 +1374,10 @@ class YoutubeIE(InfoExtractor): video_description = u'' # subtitles - video_subtitles = None - - if self._downloader.params.get('writesubtitles', False): - video_subtitles = self._extract_subtitle(video_id) - if video_subtitles: - (sub_error, sub_lang, sub) = video_subtitles[0] - if sub_error: - self._downloader.report_warning(sub_error) - - if self._downloader.params.get('writeautomaticsub', False): - video_subtitles = self._request_automatic_caption(video_id, video_webpage) - (sub_error, sub_lang, sub) = video_subtitles[0] - if sub_error: - self._downloader.report_warning(sub_error) - - if self._downloader.params.get('allsubtitles', False): - video_subtitles = self._extract_all_subtitles(video_id) - for video_subtitle in video_subtitles: - (sub_error, sub_lang, sub) = video_subtitle - if sub_error: - 
self._downloader.report_warning(sub_error) + video_subtitles = self.extract_subtitles(video_id, video_webpage) if self._downloader.params.get('listsubtitles', False): - self._list_available_subtitles(video_id) + self._list_available_subtitles(video_id, video_webpage) return if 'length_seconds' not in video_info: @@ -547,8 +1386,12 @@ class YoutubeIE(InfoExtractor): else: video_duration = compat_urllib_parse.unquote_plus(video_info['length_seconds'][0]) + # annotations + video_annotations = None + if self._downloader.params.get('writeannotations', False): + video_annotations = self._extract_annotations(video_id) + # Decide which formats to download - req_format = self._downloader.params.get('format', None) try: mobj = re.search(r';ytplayer.config = ({.*?});', video_webpage) @@ -558,10 +1401,23 @@ class YoutubeIE(InfoExtractor): args = info['args'] # Easy way to know if the 's' value is in url_encoded_fmt_stream_map # this signatures are encrypted + if 'url_encoded_fmt_stream_map' not in args: + raise ValueError(u'No stream_map present') # caught below m_s = re.search(r'[&,]s=', args['url_encoded_fmt_stream_map']) if m_s is not None: self.to_screen(u'%s: Encrypted signatures detected.' % video_id) video_info['url_encoded_fmt_stream_map'] = [args['url_encoded_fmt_stream_map']] + m_s = re.search(r'[&,]s=', args.get('adaptive_fmts', u'')) + if m_s is not None: + if 'url_encoded_fmt_stream_map' in video_info: + video_info['url_encoded_fmt_stream_map'][0] += ',' + args['adaptive_fmts'] + else: + video_info['url_encoded_fmt_stream_map'] = [args['adaptive_fmts']] + elif 'adaptive_fmts' in video_info: + if 'url_encoded_fmt_stream_map' in video_info: + video_info['url_encoded_fmt_stream_map'][0] += ',' + video_info['adaptive_fmts'][0] + else: + video_info['url_encoded_fmt_stream_map'] = video_info['adaptive_fmts'] except ValueError: pass @@ -579,63 +1435,59 @@ class YoutubeIE(InfoExtractor): if 'sig' in url_data: url += '&signature=' + url_data['sig'][0] elif 's' in url_data: + encrypted_sig = url_data['s'][0] if self._downloader.params.get('verbose'): - s = url_data['s'][0] if age_gate: - player_version = self._search_regex(r'ad3-(.+?)\.swf', - video_info['ad3_module'][0], 'flash player', - fatal=False) - player = 'flash player %s' % player_version + if player_url is None: + player_version = 'unknown' + else: + player_version = self._search_regex( + r'-(.+)\.swf$', player_url, + u'flash player', fatal=False) + player_desc = 'flash player %s' % player_version else: - player = u'html5 player %s' % self._search_regex(r'html5player-(.+?)\.js', video_webpage, + player_version = self._search_regex( + r'html5player-(.+?)\.js', video_webpage, 'html5 player', fatal=False) - self.to_screen('encrypted signature length %d (%d.%d), itag %s, %s' % - (len(s), len(s.split('.')[0]), len(s.split('.')[1]), url_data['itag'][0], player)) - signature = self._decrypt_signature(url_data['s'][0]) + player_desc = u'html5 player %s' % player_version + + parts_sizes = u'.'.join(compat_str(len(part)) for part in encrypted_sig.split('.')) + self.to_screen(u'encrypted signature length %d (%s), itag %s, %s' % + (len(encrypted_sig), parts_sizes, url_data['itag'][0], player_desc)) + + if not age_gate: + jsplayer_url_json = self._search_regex( + r'"assets":.+?"js":\s*("[^"]+")', + video_webpage, u'JS player URL') + player_url = json.loads(jsplayer_url_json) + + signature = self._decrypt_signature( + encrypted_sig, video_id, player_url, age_gate) url += '&signature=' + signature if 'ratebypass' not in url: url += '&ratebypass=yes' 
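+ # The ratebypass=yes parameter added above presumably asks the server
+ # not to throttle the transfer to the media's playback bitrate; it is
+ # appended whenever the URL lacks it.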
url_map[url_data['itag'][0]] = url - - format_limit = self._downloader.params.get('format_limit', None) - available_formats = self._available_formats_prefer_free if self._downloader.params.get('prefer_free_formats', False) else self._available_formats - if format_limit is not None and format_limit in available_formats: - format_list = available_formats[available_formats.index(format_limit):] - else: - format_list = available_formats - existing_formats = [x for x in format_list if x in url_map] - if len(existing_formats) == 0: - raise ExtractorError(u'no known formats available for video') - if self._downloader.params.get('listformats', None): - self._print_formats(existing_formats) + video_url_list = self._get_video_url_list(url_map) + if not video_url_list: return - if req_format is None or req_format == 'best': - video_url_list = [(existing_formats[0], url_map[existing_formats[0]])] # Best quality - elif req_format == 'worst': - video_url_list = [(existing_formats[-1], url_map[existing_formats[-1]])] # worst quality - elif req_format in ('-1', 'all'): - video_url_list = [(f, url_map[f]) for f in existing_formats] # All formats - else: - # Specific formats. We pick the first in a slash-delimeted sequence. - # For example, if '1/2/3/4' is requested and '2' and '4' are available, we pick '2'. - req_formats = req_format.split('/') - video_url_list = None - for rf in req_formats: - if rf in url_map: - video_url_list = [(rf, url_map[rf])] - break - if video_url_list is None: - raise ExtractorError(u'requested format not available') + elif video_info.get('hlsvp'): + manifest_url = video_info['hlsvp'][0] + url_map = self._extract_from_m3u8(manifest_url, video_id) + video_url_list = self._get_video_url_list(url_map) + if not video_url_list: + return + else: - raise ExtractorError(u'no conn or url_encoded_fmt_stream_map information found in video info') + raise ExtractorError(u'no conn, hlsvp or url_encoded_fmt_stream_map information found in video info') results = [] for format_param, video_real_url in video_url_list: # Extension video_extension = self._video_extensions.get(format_param, 'flv') - video_format = '{0} - {1}'.format(format_param if format_param else video_extension, - self._video_dimensions.get(format_param, '???')) + video_format = '{0} - {1}{2}'.format(format_param if format_param else video_extension, + self._video_dimensions.get(format_param, '???'), + ' ('+self._special_itags[format_param]+')' if format_param in self._special_itags else '') results.append({ 'id': video_id, @@ -650,7 +1502,9 @@ class YoutubeIE(InfoExtractor): 'description': video_description, 'player_url': player_url, 'subtitles': video_subtitles, - 'duration': video_duration + 'duration': video_duration, + 'age_limit': 18 if age_gate else 0, + 'annotations': video_annotations }) return results @@ -665,10 +1519,10 @@ class YoutubePlaylistIE(InfoExtractor): \? (?:.*?&)*? 
(?:p|a|list)= | p/ ) - ((?:PL|EC|UU)?[0-9A-Za-z-_]{10,}) + ((?:PL|EC|UU|FL)?[0-9A-Za-z-_]{10,}) .* | - ((?:PL|EC|UU)[0-9A-Za-z-_]{10,}) + ((?:PL|EC|UU|FL)[0-9A-Za-z-_]{10,}) )""" _TEMPLATE_URL = 'https://gdata.youtube.com/feeds/api/playlists/%s?max-results=%i&start-index=%i&v=2&alt=json&safeSearch=none' _MAX_RESULTS = 50 @@ -684,14 +1538,27 @@ class YoutubePlaylistIE(InfoExtractor): mobj = re.match(self._VALID_URL, url, re.VERBOSE) if mobj is None: raise ExtractorError(u'Invalid URL: %s' % url) + playlist_id = mobj.group(1) or mobj.group(2) + + # Check if it's a video-specific URL + query_dict = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query) + if 'v' in query_dict: + video_id = query_dict['v'][0] + if self._downloader.params.get('noplaylist'): + self.to_screen(u'Downloading just video %s because of --no-playlist' % video_id) + return self.url_result('https://www.youtube.com/watch?v=' + video_id, 'Youtube') + else: + self.to_screen(u'Downloading playlist PL%s - add --no-playlist to just download video %s' % (playlist_id, video_id)) # Download playlist videos from API - playlist_id = mobj.group(1) or mobj.group(2) - page_num = 1 videos = [] - while True: - url = self._TEMPLATE_URL % (playlist_id, self._MAX_RESULTS, self._MAX_RESULTS * (page_num - 1) + 1) + for page_num in itertools.count(1): + start_index = self._MAX_RESULTS * (page_num - 1) + 1 + if start_index >= 1000: + self._downloader.report_warning(u'Max number of results reached') + break + url = self._TEMPLATE_URL % (playlist_id, self._MAX_RESULTS, start_index) page = self._download_webpage(url, playlist_id, u'Downloading page #%s' % page_num) try: @@ -708,12 +1575,11 @@ class YoutubePlaylistIE(InfoExtractor): for entry in response['feed']['entry']: index = entry['yt$position']['$t'] - if 'media$group' in entry and 'media$player' in entry['media$group']: - videos.append((index, entry['media$group']['media$player']['url'])) - - if len(response['feed']['entry']) < self._MAX_RESULTS: - break - page_num += 1 + if 'media$group' in entry and 'yt$videoid' in entry['media$group']: + videos.append(( + index, + 'https://www.youtube.com/watch?v=' + entry['media$group']['yt$videoid']['$t'] + )) videos = [v[1] for v in sorted(videos)] @@ -726,7 +1592,7 @@ class YoutubeChannelIE(InfoExtractor): _VALID_URL = r"^(?:https?://)?(?:youtu\.be|(?:\w+\.)?youtube(?:-nocookie)?\.com)/channel/([0-9A-Za-z_-]+)" _TEMPLATE_URL = 'http://www.youtube.com/channel/%s/videos?sort=da&flow=list&view=0&page=%s&gl=US&hl=en' _MORE_PAGES_INDICATOR = 'yt-uix-load-more' - _MORE_PAGES_URL = 'http://www.youtube.com/channel_ajax?action_load_more_videos=1&flow=list&paging=%s&view=0&sort=da&channel_id=%s' + _MORE_PAGES_URL = 'http://www.youtube.com/c4_browse_ajax?action_load_more_videos=1&flow=list&paging=%s&view=0&sort=da&channel_id=%s' IE_NAME = u'youtube:channel' def extract_videos_from_page(self, page): @@ -757,9 +1623,7 @@ class YoutubeChannelIE(InfoExtractor): # Download any subsequent channel pages using the json-based channel_ajax query if self._MORE_PAGES_INDICATOR in page: - while True: - pagenum = pagenum + 1 - + for pagenum in itertools.count(1): url = self._MORE_PAGES_URL % (pagenum, channel_id) page = self._download_webpage(url, channel_id, u'Downloading page #%s' % pagenum) @@ -781,13 +1645,20 @@ class YoutubeChannelIE(InfoExtractor): class YoutubeUserIE(InfoExtractor): IE_DESC = u'YouTube.com user videos (URL or "ytuser" keyword)' - _VALID_URL = r'(?:(?:(?:https?://)?(?:\w+\.)?youtube\.com/user/)|ytuser:)([A-Za-z0-9_-]+)' + _VALID_URL = 
r'(?:(?:(?:https?://)?(?:\w+\.)?youtube\.com/(?:user/)?(?!(?:attribution_link|watch)(?:$|[^a-z_A-Z0-9-])))|ytuser:)(?!feed/)([A-Za-z0-9_-]+)' _TEMPLATE_URL = 'http://gdata.youtube.com/feeds/api/users/%s' _GDATA_PAGE_SIZE = 50 - _GDATA_URL = 'http://gdata.youtube.com/feeds/api/users/%s/uploads?max-results=%d&start-index=%d' - _VIDEO_INDICATOR = r'/watch\?v=(.+?)[\<&]' + _GDATA_URL = 'http://gdata.youtube.com/feeds/api/users/%s/uploads?max-results=%d&start-index=%d&alt=json' IE_NAME = u'youtube:user' + @classmethod + def suitable(cls, url): + # Don't return True if the url can be extracted with another youtube + # extractor; this regex is too permissive and would match it as well. + other_ies = iter(klass for (name, klass) in globals().items() if name.endswith('IE') and klass is not cls) + if any(ie.suitable(url) for ie in other_ies): return False + else: return super(YoutubeUserIE, cls).suitable(url) + def _real_extract(self, url): # Extract username mobj = re.match(self._VALID_URL, url) @@ -802,22 +1673,26 @@ class YoutubeUserIE(InfoExtractor): # all of them. video_ids = [] - pagenum = 0 - while True: + for pagenum in itertools.count(0): start_index = pagenum * self._GDATA_PAGE_SIZE + 1 gdata_url = self._GDATA_URL % (username, self._GDATA_PAGE_SIZE, start_index) page = self._download_webpage(gdata_url, username, u'Downloading video ids from %d to %d' % (start_index, start_index + self._GDATA_PAGE_SIZE)) + try: + response = json.loads(page) + except ValueError as err: + raise ExtractorError(u'Invalid JSON in API response: ' + compat_str(err)) + if 'entry' not in response['feed']: + # Number of videos is a multiple of self._GDATA_PAGE_SIZE + break + # Extract video identifiers ids_in_page = [] - - for mobj in re.finditer(self._VIDEO_INDICATOR, page): - if mobj.group(1) not in ids_in_page: - ids_in_page.append(mobj.group(1)) - + for entry in response['feed']['entry']: + ids_in_page.append(entry['id']['$t'].split('/')[-1]) video_ids.extend(ids_in_page) # A little optimization - if current page is not @@ -829,8 +1704,6 @@ class YoutubeUserIE(InfoExtractor): if len(ids_in_page) < self._GDATA_PAGE_SIZE: break - pagenum += 1 - urls = ['http://www.youtube.com/watch?v=%s' % video_id for video_id in video_ids] url_results = [self.url_result(rurl, 'Youtube') for rurl in urls] return [self.playlist_result(url_results, playlist_title = username)] @@ -893,38 +1766,90 @@ class YoutubeShowIE(InfoExtractor): return [self.url_result('https://www.youtube.com' + season.group(1), 'YoutubePlaylist') for season in m_seasons] -class YoutubeSubscriptionsIE(YoutubeIE): - """It's a subclass of YoutubeIE because we need to login""" - IE_DESC = u'YouTube.com subscriptions feed, "ytsubs" keyword(requires authentication)' - _VALID_URL = r'https?://www\.youtube\.com/feed/subscriptions|:ytsubs(?:criptions)?' - IE_NAME = u'youtube:subscriptions' - _FEED_TEMPLATE = 'http://www.youtube.com/feed_ajax?action_load_system_feed=1&feed_name=subscriptions&paging=%s' +class YoutubeFeedsInfoExtractor(YoutubeBaseInfoExtractor): + """ + Base class for extractors that fetch info from + http://www.youtube.com/feed_ajax + Subclasses must define the _FEED_NAME and _PLAYLIST_TITLE properties.
+ """ + _LOGIN_REQUIRED = True _PAGING_STEP = 30 + # use action_load_personal_feed instead of action_load_system_feed + _PERSONAL_FEED = False - # Overwrite YoutubeIE properties we don't want - _TESTS = [] - @classmethod - def suitable(cls, url): - return re.match(cls._VALID_URL, url) is not None + @property + def _FEED_TEMPLATE(self): + action = 'action_load_system_feed' + if self._PERSONAL_FEED: + action = 'action_load_personal_feed' + return 'http://www.youtube.com/feed_ajax?%s=1&feed_name=%s&paging=%%s' % (action, self._FEED_NAME) + + @property + def IE_NAME(self): + return u'youtube:%s' % self._FEED_NAME def _real_initialize(self): - (username, password) = self._get_login_info() - if username is None: - raise ExtractorError(u'No login info available, needed for downloading the Youtube subscriptions.', expected=True) - super(YoutubeSubscriptionsIE, self)._real_initialize() + self._login() def _real_extract(self, url): feed_entries = [] # The step argument is available only in 2.7 or higher for i in itertools.count(0): paging = i*self._PAGING_STEP - info = self._download_webpage(self._FEED_TEMPLATE % paging, 'feed', + info = self._download_webpage(self._FEED_TEMPLATE % paging, + u'%s feed' % self._FEED_NAME, u'Downloading page %s' % i) info = json.loads(info) feed_html = info['feed_html'] - m_ids = re.finditer(r'"/watch\?v=(.*?)"', feed_html) + m_ids = re.finditer(r'"/watch\?v=(.*?)["&]', feed_html) ids = orderedSet(m.group(1) for m in m_ids) feed_entries.extend(self.url_result(id, 'Youtube') for id in ids) if info['paging'] is None: break - return self.playlist_result(feed_entries, playlist_title='Youtube Subscriptions') + return self.playlist_result(feed_entries, playlist_title=self._PLAYLIST_TITLE) + +class YoutubeSubscriptionsIE(YoutubeFeedsInfoExtractor): + IE_DESC = u'YouTube.com subscriptions feed, "ytsubs" keyword(requires authentication)' + _VALID_URL = r'https?://www\.youtube\.com/feed/subscriptions|:ytsubs(?:criptions)?' + _FEED_NAME = 'subscriptions' + _PLAYLIST_TITLE = u'Youtube Subscriptions' + +class YoutubeRecommendedIE(YoutubeFeedsInfoExtractor): + IE_DESC = u'YouTube.com recommended videos, "ytrec" keyword (requires authentication)' + _VALID_URL = r'https?://www\.youtube\.com/feed/recommended|:ytrec(?:ommended)?' + _FEED_NAME = 'recommended' + _PLAYLIST_TITLE = u'Youtube Recommended videos' + +class YoutubeWatchLaterIE(YoutubeFeedsInfoExtractor): + IE_DESC = u'Youtube watch later list, "ytwatchlater" keyword (requires authentication)' + _VALID_URL = r'https?://www\.youtube\.com/feed/watch_later|:ytwatchlater' + _FEED_NAME = 'watch_later' + _PLAYLIST_TITLE = u'Youtube Watch Later' + _PAGING_STEP = 100 + _PERSONAL_FEED = True + +class YoutubeFavouritesIE(YoutubeBaseInfoExtractor): + IE_NAME = u'youtube:favorites' + IE_DESC = u'YouTube.com favourite videos, "ytfav" keyword (requires authentication)' + _VALID_URL = r'https?://www\.youtube\.com/my_favorites|:ytfav(?:ou?rites)?' + _LOGIN_REQUIRED = True + + def _real_extract(self, url): + webpage = self._download_webpage('https://www.youtube.com/my_favorites', 'Youtube Favourites videos') + playlist_id = self._search_regex(r'list=(.+?)["&]', webpage, u'favourites playlist id') + return self.url_result(playlist_id, 'YoutubePlaylist') + + +class YoutubeTruncatedURLIE(InfoExtractor): + IE_NAME = 'youtube:truncated_url' + IE_DESC = False # Do not list + _VALID_URL = r'(?:https?://)?[^/]+/watch\?feature=[a-z_]+$' + + def _real_extract(self, url): + raise ExtractorError( + u'Did you forget to quote the URL? 
Remember that & is a meta ' + u'character in most shells, so you want to put the URL in quotes, ' + u'like youtube-dl ' + u'\'http://www.youtube.com/watch?feature=foo&v=BaW_jenozKc\'' + u' (or simply youtube-dl BaW_jenozKc ).', + expected=True) diff --git a/youtube_dl/extractor/zdf.py b/youtube_dl/extractor/zdf.py index 418509cb9..faed7ff7f 100644 --- a/youtube_dl/extractor/zdf.py +++ b/youtube_dl/extractor/zdf.py @@ -2,16 +2,14 @@ import re from .common import InfoExtractor from ..utils import ( + determine_ext, ExtractorError, - unescapeHTML, ) + class ZDFIE(InfoExtractor): - _VALID_URL = r'^http://www\.zdf\.de\/ZDFmediathek\/(.*beitrag\/video\/)(?P<video_id>[^/\?]+)(?:\?.*)?' - _TITLE = r'<h1(?: class="beitragHeadline")?>(?P<title>.*)</h1>' + _VALID_URL = r'^http://www\.zdf\.de\/ZDFmediathek(?P<hash>#)?\/(.*beitrag\/video\/)(?P<video_id>[^/\?]+)(?:\?.*)?' _MEDIA_STREAM = r'<a href="(?P<video_url>.+(?P<media_type>.streaming).+/zdf/(?P<quality>[^\/]+)/[^"]*)".+class="play".+>' - _MMS_STREAM = r'href="(?P<video_url>mms://[^"]*)"' - _RTSP_STREAM = r'(?P<video_url>rtsp://[^"]*.mp4)' def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) @@ -19,6 +17,9 @@ class ZDFIE(InfoExtractor): raise ExtractorError(u'Invalid URL: %s' % url) video_id = mobj.group('video_id') + if mobj.group('hash'): + url = url.replace(u'#', u'', 1) + html = self._download_webpage(url, video_id) streams = [m.groupdict() for m in re.finditer(self._MEDIA_STREAM, html)] if streams is None: @@ -27,39 +28,48 @@ class ZDFIE(InfoExtractor): # s['media_type'] == 'wstreaming' -> use 'Windows Media Player' and mms url # s['media_type'] == 'hstreaming' -> use 'Quicktime' and rtsp url # choose first/default media type and highest quality for now - for s in streams: #find 300 - dsl1000mbit - if s['quality'] == '300' and s['media_type'] == 'wstreaming': - stream_=s - break - for s in streams: #find veryhigh - dsl2000mbit - if s['quality'] == 'veryhigh' and s['media_type'] == 'wstreaming': # 'hstreaming' - rtsp is not working - stream_=s - break - if stream_ is None: + def stream_pref(s): + TYPE_ORDER = ['ostreaming', 'hstreaming', 'wstreaming'] + try: + type_pref = TYPE_ORDER.index(s['media_type']) + except ValueError: + type_pref = 999 + + QUALITY_ORDER = ['veryhigh', '300'] + try: + quality_pref = QUALITY_ORDER.index(s['quality']) + except ValueError: + quality_pref = 999 + + return (type_pref, quality_pref) + + sorted_streams = sorted(streams, key=stream_pref) + if not sorted_streams: raise ExtractorError(u'No stream found.') + stream = sorted_streams[0] - media_link = self._download_webpage(stream_['video_url'], video_id,'Get stream URL') + media_link = self._download_webpage( + stream['video_url'], + video_id, + u'Get stream URL') - self.report_extraction(video_id) - mobj = re.search(self._TITLE, html) - if mobj is None: - raise ExtractorError(u'Cannot extract title') - title = unescapeHTML(mobj.group('title')) + MMS_STREAM = r'href="(?P<video_url>mms://[^"]*)"' + RTSP_STREAM = r'(?P<video_url>rtsp://[^"]*\.mp4)' - mobj = re.search(self._MMS_STREAM, media_link) + mobj = re.search(MMS_STREAM, media_link) if mobj is None: - mobj = re.search(self._RTSP_STREAM, media_link) + mobj = re.search(RTSP_STREAM, media_link) if mobj is None: raise ExtractorError(u'Cannot extract mms:// or rtsp:// URL') - mms_url = mobj.group('video_url') + video_url = mobj.group('video_url') - mobj = re.search('(.*)[.](?P<ext>[^.]+)', mms_url) - if mobj is None: - raise ExtractorError(u'Cannot extract extension') - ext = mobj.group('ext') +
title = self._html_search_regex( + r'<h1(?: class="beitragHeadline")?>(.*?)</h1>', + html, u'title') - return [{'id': video_id, - 'url': mms_url, - 'title': title, - 'ext': ext - }] + return { + 'id': video_id, + 'url': video_url, + 'title': title, + 'ext': determine_ext(video_url) + } diff --git a/youtube_dl/update.py b/youtube_dl/update.py index ccab6f27f..0689a4891 100644 --- a/youtube_dl/update.py +++ b/youtube_dl/update.py @@ -1,6 +1,9 @@ +import io import json import traceback import hashlib +import subprocess +import sys from zipimport import zipimporter from .utils import * @@ -34,7 +37,7 @@ def rsa_verify(message, signature, key): if signature != sha256(message).digest(): return False return True -def update_self(to_screen, verbose, filename): +def update_self(to_screen, verbose): """Update the program file with the latest version from the repository""" UPDATE_URL = "http://rg3.github.io/youtube-dl/update/" @@ -42,7 +45,6 @@ def update_self(to_screen, verbose, filename): JSON_URL = UPDATE_URL + 'versions.json' UPDATES_RSA_KEY = (0x9d60ee4d8f805312fdb15a62f87b95bd66177b91df176765d13514a0f1754bcd2057295c5b6f1d35daa6742c3ffc9a82d3e118861c207995a8031e151d863c9927e304576bc80692bc8e094896fcf11b66f3e29e04e3a71e9a11558558acea1840aec37fc396fb6b65dc81a1c4144e03bd1c011de62e3f1357b327d08426fe93, 65537) - if not isinstance(globals().get('__loader__'), zipimporter) and not hasattr(sys, "frozen"): to_screen(u'It looks like you installed youtube-dl with a package manager, pip, setup.py or a tarball. Please use that to update.') return @@ -75,11 +77,18 @@ def update_self(to_screen, verbose, filename): to_screen(u'ERROR: the versions file signature is invalid. Aborting.') return - to_screen(u'Updating to version ' + versions_info['latest'] + '...') - version = versions_info['versions'][versions_info['latest']] + version_id = versions_info['latest'] + to_screen(u'Updating to version ' + version_id + '...') + version = versions_info['versions'][version_id] print_notes(to_screen, versions_info['versions']) + filename = sys.argv[0] + # Py2EXE: Filename could be different + if hasattr(sys, "frozen") and not os.path.isfile(filename): + if os.path.isfile(filename + u'.exe'): + filename += u'.exe' + if not os.access(filename, os.W_OK): to_screen(u'ERROR: no write permissions on %s' % filename) return @@ -116,16 +125,18 @@ def update_self(to_screen, verbose, filename): try: bat = os.path.join(directory, 'youtube-dl-updater.bat') - b = open(bat, 'w') - b.write(""" -echo Updating youtube-dl... + with io.open(bat, 'w') as batfile: + batfile.write(u""" +@echo off +echo Waiting for file handle to be closed ... ping 127.0.0.1 -n 5 -w 1000 > NUL -move /Y "%s.new" "%s" -del "%s" - \n""" %(exe, exe, bat)) - b.close() +move /Y "%s.new" "%s" > NUL +echo Updated youtube-dl to version %s. 
+start /b "" cmd /c del "%%~f0"&exit /b" + \n""" % (exe, exe, version_id)) - os.startfile(bat) + subprocess.Popen([bat]) # Continues to run in the background + return # Do not show premature success messages except (IOError, OSError) as err: if verbose: to_screen(compat_str(traceback.format_exc())) to_screen(u'ERROR: unable to overwrite current version') diff --git a/youtube_dl/utils.py b/youtube_dl/utils.py index cf2ea654e..3e81c308b 100644 --- a/youtube_dl/utils.py +++ b/youtube_dl/utils.py @@ -1,19 +1,21 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- +import datetime +import email.utils import errno import gzip import io import json import locale import os +import pipes +import platform import re +import socket import sys import traceback import zlib -import email.utils -import socket -import datetime try: import urllib.request as compat_urllib_request @@ -61,6 +63,17 @@ except ImportError: # Python 2 import httplib as compat_http_client try: + from urllib.error import HTTPError as compat_HTTPError +except ImportError: # Python 2 + from urllib2 import HTTPError as compat_HTTPError + +try: + from urllib.request import urlretrieve as compat_urlretrieve +except ImportError: # Python 2 + from urllib import urlretrieve as compat_urlretrieve + + +try: from subprocess import DEVNULL compat_subprocess_get_DEVNULL = lambda: DEVNULL except ImportError: @@ -163,7 +176,7 @@ def compat_ord(c): compiled_regex_type = type(re.compile('')) std_headers = { - 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20100101 Firefox/10.0', + 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20100101 Firefox/10.0 (Chrome)', 'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'Accept-Encoding': 'gzip, deflate', @@ -207,7 +220,7 @@ if sys.version_info >= (2,7): def find_xpath_attr(node, xpath, key, val): """ Find the xpath xpath[@key=val] """ assert re.match(r'^[a-zA-Z]+$', key) - assert re.match(r'^[a-zA-Z@]*$', val) + assert re.match(r'^[a-zA-Z0-9@\s]*$', val) expr = xpath + u"[@%s='%s']" % (key, val) return node.find(expr) else: @@ -217,6 +230,19 @@ else: return f return None +# On python2.6 the xml.etree.ElementTree.Element methods don't support +# the namespace parameter +def xpath_with_ns(path, ns_map): + components = [c.split(':') for c in path.split('/')] + replaced = [] + for c in components: + if len(c) == 1: + replaced.append(c[0]) + else: + ns, tag = c + replaced.append('{%s}%s' % (ns_map[ns], tag)) + return '/'.join(replaced) + def htmlentity_transform(matchobj): """Transforms an HTML entity to a character. 
@@ -243,7 +269,17 @@ def htmlentity_transform(matchobj): return (u'&%s;' % entity) compat_html_parser.locatestarttagend = re.compile(r"""<[a-zA-Z][-.a-zA-Z0-9:_]*(?:\s+(?:(?<=['"\s])[^\s/>][^\s/=>]*(?:\s*=+\s*(?:'[^']*'|"[^"]*"|(?!['"])[^>\s]*))?\s*)*)?\s*""", re.VERBOSE) # backport bugfix -class AttrParser(compat_html_parser.HTMLParser): +class BaseHTMLParser(compat_html_parser.HTMLParser): + def __init__(self): + compat_html_parser.HTMLParser.__init__(self) + self.html = None + + def loads(self, html): + self.html = html + self.feed(html) + self.close() + +class AttrParser(BaseHTMLParser): """Modified HTMLParser that isolates a tag with the specified attribute""" def __init__(self, attribute, value): self.attribute = attribute @@ -251,10 +287,9 @@ class AttrParser(compat_html_parser.HTMLParser): self.result = None self.started = False self.depth = {} - self.html = None self.watch_startpos = False self.error_count = 0 - compat_html_parser.HTMLParser.__init__(self) + BaseHTMLParser.__init__(self) def error(self, message): if self.error_count > 10 or self.started: @@ -263,11 +298,6 @@ class AttrParser(compat_html_parser.HTMLParser): self.error_count += 1 self.goahead(1) - def loads(self, html): - self.html = html - self.feed(html) - self.close() - def handle_starttag(self, tag, attrs): attrs = dict(attrs) if self.started: @@ -328,6 +358,38 @@ def get_element_by_attribute(attribute, value, html): pass return parser.get_result() +class MetaParser(BaseHTMLParser): + """ + Modified HTMLParser that isolates a meta tag with the specified name + attribute. + """ + def __init__(self, name): + BaseHTMLParser.__init__(self) + self.name = name + self.content = None + self.result = None + + def handle_starttag(self, tag, attrs): + if tag != 'meta': + return + attrs = dict(attrs) + if attrs.get('name') == self.name: + self.result = attrs.get('content') + + def get_result(self): + return self.result + +def get_meta_content(name, html): + """ + Return the content attribute from the meta tag with the given name attribute. + """ + parser = MetaParser(name) + try: + parser.loads(html) + except compat_html_parser.HTMLParseError: + pass + return parser.get_result() + def clean_html(html): """Clean an HTML snippet into a readable string""" @@ -489,7 +551,7 @@ def make_HTTPS_handler(opts): class ExtractorError(Exception): """Error during info extraction.""" - def __init__(self, msg, tb=None, expected=False): + def __init__(self, msg, tb=None, expected=False, cause=None): """ tb, if given, is the original traceback (so that it can be printed out). If expected is set, this is a normal error message and most likely not a bug in youtube-dl. """ @@ -497,11 +559,12 @@ class ExtractorError(Exception): if sys.exc_info()[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError): expected = True if not expected: - msg = msg + u'; please report this issue on https://yt-dl.org/bug . Be sure to call youtube-dl with the --verbose flag and include its complete output.' + msg = msg + u'; please report this issue on https://yt-dl.org/bug . Be sure to call youtube-dl with the --verbose flag and include its complete output. Make sure you are using the latest version; type youtube-dl -U to update.'
super(ExtractorError, self).__init__(msg) self.traceback = tb self.exc_info = sys.exc_info() # preserve original exception + self.cause = cause def format_traceback(self): if self.traceback is None: @@ -622,8 +685,23 @@ class YoutubeDLHandler(compat_urllib_request.HTTPHandler): old_resp = resp # gzip if resp.headers.get('Content-encoding', '') == 'gzip': - gz = gzip.GzipFile(fileobj=io.BytesIO(resp.read()), mode='r') - resp = self.addinfourl_wrapper(gz, old_resp.headers, old_resp.url, old_resp.code) + content = resp.read() + gz = gzip.GzipFile(fileobj=io.BytesIO(content), mode='rb') + try: + uncompressed = io.BytesIO(gz.read()) + except IOError as original_ioerror: + # There may be junk at the end of the file + # See http://stackoverflow.com/q/4928560/35070 for details + for i in range(1, 1024): + try: + gz = gzip.GzipFile(fileobj=io.BytesIO(content[:-i]), mode='rb') + uncompressed = io.BytesIO(gz.read()) + except IOError: + continue + break + else: + raise original_ioerror + resp = self.addinfourl_wrapper(uncompressed, old_resp.headers, old_resp.url, old_resp.code) resp.msg = old_resp.msg # deflate if resp.headers.get('Content-encoding', '') == 'deflate': @@ -642,7 +720,17 @@ def unified_strdate(date_str): date_str = date_str.replace(',',' ') # %z (UTC offset) is only supported in python>=3.2 date_str = re.sub(r' (\+|-)[\d]*$', '', date_str) - format_expressions = ['%d %B %Y', '%B %d %Y', '%b %d %Y', '%Y-%m-%d', '%d/%m/%Y', '%Y/%m/%d %H:%M:%S', '%d.%m.%Y %H:%M'] + format_expressions = [ + '%d %B %Y', + '%B %d %Y', + '%b %d %Y', + '%Y-%m-%d', + '%d/%m/%Y', + '%Y/%m/%d %H:%M:%S', + '%d.%m.%Y %H:%M', + '%Y-%m-%dT%H:%M:%SZ', + '%Y-%m-%dT%H:%M:%S', + ] for expression in format_expressions: try: upload_date = datetime.datetime.strptime(date_str, expression).strftime('%Y%m%d') @@ -657,6 +745,9 @@ def determine_ext(url, default_ext=u'unknown_video'): else: return default_ext +def subtitles_filename(filename, sub_lang, sub_format): + return filename.rsplit('.', 1)[0] + u'.' + sub_lang + u'.'
+ sub_format + def date_from_str(date_str): """ Return a datetime object from a string in the format YYYYMMDD or @@ -708,3 +799,149 @@ class DateRange(object): return self.start <= date <= self.end def __str__(self): return '%s - %s' % ( self.start.isoformat(), self.end.isoformat()) + + +def platform_name(): + """ Returns the platform name as a compat_str """ + res = platform.platform() + if isinstance(res, bytes): + res = res.decode(preferredencoding()) + + assert isinstance(res, compat_str) + return res + + +def write_string(s, out=None): + if out is None: + out = sys.stderr + assert type(s) == type(u'') + + if ('b' in getattr(out, 'mode', '') or + sys.version_info[0] < 3): # Python 2 lies about mode of sys.stderr + s = s.encode(preferredencoding(), 'ignore') + out.write(s) + out.flush() + + +def bytes_to_intlist(bs): + if not bs: + return [] + if isinstance(bs[0], int): # Python 3 + return list(bs) + else: + return [ord(c) for c in bs] + + +def intlist_to_bytes(xs): + if not xs: + return b'' + if isinstance(chr(0), bytes): # Python 2 + return ''.join([chr(x) for x in xs]) + else: + return bytes(xs) + + +def get_cachedir(params={}): + cache_root = os.environ.get('XDG_CACHE_HOME', + os.path.expanduser('~/.cache')) + return params.get('cachedir', os.path.join(cache_root, 'youtube-dl')) + + +# Cross-platform file locking +if sys.platform == 'win32': + import ctypes.wintypes + import msvcrt + + class OVERLAPPED(ctypes.Structure): + _fields_ = [ + ('Internal', ctypes.wintypes.LPVOID), + ('InternalHigh', ctypes.wintypes.LPVOID), + ('Offset', ctypes.wintypes.DWORD), + ('OffsetHigh', ctypes.wintypes.DWORD), + ('hEvent', ctypes.wintypes.HANDLE), + ] + + kernel32 = ctypes.windll.kernel32 + LockFileEx = kernel32.LockFileEx + LockFileEx.argtypes = [ + ctypes.wintypes.HANDLE, # hFile + ctypes.wintypes.DWORD, # dwFlags + ctypes.wintypes.DWORD, # dwReserved + ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow + ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh + ctypes.POINTER(OVERLAPPED) # Overlapped + ] + LockFileEx.restype = ctypes.wintypes.BOOL + UnlockFileEx = kernel32.UnlockFileEx + UnlockFileEx.argtypes = [ + ctypes.wintypes.HANDLE, # hFile + ctypes.wintypes.DWORD, # dwReserved + ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow + ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh + ctypes.POINTER(OVERLAPPED) # Overlapped + ] + UnlockFileEx.restype = ctypes.wintypes.BOOL + whole_low = 0xffffffff + whole_high = 0x7fffffff + + def _lock_file(f, exclusive): + overlapped = OVERLAPPED() + overlapped.Offset = 0 + overlapped.OffsetHigh = 0 + overlapped.hEvent = 0 + f._lock_file_overlapped_p = ctypes.pointer(overlapped) + handle = msvcrt.get_osfhandle(f.fileno()) + if not LockFileEx(handle, 0x2 if exclusive else 0x0, 0, + whole_low, whole_high, f._lock_file_overlapped_p): + raise OSError('Locking file failed: %r' % ctypes.FormatError()) + + def _unlock_file(f): + assert f._lock_file_overlapped_p + handle = msvcrt.get_osfhandle(f.fileno()) + if not UnlockFileEx(handle, 0, + whole_low, whole_high, f._lock_file_overlapped_p): + raise OSError('Unlocking file failed: %r' % ctypes.FormatError()) + +else: + import fcntl + + def _lock_file(f, exclusive): + fcntl.lockf(f, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH) + + def _unlock_file(f): + fcntl.lockf(f, fcntl.LOCK_UN) + + +class locked_file(object): + def __init__(self, filename, mode, encoding=None): + assert mode in ['r', 'a', 'w'] + self.f = io.open(filename, mode, encoding=encoding) + self.mode = mode + + def __enter__(self): + exclusive = self.mode != 'r' 
+ try: + _lock_file(self.f, exclusive) + except IOError: + self.f.close() + raise + return self + + def __exit__(self, etype, value, traceback): + try: + _unlock_file(self.f) + finally: + self.f.close() + + def __iter__(self): + return iter(self.f) + + def write(self, *args): + return self.f.write(*args) + + def read(self, *args): + return self.f.read(*args) + + +def shell_quote(args): + return ' '.join(map(pipes.quote, args)) diff --git a/youtube_dl/version.py b/youtube_dl/version.py index 81e282cd9..1004af116 100644 --- a/youtube_dl/version.py +++ b/youtube_dl/version.py @@ -1,2 +1,2 @@ -__version__ = '2013.07.12' +__version__ = '2013.10.09'
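
The new YoutubeFeedsInfoExtractor base class above reduces a feed extractor to a handful of class attributes. A minimal sketch of a further subclass inside youtube.py (hypothetical, not part of this diff; the "history" feed name is invented for illustration):

class YoutubeHistoryExampleIE(YoutubeFeedsInfoExtractor):
    # Hypothetical example subclass: the base class derives IE_NAME
    # (u'youtube:history') and the feed_ajax URL from _FEED_NAME.
    IE_DESC = u'Example: YouTube.com watch history feed (requires authentication)'
    _VALID_URL = r'https?://www\.youtube\.com/feed/history|:ythistory'
    _FEED_NAME = 'history'
    _PLAYLIST_TITLE = u'Youtube History'
    _PERSONAL_FEED = True  # selects action_load_personal_feed in _FEED_TEMPLATE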
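
xpath_with_ns in utils.py expands 'ns:tag' path steps into the '{uri}tag' form, since the ElementTree find() on Python 2.6 lacks a namespaces parameter. A self-contained sketch (the MRSS snippet below is a made-up example):

import xml.etree.ElementTree as ET
from youtube_dl.utils import xpath_with_ns

NS_MAP = {'media': 'http://search.yahoo.com/mrss/'}
doc = ET.fromstring(
    '<root xmlns:media="http://search.yahoo.com/mrss/">'
    '<media:content url="http://example.com/v.mp4"/></root>')
# 'media:content' expands to '{http://search.yahoo.com/mrss/}content'
print(doc.find(xpath_with_ns('media:content', NS_MAP)).attrib['url'])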
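
get_meta_content, built on the new BaseHTMLParser/MetaParser split in utils.py, returns the content attribute of the <meta> tag with a given name. A quick check (the HTML snippet is illustrative):

from youtube_dl.utils import get_meta_content

# MetaParser records the content attribute of the first matching meta tag
html = u'<html><head><meta name="description" content="A test video"></head></html>'
assert get_meta_content('description', html) == u'A test video'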
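
The two ISO 8601 expressions added to unified_strdate let API timestamps normalize to the YYYYMMDD form used for upload_date. A quick check, assuming this patch's utils module (the dates are illustrative):

from youtube_dl.utils import unified_strdate

assert unified_strdate(u'2013-10-09T14:30:00Z') == u'20131009'  # new ISO 8601 pattern
assert unified_strdate(u'December 21, 2010') == u'20101221'     # older expressions still apply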
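
locked_file wraps io.open with LockFileEx on Windows and fcntl.lockf elsewhere, so concurrent youtube-dl processes can share a file safely: 'w'/'a' modes take an exclusive lock, 'r' a shared one. A minimal usage sketch (file name and contents are illustrative), together with the new shell_quote helper:

from youtube_dl.utils import locked_file, shell_quote

with locked_file('archive.txt', 'a', encoding='utf-8') as f:
    f.write(u'youtube BaW_jenozKc\n')   # written under an exclusive lock

with locked_file('archive.txt', 'r', encoding='utf-8') as f:
    for line in f:                      # shared lock; concurrent readers are fine
        print(line.strip())

print(shell_quote(['ffmpeg', '-i', 'input file.mp4']))  # ffmpeg -i 'input file.mp4'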