Diffstat (limited to 'youtube_dl')
-rw-r--r--  youtube_dl/__init__.py               476
-rw-r--r--  youtube_dl/extractor/__init__.py       5
-rw-r--r--  youtube_dl/extractor/adultswim.py      8
-rw-r--r--  youtube_dl/extractor/ard.py            6
-rw-r--r--  youtube_dl/extractor/br.py            11
-rw-r--r--  youtube_dl/extractor/chilloutzone.py   2
-rw-r--r--  youtube_dl/extractor/cloudy.py       108
-rw-r--r--  youtube_dl/extractor/daum.py          17
-rw-r--r--  youtube_dl/extractor/deezer.py        89
-rw-r--r--  youtube_dl/extractor/drtv.py           2
-rw-r--r--  youtube_dl/extractor/facebook.py      30
-rw-r--r--  youtube_dl/extractor/pornhd.py        51
-rw-r--r--  youtube_dl/extractor/pornoxo.py       65
-rw-r--r--  youtube_dl/extractor/prosiebensat1.py  5
-rw-r--r--  youtube_dl/extractor/spiegel.py       42
-rw-r--r--  youtube_dl/extractor/swrmediathek.py  14
-rw-r--r--  youtube_dl/extractor/telemb.py         1
-rw-r--r--  youtube_dl/extractor/tumblr.py        22
-rw-r--r--  youtube_dl/extractor/vporn.py         60
-rw-r--r--  youtube_dl/extractor/xhamster.py       2
-rw-r--r--  youtube_dl/extractor/youporn.py        1
-rw-r--r--  youtube_dl/extractor/youtube.py      306
-rw-r--r--  youtube_dl/options.py                481
-rw-r--r--  youtube_dl/utils.py                   29
-rw-r--r--  youtube_dl/version.py                  2
25 files changed, 1111 insertions, 724 deletions
diff --git a/youtube_dl/__init__.py b/youtube_dl/__init__.py
index 8f1a1a232..42d0a0180 100644
--- a/youtube_dl/__init__.py
+++ b/youtube_dl/__init__.py
@@ -81,20 +81,20 @@ __license__ = 'Public Domain'
import codecs
import io
-import optparse
import os
import random
-import shlex
import sys
+from .options import (
+ parseOpts,
+)
from .utils import (
compat_getpass,
compat_print,
DateRange,
DEFAULT_OUTTMPL,
decodeOption,
- get_term_width,
DownloadError,
MaxDownloadsReached,
preferredencoding,
@@ -109,7 +109,6 @@ from .downloader import (
FileDownloader,
)
from .extractor import gen_extractors
-from .version import __version__
from .YoutubeDL import YoutubeDL
from .postprocessor import (
AtomicParsleyPP,
@@ -123,475 +122,6 @@ from .postprocessor import (
)
-def parseOpts(overrideArguments=None):
- def _readOptions(filename_bytes, default=[]):
- try:
- optionf = open(filename_bytes)
- except IOError:
- return default # silently skip if file is not present
- try:
- res = []
- for l in optionf:
- res += shlex.split(l, comments=True)
- finally:
- optionf.close()
- return res
-
- def _readUserConf():
- xdg_config_home = os.environ.get('XDG_CONFIG_HOME')
- if xdg_config_home:
- userConfFile = os.path.join(xdg_config_home, 'youtube-dl', 'config')
- if not os.path.isfile(userConfFile):
- userConfFile = os.path.join(xdg_config_home, 'youtube-dl.conf')
- else:
- userConfFile = os.path.join(os.path.expanduser('~'), '.config', 'youtube-dl', 'config')
- if not os.path.isfile(userConfFile):
- userConfFile = os.path.join(os.path.expanduser('~'), '.config', 'youtube-dl.conf')
- userConf = _readOptions(userConfFile, None)
-
- if userConf is None:
- appdata_dir = os.environ.get('appdata')
- if appdata_dir:
- userConf = _readOptions(
- os.path.join(appdata_dir, 'youtube-dl', 'config'),
- default=None)
- if userConf is None:
- userConf = _readOptions(
- os.path.join(appdata_dir, 'youtube-dl', 'config.txt'),
- default=None)
-
- if userConf is None:
- userConf = _readOptions(
- os.path.join(os.path.expanduser('~'), 'youtube-dl.conf'),
- default=None)
- if userConf is None:
- userConf = _readOptions(
- os.path.join(os.path.expanduser('~'), 'youtube-dl.conf.txt'),
- default=None)
-
- if userConf is None:
- userConf = []
-
- return userConf
-
- def _format_option_string(option):
- ''' ('-o', '--option') -> -o, --format METAVAR'''
-
- opts = []
-
- if option._short_opts:
- opts.append(option._short_opts[0])
- if option._long_opts:
- opts.append(option._long_opts[0])
- if len(opts) > 1:
- opts.insert(1, ', ')
-
- if option.takes_value(): opts.append(' %s' % option.metavar)
-
- return "".join(opts)
-
- def _comma_separated_values_options_callback(option, opt_str, value, parser):
- setattr(parser.values, option.dest, value.split(','))
-
- def _hide_login_info(opts):
- opts = list(opts)
- for private_opt in ['-p', '--password', '-u', '--username', '--video-password']:
- try:
- i = opts.index(private_opt)
- opts[i+1] = '<PRIVATE>'
- except ValueError:
- pass
- return opts
-
- max_width = 80
- max_help_position = 80
-
- # No need to wrap help messages if we're on a wide console
- columns = get_term_width()
- if columns: max_width = columns
-
- fmt = optparse.IndentedHelpFormatter(width=max_width, max_help_position=max_help_position)
- fmt.format_option_strings = _format_option_string
-
- kw = {
- 'version' : __version__,
- 'formatter' : fmt,
- 'usage' : '%prog [options] url [url...]',
- 'conflict_handler' : 'resolve',
- }
-
- parser = optparse.OptionParser(**kw)
-
- # option groups
- general = optparse.OptionGroup(parser, 'General Options')
- selection = optparse.OptionGroup(parser, 'Video Selection')
- authentication = optparse.OptionGroup(parser, 'Authentication Options')
- video_format = optparse.OptionGroup(parser, 'Video Format Options')
- subtitles = optparse.OptionGroup(parser, 'Subtitle Options')
- downloader = optparse.OptionGroup(parser, 'Download Options')
- postproc = optparse.OptionGroup(parser, 'Post-processing Options')
- filesystem = optparse.OptionGroup(parser, 'Filesystem Options')
- workarounds = optparse.OptionGroup(parser, 'Workarounds')
- verbosity = optparse.OptionGroup(parser, 'Verbosity / Simulation Options')
-
- general.add_option('-h', '--help',
- action='help', help='print this help text and exit')
- general.add_option('-v', '--version',
- action='version', help='print program version and exit')
- general.add_option('-U', '--update',
- action='store_true', dest='update_self', help='update this program to latest version. Make sure that you have sufficient permissions (run with sudo if needed)')
- general.add_option('-i', '--ignore-errors',
- action='store_true', dest='ignoreerrors', help='continue on download errors, for example to skip unavailable videos in a playlist', default=False)
- general.add_option('--abort-on-error',
- action='store_false', dest='ignoreerrors',
- help='Abort downloading of further videos (in the playlist or the command line) if an error occurs')
- general.add_option('--dump-user-agent',
- action='store_true', dest='dump_user_agent',
- help='display the current browser identification', default=False)
- general.add_option('--list-extractors',
- action='store_true', dest='list_extractors',
- help='List all supported extractors and the URLs they would handle', default=False)
- general.add_option('--extractor-descriptions',
- action='store_true', dest='list_extractor_descriptions',
- help='Output descriptions of all supported extractors', default=False)
- general.add_option(
- '--proxy', dest='proxy', default=None, metavar='URL',
- help='Use the specified HTTP/HTTPS proxy. Pass in an empty string (--proxy "") for direct connection')
- general.add_option(
- '--socket-timeout', dest='socket_timeout',
- type=float, default=None, help=u'Time to wait before giving up, in seconds')
- general.add_option(
- '--default-search',
- dest='default_search', metavar='PREFIX',
- help='Use this prefix for unqualified URLs. For example "gvsearch2:" downloads two videos from google videos for youtube-dl "large apple". Use the value "auto" to let youtube-dl guess ("auto_warning" to emit a warning when guessing). "error" just throws an error. The default value "fixup_error" repairs broken URLs, but emits an error if this is not possible instead of searching.')
- general.add_option(
- '--ignore-config',
- action='store_true',
- help='Do not read configuration files. When given in the global configuration file /etc/youtube-dl.conf: do not read the user configuration in ~/.config/youtube-dl.conf (%APPDATA%/youtube-dl/config.txt on Windows)')
-
- selection.add_option(
- '--playlist-start',
- dest='playliststart', metavar='NUMBER', default=1, type=int,
- help='playlist video to start at (default is %default)')
- selection.add_option(
- '--playlist-end',
- dest='playlistend', metavar='NUMBER', default=None, type=int,
- help='playlist video to end at (default is last)')
- selection.add_option('--match-title', dest='matchtitle', metavar='REGEX',help='download only matching titles (regex or caseless sub-string)')
- selection.add_option('--reject-title', dest='rejecttitle', metavar='REGEX',help='skip download for matching titles (regex or caseless sub-string)')
- selection.add_option('--max-downloads', metavar='NUMBER',
- dest='max_downloads', type=int, default=None,
- help='Abort after downloading NUMBER files')
- selection.add_option('--min-filesize', metavar='SIZE', dest='min_filesize', help="Do not download any videos smaller than SIZE (e.g. 50k or 44.6m)", default=None)
- selection.add_option('--max-filesize', metavar='SIZE', dest='max_filesize', help="Do not download any videos larger than SIZE (e.g. 50k or 44.6m)", default=None)
- selection.add_option('--date', metavar='DATE', dest='date', help='download only videos uploaded in this date', default=None)
- selection.add_option(
- '--datebefore', metavar='DATE', dest='datebefore', default=None,
- help='download only videos uploaded on or before this date (i.e. inclusive)')
- selection.add_option(
- '--dateafter', metavar='DATE', dest='dateafter', default=None,
- help='download only videos uploaded on or after this date (i.e. inclusive)')
- selection.add_option(
- '--min-views', metavar='COUNT', dest='min_views',
- default=None, type=int,
- help="Do not download any videos with less than COUNT views",)
- selection.add_option(
- '--max-views', metavar='COUNT', dest='max_views',
- default=None, type=int,
- help="Do not download any videos with more than COUNT views",)
- selection.add_option('--no-playlist', action='store_true', dest='noplaylist', help='download only the currently playing video', default=False)
- selection.add_option('--age-limit', metavar='YEARS', dest='age_limit',
- help='download only videos suitable for the given age',
- default=None, type=int)
- selection.add_option('--download-archive', metavar='FILE',
- dest='download_archive',
- help='Download only videos not listed in the archive file. Record the IDs of all downloaded videos in it.')
- selection.add_option(
- '--include-ads', dest='include_ads',
- action='store_true',
- help='Download advertisements as well (experimental)')
- selection.add_option(
- '--youtube-include-dash-manifest', action='store_true',
- dest='youtube_include_dash_manifest', default=False,
- help='Try to download the DASH manifest on YouTube videos (experimental)')
-
- authentication.add_option('-u', '--username',
- dest='username', metavar='USERNAME', help='account username')
- authentication.add_option('-p', '--password',
- dest='password', metavar='PASSWORD', help='account password')
- authentication.add_option('-2', '--twofactor',
- dest='twofactor', metavar='TWOFACTOR', help='two-factor auth code')
- authentication.add_option('-n', '--netrc',
- action='store_true', dest='usenetrc', help='use .netrc authentication data', default=False)
- authentication.add_option('--video-password',
- dest='videopassword', metavar='PASSWORD', help='video password (vimeo, smotri)')
-
-
- video_format.add_option('-f', '--format',
- action='store', dest='format', metavar='FORMAT', default=None,
- help='video format code, specify the order of preference using slashes: "-f 22/17/18". "-f mp4" and "-f flv" are also supported. You can also use the special names "best", "bestvideo", "bestaudio", "worst", "worstvideo" and "worstaudio". By default, youtube-dl will pick the best quality.')
- video_format.add_option('--all-formats',
- action='store_const', dest='format', help='download all available video formats', const='all')
- video_format.add_option('--prefer-free-formats',
- action='store_true', dest='prefer_free_formats', default=False, help='prefer free video formats unless a specific one is requested')
- video_format.add_option('--max-quality',
- action='store', dest='format_limit', metavar='FORMAT', help='highest quality format to download')
- video_format.add_option('-F', '--list-formats',
- action='store_true', dest='listformats', help='list all available formats')
-
- subtitles.add_option('--write-sub', '--write-srt',
- action='store_true', dest='writesubtitles',
- help='write subtitle file', default=False)
- subtitles.add_option('--write-auto-sub', '--write-automatic-sub',
- action='store_true', dest='writeautomaticsub',
- help='write automatic subtitle file (youtube only)', default=False)
- subtitles.add_option('--all-subs',
- action='store_true', dest='allsubtitles',
- help='downloads all the available subtitles of the video', default=False)
- subtitles.add_option('--list-subs',
- action='store_true', dest='listsubtitles',
- help='lists all available subtitles for the video', default=False)
- subtitles.add_option('--sub-format',
- action='store', dest='subtitlesformat', metavar='FORMAT',
- help='subtitle format (default=srt) ([sbv/vtt] youtube only)', default='srt')
- subtitles.add_option('--sub-lang', '--sub-langs', '--srt-lang',
- action='callback', dest='subtitleslangs', metavar='LANGS', type='str',
- default=[], callback=_comma_separated_values_options_callback,
- help='languages of the subtitles to download (optional) separated by commas, use IETF language tags like \'en,pt\'')
-
- downloader.add_option('-r', '--rate-limit',
- dest='ratelimit', metavar='LIMIT', help='maximum download rate in bytes per second (e.g. 50K or 4.2M)')
- downloader.add_option('-R', '--retries',
- dest='retries', metavar='RETRIES', help='number of retries (default is %default)', default=10)
- downloader.add_option('--buffer-size',
- dest='buffersize', metavar='SIZE', help='size of download buffer (e.g. 1024 or 16K) (default is %default)', default="1024")
- downloader.add_option('--no-resize-buffer',
- action='store_true', dest='noresizebuffer',
- help='do not automatically adjust the buffer size. By default, the buffer size is automatically resized from an initial value of SIZE.', default=False)
- downloader.add_option('--test', action='store_true', dest='test', default=False, help=optparse.SUPPRESS_HELP)
-
- workarounds.add_option(
- '--encoding', dest='encoding', metavar='ENCODING',
- help='Force the specified encoding (experimental)')
- workarounds.add_option(
- '--no-check-certificate', action='store_true',
- dest='no_check_certificate', default=False,
- help='Suppress HTTPS certificate validation.')
- workarounds.add_option(
- '--prefer-insecure', '--prefer-unsecure', action='store_true', dest='prefer_insecure',
- help='Use an unencrypted connection to retrieve information about the video. (Currently supported only for YouTube)')
- workarounds.add_option(
- '--user-agent', metavar='UA',
- dest='user_agent', help='specify a custom user agent')
- workarounds.add_option(
- '--referer', metavar='REF',
- dest='referer', default=None,
- help='specify a custom referer, use if the video access is restricted to one domain',
- )
- workarounds.add_option(
- '--add-header', metavar='FIELD:VALUE',
- dest='headers', action='append',
- help='specify a custom HTTP header and its value, separated by a colon \':\'. You can use this option multiple times',
- )
- workarounds.add_option(
- '--bidi-workaround', dest='bidi_workaround', action='store_true',
- help=u'Work around terminals that lack bidirectional text support. Requires bidiv or fribidi executable in PATH')
-
- verbosity.add_option('-q', '--quiet',
- action='store_true', dest='quiet', help='activates quiet mode', default=False)
- verbosity.add_option(
- '--no-warnings',
- dest='no_warnings', action='store_true', default=False,
- help='Ignore warnings')
- verbosity.add_option('-s', '--simulate',
- action='store_true', dest='simulate', help='do not download the video and do not write anything to disk', default=False)
- verbosity.add_option('--skip-download',
- action='store_true', dest='skip_download', help='do not download the video', default=False)
- verbosity.add_option('-g', '--get-url',
- action='store_true', dest='geturl', help='simulate, quiet but print URL', default=False)
- verbosity.add_option('-e', '--get-title',
- action='store_true', dest='gettitle', help='simulate, quiet but print title', default=False)
- verbosity.add_option('--get-id',
- action='store_true', dest='getid', help='simulate, quiet but print id', default=False)
- verbosity.add_option('--get-thumbnail',
- action='store_true', dest='getthumbnail',
- help='simulate, quiet but print thumbnail URL', default=False)
- verbosity.add_option('--get-description',
- action='store_true', dest='getdescription',
- help='simulate, quiet but print video description', default=False)
- verbosity.add_option('--get-duration',
- action='store_true', dest='getduration',
- help='simulate, quiet but print video length', default=False)
- verbosity.add_option('--get-filename',
- action='store_true', dest='getfilename',
- help='simulate, quiet but print output filename', default=False)
- verbosity.add_option('--get-format',
- action='store_true', dest='getformat',
- help='simulate, quiet but print output format', default=False)
- verbosity.add_option('-j', '--dump-json',
- action='store_true', dest='dumpjson',
- help='simulate, quiet but print JSON information. See --output for a description of available keys.', default=False)
- verbosity.add_option('--newline',
- action='store_true', dest='progress_with_newline', help='output progress bar as new lines', default=False)
- verbosity.add_option('--no-progress',
- action='store_true', dest='noprogress', help='do not print progress bar', default=False)
- verbosity.add_option('--console-title',
- action='store_true', dest='consoletitle',
- help='display progress in console titlebar', default=False)
- verbosity.add_option('-v', '--verbose',
- action='store_true', dest='verbose', help='print various debugging information', default=False)
- verbosity.add_option('--dump-intermediate-pages',
- action='store_true', dest='dump_intermediate_pages', default=False,
- help='print downloaded pages to debug problems (very verbose)')
- verbosity.add_option('--write-pages',
- action='store_true', dest='write_pages', default=False,
- help='Write downloaded intermediary pages to files in the current directory to debug problems')
- verbosity.add_option('--youtube-print-sig-code',
- action='store_true', dest='youtube_print_sig_code', default=False,
- help=optparse.SUPPRESS_HELP)
- verbosity.add_option('--print-traffic',
- dest='debug_printtraffic', action='store_true', default=False,
- help='Display sent and read HTTP traffic')
-
-
- filesystem.add_option('-a', '--batch-file',
- dest='batchfile', metavar='FILE', help='file containing URLs to download (\'-\' for stdin)')
- filesystem.add_option('--id',
- action='store_true', dest='useid', help='use only video ID in file name', default=False)
- filesystem.add_option('-A', '--auto-number',
- action='store_true', dest='autonumber',
- help='number downloaded files starting from 00000', default=False)
- filesystem.add_option('-o', '--output',
- dest='outtmpl', metavar='TEMPLATE',
- help=('output filename template. Use %(title)s to get the title, '
- '%(uploader)s for the uploader name, %(uploader_id)s for the uploader nickname if different, '
- '%(autonumber)s to get an automatically incremented number, '
- '%(ext)s for the filename extension, '
- '%(format)s for the format description (like "22 - 1280x720" or "HD"), '
- '%(format_id)s for the unique id of the format (like Youtube\'s itags: "137"), '
- '%(upload_date)s for the upload date (YYYYMMDD), '
- '%(extractor)s for the provider (youtube, metacafe, etc), '
- '%(id)s for the video id, %(playlist)s for the playlist the video is in, '
- '%(playlist_index)s for the position in the playlist and %% for a literal percent. '
- '%(height)s and %(width)s for the width and height of the video format. '
- '%(resolution)s for a textual description of the resolution of the video format. '
- 'Use - to output to stdout. Can also be used to download to a different directory, '
- 'for example with -o \'/my/downloads/%(uploader)s/%(title)s-%(id)s.%(ext)s\' .'))
- filesystem.add_option('--autonumber-size',
- dest='autonumber_size', metavar='NUMBER',
- help='Specifies the number of digits in %(autonumber)s when it is present in output filename template or --auto-number option is given')
- filesystem.add_option('--restrict-filenames',
- action='store_true', dest='restrictfilenames',
- help='Restrict filenames to only ASCII characters, and avoid "&" and spaces in filenames', default=False)
- filesystem.add_option('-t', '--title',
- action='store_true', dest='usetitle', help='[deprecated] use title in file name (default)', default=False)
- filesystem.add_option('-l', '--literal',
- action='store_true', dest='usetitle', help='[deprecated] alias of --title', default=False)
- filesystem.add_option('-w', '--no-overwrites',
- action='store_true', dest='nooverwrites', help='do not overwrite files', default=False)
- filesystem.add_option('-c', '--continue',
- action='store_true', dest='continue_dl', help='force resume of partially downloaded files. By default, youtube-dl will resume downloads if possible.', default=True)
- filesystem.add_option('--no-continue',
- action='store_false', dest='continue_dl',
- help='do not resume partially downloaded files (restart from beginning)')
- filesystem.add_option('--no-part',
- action='store_true', dest='nopart', help='do not use .part files', default=False)
- filesystem.add_option('--no-mtime',
- action='store_false', dest='updatetime',
- help='do not use the Last-modified header to set the file modification time', default=True)
- filesystem.add_option('--write-description',
- action='store_true', dest='writedescription',
- help='write video description to a .description file', default=False)
- filesystem.add_option('--write-info-json',
- action='store_true', dest='writeinfojson',
- help='write video metadata to a .info.json file', default=False)
- filesystem.add_option('--write-annotations',
- action='store_true', dest='writeannotations',
- help='write video annotations to a .annotation file', default=False)
- filesystem.add_option('--write-thumbnail',
- action='store_true', dest='writethumbnail',
- help='write thumbnail image to disk', default=False)
- filesystem.add_option('--load-info',
- dest='load_info_filename', metavar='FILE',
- help='json file containing the video information (created with the "--write-json" option)')
- filesystem.add_option('--cookies',
- dest='cookiefile', metavar='FILE', help='file to read cookies from and dump cookie jar in')
- filesystem.add_option(
- '--cache-dir', dest='cachedir', default=None, metavar='DIR',
- help='Location in the filesystem where youtube-dl can store some downloaded information permanently. By default $XDG_CACHE_HOME/youtube-dl or ~/.cache/youtube-dl . At the moment, only YouTube player files (for videos with obfuscated signatures) are cached, but that may change.')
- filesystem.add_option(
- '--no-cache-dir', action='store_const', const=False, dest='cachedir',
- help='Disable filesystem caching')
- filesystem.add_option(
- '--rm-cache-dir', action='store_true', dest='rm_cachedir',
- help='Delete all filesystem cache files')
-
-
- postproc.add_option('-x', '--extract-audio', action='store_true', dest='extractaudio', default=False,
- help='convert video files to audio-only files (requires ffmpeg or avconv and ffprobe or avprobe)')
- postproc.add_option('--audio-format', metavar='FORMAT', dest='audioformat', default='best',
- help='"best", "aac", "vorbis", "mp3", "m4a", "opus", or "wav"; best by default')
- postproc.add_option('--audio-quality', metavar='QUALITY', dest='audioquality', default='5',
- help='ffmpeg/avconv audio quality specification, insert a value between 0 (better) and 9 (worse) for VBR or a specific bitrate like 128K (default 5)')
- postproc.add_option('--recode-video', metavar='FORMAT', dest='recodevideo', default=None,
- help='Encode the video to another format if necessary (currently supported: mp4|flv|ogg|webm|mkv)')
- postproc.add_option('-k', '--keep-video', action='store_true', dest='keepvideo', default=False,
- help='keeps the video file on disk after the post-processing; the video is erased by default')
- postproc.add_option('--no-post-overwrites', action='store_true', dest='nopostoverwrites', default=False,
- help='do not overwrite post-processed files; the post-processed files are overwritten by default')
- postproc.add_option('--embed-subs', action='store_true', dest='embedsubtitles', default=False,
- help='embed subtitles in the video (only for mp4 videos)')
- postproc.add_option('--embed-thumbnail', action='store_true', dest='embedthumbnail', default=False,
- help='embed thumbnail in the audio as cover art')
- postproc.add_option('--add-metadata', action='store_true', dest='addmetadata', default=False,
- help='write metadata to the video file')
- postproc.add_option('--xattrs', action='store_true', dest='xattrs', default=False,
- help='write metadata to the video file\'s xattrs (using dublin core and xdg standards)')
- postproc.add_option('--prefer-avconv', action='store_false', dest='prefer_ffmpeg',
- help='Prefer avconv over ffmpeg for running the postprocessors (default)')
- postproc.add_option('--prefer-ffmpeg', action='store_true', dest='prefer_ffmpeg',
- help='Prefer ffmpeg over avconv for running the postprocessors')
- postproc.add_option(
- '--exec', metavar='CMD', dest='exec_cmd',
- help='Execute a command on the file after downloading, similar to find\'s -exec syntax. Example: --exec \'adb push {} /sdcard/Music/ && rm {}\'' )
-
- parser.add_option_group(general)
- parser.add_option_group(selection)
- parser.add_option_group(downloader)
- parser.add_option_group(filesystem)
- parser.add_option_group(verbosity)
- parser.add_option_group(workarounds)
- parser.add_option_group(video_format)
- parser.add_option_group(subtitles)
- parser.add_option_group(authentication)
- parser.add_option_group(postproc)
-
- if overrideArguments is not None:
- opts, args = parser.parse_args(overrideArguments)
- if opts.verbose:
- write_string(u'[debug] Override config: ' + repr(overrideArguments) + '\n')
- else:
- commandLineConf = sys.argv[1:]
- if '--ignore-config' in commandLineConf:
- systemConf = []
- userConf = []
- else:
- systemConf = _readOptions('/etc/youtube-dl.conf')
- if '--ignore-config' in systemConf:
- userConf = []
- else:
- userConf = _readUserConf()
- argv = systemConf + userConf + commandLineConf
-
- opts, args = parser.parse_args(argv)
- if opts.verbose:
- write_string(u'[debug] System config: ' + repr(_hide_login_info(systemConf)) + '\n')
- write_string(u'[debug] User config: ' + repr(_hide_login_info(userConf)) + '\n')
- write_string(u'[debug] Command-line args: ' + repr(_hide_login_info(commandLineConf)) + '\n')
-
- return parser, opts, args
-
-
def _real_main(argv=None):
# Compatibility fixes for Windows
if sys.platform == 'win32':
diff --git a/youtube_dl/extractor/__init__.py b/youtube_dl/extractor/__init__.py
index 13b3616d3..48683ebcc 100644
--- a/youtube_dl/extractor/__init__.py
+++ b/youtube_dl/extractor/__init__.py
@@ -46,6 +46,7 @@ from .cinemassacre import CinemassacreIE
from .clipfish import ClipfishIE
from .cliphunter import CliphunterIE
from .clipsyndicate import ClipsyndicateIE
+from .cloudy import CloudyIE
from .clubic import ClubicIE
from .cmt import CMTIE
from .cnet import CNETIE
@@ -68,6 +69,7 @@ from .dailymotion import (
)
from .daum import DaumIE
from .dbtv import DBTVIE
+from .deezer import DeezerPlaylistIE
from .dfb import DFBIE
from .dotsub import DotsubIE
from .dreisat import DreiSatIE
@@ -266,6 +268,7 @@ from .podomatic import PodomaticIE
from .pornhd import PornHdIE
from .pornhub import PornHubIE
from .pornotube import PornotubeIE
+from .pornoxo import PornoXOIE
from .promptfile import PromptFileIE
from .prosiebensat1 import ProSiebenSat1IE
from .pyvideo import PyvideoIE
@@ -324,7 +327,7 @@ from .southpark import (
)
from .space import SpaceIE
from .spankwire import SpankwireIE
-from .spiegel import SpiegelIE
+from .spiegel import SpiegelIE, SpiegelArticleIE
from .spiegeltv import SpiegeltvIE
from .spike import SpikeIE
from .sportdeutschland import SportDeutschlandIE
diff --git a/youtube_dl/extractor/adultswim.py b/youtube_dl/extractor/adultswim.py
index a00bfcb35..b4b40f2d4 100644
--- a/youtube_dl/extractor/adultswim.py
+++ b/youtube_dl/extractor/adultswim.py
@@ -75,7 +75,9 @@ class AdultSwimIE(InfoExtractor):
video_path = mobj.group('path')
webpage = self._download_webpage(url, video_path)
- episode_id = self._html_search_regex(r'<link rel="video_src" href="http://i\.adultswim\.com/adultswim/adultswimtv/tools/swf/viralplayer.swf\?id=([0-9a-f]+?)"\s*/?\s*>', webpage, 'episode_id')
+ episode_id = self._html_search_regex(
+ r'<link rel="video_src" href="http://i\.adultswim\.com/adultswim/adultswimtv/tools/swf/viralplayer.swf\?id=([0-9a-f]+?)"\s*/?\s*>',
+ webpage, 'episode_id')
title = self._og_search_title(webpage)
index_url = 'http://asfix.adultswim.com/asfix-svc/episodeSearch/getEpisodesByIDs?networkName=AS&ids=%s' % episode_id
@@ -97,7 +99,9 @@ class AdultSwimIE(InfoExtractor):
duration = segment_el.attrib.get('duration')
segment_url = 'http://asfix.adultswim.com/asfix-svc/episodeservices/getCvpPlaylist?networkName=AS&id=%s' % segment_id
- idoc = self._download_xml(segment_url, segment_title, 'Downloading segment information', 'Unable to download segment information')
+ idoc = self._download_xml(
+ segment_url, segment_title,
+ 'Downloading segment information', 'Unable to download segment information')
formats = []
file_els = idoc.findall('.//files/file')
diff --git a/youtube_dl/extractor/ard.py b/youtube_dl/extractor/ard.py
index ef94c7239..12457f0f9 100644
--- a/youtube_dl/extractor/ard.py
+++ b/youtube_dl/extractor/ard.py
@@ -13,6 +13,7 @@ from ..utils import (
int_or_none,
parse_duration,
unified_strdate,
+ xpath_text,
)
@@ -157,8 +158,9 @@ class ARDIE(InfoExtractor):
player_url = mobj.group('mainurl') + '~playerXml.xml'
doc = self._download_xml(player_url, display_id)
video_node = doc.find('./video')
- upload_date = unified_strdate(video_node.find('./broadcastDate').text)
- thumbnail = video_node.find('.//teaserImage//variant/url').text
+ upload_date = unified_strdate(xpath_text(
+ video_node, './broadcastDate'))
+ thumbnail = xpath_text(video_node, './/teaserImage//variant/url')
formats = []
for a in video_node.findall('.//asset'):
diff --git a/youtube_dl/extractor/br.py b/youtube_dl/extractor/br.py
index 86f0c2861..4e2960c62 100644
--- a/youtube_dl/extractor/br.py
+++ b/youtube_dl/extractor/br.py
@@ -29,17 +29,6 @@ class BRIE(InfoExtractor):
}
},
{
- 'url': 'http://www.br.de/mediathek/video/sendungen/unter-unserem-himmel/unter-unserem-himmel-alpen-ueber-den-pass-100.html',
- 'md5': 'ab451b09d861dbed7d7cc9ab0be19ebe',
- 'info_dict': {
- 'id': '2c060e69-3a27-4e13-b0f0-668fac17d812',
- 'ext': 'mp4',
- 'title': 'Über den Pass',
- 'description': 'Die Eroberung der Alpen: Über den Pass',
- 'duration': 2588,
- }
- },
- {
'url': 'http://www.br.de/nachrichten/schaeuble-haushaltsentwurf-bundestag-100.html',
'md5': '3db0df1a9a9cd9fa0c70e6ea8aa8e820',
'info_dict': {
diff --git a/youtube_dl/extractor/chilloutzone.py b/youtube_dl/extractor/chilloutzone.py
index a62395d4b..c922f6959 100644
--- a/youtube_dl/extractor/chilloutzone.py
+++ b/youtube_dl/extractor/chilloutzone.py
@@ -42,7 +42,7 @@ class ChilloutzoneIE(InfoExtractor):
'id': '85523671',
'ext': 'mp4',
'title': 'The Sunday Times - Icons',
- 'description': 'md5:a5f7ff82e2f7a9ed77473fe666954e84',
+ 'description': 're:(?s)^Watch the making of - makingoficons.com.{300,}',
'uploader': 'Us',
'uploader_id': 'usfilms',
'upload_date': '20140131'
diff --git a/youtube_dl/extractor/cloudy.py b/youtube_dl/extractor/cloudy.py
new file mode 100644
index 000000000..386f080d2
--- /dev/null
+++ b/youtube_dl/extractor/cloudy.py
@@ -0,0 +1,108 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+ ExtractorError,
+ compat_parse_qs,
+ compat_urllib_parse,
+ remove_end,
+ HEADRequest,
+ compat_HTTPError,
+)
+
+
+class CloudyIE(InfoExtractor):
+ _IE_DESC = 'cloudy.ec and videoraj.ch'
+ _VALID_URL = r'''(?x)
+ https?://(?:www\.)?(?P<host>cloudy\.ec|videoraj\.ch)/
+ (?:v/|embed\.php\?id=)
+ (?P<id>[A-Za-z0-9]+)
+ '''
+ _EMBED_URL = 'http://www.%s/embed.php?id=%s'
+ _API_URL = 'http://www.%s/api/player.api.php?%s'
+ _MAX_TRIES = 2
+ _TESTS = [
+ {
+ 'url': 'https://www.cloudy.ec/v/af511e2527aac',
+ 'md5': '5cb253ace826a42f35b4740539bedf07',
+ 'info_dict': {
+ 'id': 'af511e2527aac',
+ 'ext': 'flv',
+ 'title': 'Funny Cats and Animals Compilation june 2013',
+ }
+ },
+ {
+ 'url': 'http://www.videoraj.ch/v/47f399fd8bb60',
+ 'md5': '7d0f8799d91efd4eda26587421c3c3b0',
+ 'info_dict': {
+ 'id': '47f399fd8bb60',
+ 'ext': 'flv',
+ 'title': 'Burning a New iPhone 5 with Gasoline - Will it Survive?',
+ }
+ }
+ ]
+
+ def _extract_video(self, video_host, video_id, file_key, error_url=None, try_num=0):
+
+ if try_num > self._MAX_TRIES - 1:
+ raise ExtractorError('Unable to extract video URL', expected=True)
+
+ form = {
+ 'file': video_id,
+ 'key': file_key,
+ }
+
+ if error_url:
+ form.update({
+ 'numOfErrors': try_num,
+ 'errorCode': '404',
+ 'errorUrl': error_url,
+ })
+
+ data_url = self._API_URL % (video_host, compat_urllib_parse.urlencode(form))
+ player_data = self._download_webpage(
+ data_url, video_id, 'Downloading player data')
+ data = compat_parse_qs(player_data)
+
+ try_num += 1
+
+ if 'error' in data:
+ raise ExtractorError(
+ '%s error: %s' % (self.IE_NAME, ' '.join(data['error_msg'])),
+ expected=True)
+
+ title = data.get('title', [None])[0]
+ if title:
+ title = remove_end(title, '&asdasdas').strip()
+
+ video_url = data.get('url', [None])[0]
+
+ if video_url:
+ try:
+ self._request_webpage(HEADRequest(video_url), video_id, 'Checking video URL')
+ except ExtractorError as e:
+ if isinstance(e.cause, compat_HTTPError) and e.cause.code in [404, 410]:
+ self.report_warning('Invalid video URL, requesting another', video_id)
+ return self._extract_video(video_host, video_id, file_key, video_url, try_num)
+
+ return {
+ 'id': video_id,
+ 'url': video_url,
+ 'title': title,
+ }
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ video_host = mobj.group('host')
+ video_id = mobj.group('id')
+
+ url = self._EMBED_URL % (video_host, video_id)
+ webpage = self._download_webpage(url, video_id)
+
+ file_key = self._search_regex(
+ r'filekey\s*=\s*"([^"]+)"', webpage, 'file_key')
+
+ return self._extract_video(video_host, video_id, file_key)
diff --git a/youtube_dl/extractor/daum.py b/youtube_dl/extractor/daum.py
index 6033cd94a..45d66e2e6 100644
--- a/youtube_dl/extractor/daum.py
+++ b/youtube_dl/extractor/daum.py
@@ -11,10 +11,10 @@ from ..utils import (
class DaumIE(InfoExtractor):
- _VALID_URL = r'https?://(?:m\.)?tvpot\.daum\.net/.*?clipid=(?P<id>\d+)'
+ _VALID_URL = r'https?://(?:m\.)?tvpot\.daum\.net/(?:v/|.*?clipid=)(?P<id>[^?#&]+)'
IE_NAME = 'daum.net'
- _TEST = {
+ _TESTS = [{
'url': 'http://tvpot.daum.net/clip/ClipView.do?clipid=52554690',
'info_dict': {
'id': '52554690',
@@ -24,11 +24,17 @@ class DaumIE(InfoExtractor):
'upload_date': '20130831',
'duration': 3868,
},
- }
+ }, {
+ 'url': 'http://tvpot.daum.net/v/vab4dyeDBysyBssyukBUjBz',
+ 'only_matching': True,
+ }, {
+ 'url': 'http://tvpot.daum.net/v/07dXWRka62Y%24',
+ 'only_matching': True,
+ }]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
- video_id = mobj.group(1)
+ video_id = mobj.group('id')
canonical_url = 'http://tvpot.daum.net/v/%s' % video_id
webpage = self._download_webpage(canonical_url, video_id)
full_id = self._search_regex(
@@ -42,7 +48,6 @@ class DaumIE(InfoExtractor):
'http://videofarm.daum.net/controller/api/open/v1_2/MovieData.apixml?' + query,
video_id, 'Downloading video formats info')
- self.to_screen(u'%s: Getting video urls' % video_id)
formats = []
for format_el in urls.findall('result/output_list/output_list'):
profile = format_el.attrib['profile']
@@ -52,7 +57,7 @@ class DaumIE(InfoExtractor):
})
url_doc = self._download_xml(
'http://videofarm.daum.net/controller/api/open/v1_2/MovieLocation.apixml?' + format_query,
- video_id, note=False)
+ video_id, note='Downloading video data for %s format' % profile)
format_url = url_doc.find('result/url').text
formats.append({
'url': format_url,
diff --git a/youtube_dl/extractor/deezer.py b/youtube_dl/extractor/deezer.py
new file mode 100644
index 000000000..c3205ff5f
--- /dev/null
+++ b/youtube_dl/extractor/deezer.py
@@ -0,0 +1,89 @@
+from __future__ import unicode_literals
+
+import json
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+ ExtractorError,
+ int_or_none,
+ orderedSet,
+)
+
+
+class DeezerPlaylistIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:www\.)?deezer\.com/playlist/(?P<id>[0-9]+)'
+ _TEST = {
+ 'url': 'http://www.deezer.com/playlist/176747451',
+ 'info_dict': {
+ 'id': '176747451',
+ 'title': 'Best!',
+ 'uploader': 'Anonymous',
+ 'thumbnail': 're:^https?://cdn-images.deezer.com/images/cover/.*\.jpg$',
+ },
+ 'playlist_count': 30,
+ 'skip': 'Only available in .de',
+ }
+
+ def _real_extract(self, url):
+ if 'test' not in self._downloader.params:
+ self._downloader.report_warning('For now, this extractor only supports the 30 second previews. Patches welcome!')
+
+ mobj = re.match(self._VALID_URL, url)
+ playlist_id = mobj.group('id')
+
+ webpage = self._download_webpage(url, playlist_id)
+ geoblocking_msg = self._html_search_regex(
+ r'<p class="soon-txt">(.*?)</p>', webpage, 'geoblocking message',
+ default=None)
+ if geoblocking_msg is not None:
+ raise ExtractorError(
+ 'Deezer said: %s' % geoblocking_msg, expected=True)
+
+ data_json = self._search_regex(
+ r'naboo\.display\(\'[^\']+\',\s*(.*?)\);\n', webpage, 'data JSON')
+ data = json.loads(data_json)
+
+ playlist_title = data.get('DATA', {}).get('TITLE')
+ playlist_uploader = data.get('DATA', {}).get('PARENT_USERNAME')
+ playlist_thumbnail = self._search_regex(
+ r'<img id="naboo_playlist_image".*?src="([^"]+)"', webpage,
+ 'playlist thumbnail')
+
+ preview_pattern = self._search_regex(
+ r"var SOUND_PREVIEW_GATEWAY\s*=\s*'([^']+)';", webpage,
+ 'preview URL pattern', fatal=False)
+ entries = []
+ for s in data['SONGS']['data']:
+ puid = s['MD5_ORIGIN']
+ preview_video_url = preview_pattern.\
+ replace('{0}', puid[0]).\
+ replace('{1}', puid).\
+ replace('{2}', s['MEDIA_VERSION'])
+ formats = [{
+ 'format_id': 'preview',
+ 'url': preview_video_url,
+ 'preference': -100, # Only the first 30 seconds
+ 'ext': 'mp3',
+ }]
+ self._sort_formats(formats)
+ artists = ', '.join(
+ orderedSet(a['ART_NAME'] for a in s['ARTISTS']))
+ entries.append({
+ 'id': s['SNG_ID'],
+ 'duration': int_or_none(s.get('DURATION')),
+ 'title': '%s - %s' % (artists, s['SNG_TITLE']),
+ 'uploader': s['ART_NAME'],
+ 'uploader_id': s['ART_ID'],
+ 'age_limit': 16 if s.get('EXPLICIT_LYRICS') == '1' else 0,
+ 'formats': formats,
+ })
+
+ return {
+ '_type': 'playlist',
+ 'id': playlist_id,
+ 'title': playlist_title,
+ 'uploader': playlist_uploader,
+ 'thumbnail': playlist_thumbnail,
+ 'entries': entries,
+ }
diff --git a/youtube_dl/extractor/drtv.py b/youtube_dl/extractor/drtv.py
index cdccfd376..9d6ce1f48 100644
--- a/youtube_dl/extractor/drtv.py
+++ b/youtube_dl/extractor/drtv.py
@@ -8,7 +8,7 @@ from ..utils import parse_iso8601
class DRTVIE(SubtitlesInfoExtractor):
- _VALID_URL = r'http://(?:www\.)?dr\.dk/tv/se/[^/]+/(?P<id>[\da-z-]+)'
+ _VALID_URL = r'http://(?:www\.)?dr\.dk/tv/se/(?:[^/]+/)+(?P<id>[\da-z-]+)(?:[/#?]|$)'
_TEST = {
'url': 'http://www.dr.dk/tv/se/partiets-mand/partiets-mand-7-8',
diff --git a/youtube_dl/extractor/facebook.py b/youtube_dl/extractor/facebook.py
index afb34ce51..60e68d98a 100644
--- a/youtube_dl/extractor/facebook.py
+++ b/youtube_dl/extractor/facebook.py
@@ -12,8 +12,8 @@ from ..utils import (
compat_urllib_parse,
compat_urllib_request,
urlencode_postdata,
-
ExtractorError,
+ limit_length,
)
@@ -29,13 +29,21 @@ class FacebookIE(InfoExtractor):
_NETRC_MACHINE = 'facebook'
IE_NAME = 'facebook'
_TESTS = [{
- 'url': 'https://www.facebook.com/photo.php?v=120708114770723',
- 'md5': '48975a41ccc4b7a581abd68651c1a5a8',
+ 'url': 'https://www.facebook.com/video.php?v=637842556329505&fref=nf',
+ 'md5': '6a40d33c0eccbb1af76cf0485a052659',
+ 'info_dict': {
+ 'id': '637842556329505',
+ 'ext': 'mp4',
+ 'duration': 38,
+ 'title': 'Did you know Kei Nishikori is the first Asian man to ever reach a Grand Slam fin...',
+ }
+ }, {
+ 'note': 'Video without discernible title',
+ 'url': 'https://www.facebook.com/video.php?v=274175099429670',
'info_dict': {
- 'id': '120708114770723',
+ 'id': '274175099429670',
'ext': 'mp4',
- 'duration': 279,
- 'title': 'PEOPLE ARE AWESOME 2013',
+ 'title': 'Facebook video #274175099429670',
}
}, {
'url': 'https://www.facebook.com/video.php?v=10204634152394104',
@@ -125,7 +133,15 @@ class FacebookIE(InfoExtractor):
raise ExtractorError('Cannot find video URL')
video_title = self._html_search_regex(
- r'<h2 class="uiHeaderTitle">([^<]*)</h2>', webpage, 'title')
+ r'<h2 class="uiHeaderTitle">([^<]*)</h2>', webpage, 'title',
+ fatal=False)
+ if not video_title:
+ video_title = self._html_search_regex(
+ r'(?s)<span class="fbPhotosPhotoCaption".*?id="fbPhotoPageCaption"><span class="hasCaption">(.*?)</span>',
+ webpage, 'alternative title', default=None)
+ video_title = limit_length(video_title, 80)
+ if not video_title:
+ video_title = 'Facebook video #%s' % video_id
return {
'id': video_id,
diff --git a/youtube_dl/extractor/pornhd.py b/youtube_dl/extractor/pornhd.py
index 718fe9aba..48ce6e730 100644
--- a/youtube_dl/extractor/pornhd.py
+++ b/youtube_dl/extractor/pornhd.py
@@ -27,47 +27,40 @@ class PornHdIE(InfoExtractor):
webpage = self._download_webpage(url, video_id)
- title = self._og_search_title(webpage)
- TITLE_SUFFIX = ' porn HD Video | PornHD.com '
- if title.endswith(TITLE_SUFFIX):
- title = title[:-len(TITLE_SUFFIX)]
-
+ title = self._html_search_regex(
+ r'<title>(.+) porn HD.+?</title>', webpage, 'title')
description = self._html_search_regex(
r'<div class="description">([^<]+)</div>', webpage, 'description', fatal=False)
view_count = int_or_none(self._html_search_regex(
- r'(\d+) views </span>', webpage, 'view count', fatal=False))
+ r'(\d+) views\s*</span>', webpage, 'view count', fatal=False))
- formats = [
- {
- 'url': format_url,
- 'ext': format.lower(),
- 'format_id': '%s-%s' % (format.lower(), quality.lower()),
- 'quality': 1 if quality.lower() == 'high' else 0,
- } for format, quality, format_url in re.findall(
- r'var __video([\da-zA-Z]+?)(Low|High)StreamUrl = \'(http://.+?)\?noProxy=1\'', webpage)
- ]
+ videos = re.findall(
+ r'var __video([\da-zA-Z]+?)(Low|High)StreamUrl = \'(http://.+?)\?noProxy=1\'', webpage)
mobj = re.search(r'flashVars = (?P<flashvars>{.+?});', webpage)
if mobj:
flashvars = json.loads(mobj.group('flashvars'))
- formats.extend([
- {
- 'url': flashvars['hashlink'].replace('?noProxy=1', ''),
- 'ext': 'flv',
- 'format_id': 'flv-low',
- 'quality': 0,
- },
- {
- 'url': flashvars['hd'].replace('?noProxy=1', ''),
- 'ext': 'flv',
- 'format_id': 'flv-high',
- 'quality': 1,
- }
- ])
+ for key, quality in [('hashlink', 'low'), ('hd', 'high')]:
+ redirect_url = flashvars.get(key)
+ if redirect_url:
+ videos.append(('flv', quality, redirect_url))
thumbnail = flashvars['urlWallpaper']
else:
thumbnail = self._og_search_thumbnail(webpage)
+ formats = []
+ for format_, quality, redirect_url in videos:
+ format_id = '%s-%s' % (format_.lower(), quality.lower())
+ video_url = self._download_webpage(
+ redirect_url, video_id, 'Downloading %s video link' % format_id, fatal=False)
+ if not video_url:
+ continue
+ formats.append({
+ 'url': video_url,
+ 'ext': format_.lower(),
+ 'format_id': format_id,
+ 'quality': 1 if quality.lower() == 'high' else 0,
+ })
self._sort_formats(formats)
return {
diff --git a/youtube_dl/extractor/pornoxo.py b/youtube_dl/extractor/pornoxo.py
new file mode 100644
index 000000000..202f58673
--- /dev/null
+++ b/youtube_dl/extractor/pornoxo.py
@@ -0,0 +1,65 @@
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+ str_to_int,
+)
+
+
+class PornoXOIE(InfoExtractor):
+ _VALID_URL = r'https?://(?:www\.)?pornoxo\.com/videos/(?P<id>\d+)/(?P<display_id>[^/]+)\.html'
+ _TEST = {
+ 'url': 'http://www.pornoxo.com/videos/7564/striptease-from-sexy-secretary.html',
+ 'md5': '582f28ecbaa9e6e24cb90f50f524ce87',
+ 'info_dict': {
+ 'id': '7564',
+ 'ext': 'flv',
+ 'title': 'Striptease From Sexy Secretary!',
+ 'description': 'Striptease From Sexy Secretary!',
+ 'categories': list, # NSFW
+ 'thumbnail': 're:https?://.*\.jpg$',
+ 'age_limit': 18,
+ }
+ }
+
+ def _real_extract(self, url):
+ mobj = re.match(self._VALID_URL, url)
+ video_id = mobj.group('id')
+
+ webpage = self._download_webpage(url, video_id)
+
+ video_url = self._html_search_regex(
+ r'\'file\'\s*:\s*"([^"]+)"', webpage, 'video_url')
+
+ title = self._html_search_regex(
+ r'<title>([^<]+)\s*-\s*PornoXO', webpage, 'title')
+
+ description = self._html_search_regex(
+ r'<meta name="description" content="([^"]+)\s*featuring',
+ webpage, 'description', fatal=False)
+
+ thumbnail = self._html_search_regex(
+ r'\'image\'\s*:\s*"([^"]+)"', webpage, 'thumbnail', fatal=False)
+
+ view_count = str_to_int(self._html_search_regex(
+ r'[vV]iews:\s*([0-9,]+)', webpage, 'view count', fatal=False))
+
+ categories_str = self._html_search_regex(
+ r'<meta name="description" content=".*featuring\s*([^"]+)"',
+ webpage, 'categories', fatal=False)
+ categories = (
+ None if categories_str is None
+ else categories_str.split(','))
+
+ return {
+ 'id': video_id,
+ 'url': video_url,
+ 'title': title,
+ 'description': description,
+ 'thumbnail': thumbnail,
+ 'categories': categories,
+ 'view_count': view_count,
+ 'age_limit': 18,
+ }
diff --git a/youtube_dl/extractor/prosiebensat1.py b/youtube_dl/extractor/prosiebensat1.py
index da64a1a7b..5b2a723c1 100644
--- a/youtube_dl/extractor/prosiebensat1.py
+++ b/youtube_dl/extractor/prosiebensat1.py
@@ -145,7 +145,6 @@ class ProSiebenSat1IE(InfoExtractor):
'ext': 'mp4',
'title': 'Kurztrips zum Valentinstag',
'description': 'md5:8ba6301e70351ae0bedf8da00f7ba528',
- 'upload_date': '20130206',
'duration': 307.24,
},
'params': {
@@ -240,7 +239,7 @@ class ProSiebenSat1IE(InfoExtractor):
thumbnail = self._og_search_thumbnail(page)
upload_date = unified_strdate(self._html_search_regex(
- self._UPLOAD_DATE_REGEXES, page, 'upload date', fatal=False))
+ self._UPLOAD_DATE_REGEXES, page, 'upload date', default=None))
formats = []
@@ -249,7 +248,7 @@ class ProSiebenSat1IE(InfoExtractor):
urls_sources = urls_sources.values()
def fix_bitrate(bitrate):
- return bitrate / 1000 if bitrate % 1000 == 0 else bitrate
+ return (bitrate // 1000) if bitrate % 1000 == 0 else bitrate
for source in urls_sources:
protocol = source['protocol']
diff --git a/youtube_dl/extractor/spiegel.py b/youtube_dl/extractor/spiegel.py
index 340a38440..9ed7d3b39 100644
--- a/youtube_dl/extractor/spiegel.py
+++ b/youtube_dl/extractor/spiegel.py
@@ -4,6 +4,7 @@ from __future__ import unicode_literals
import re
from .common import InfoExtractor
+from ..utils import compat_urlparse
class SpiegelIE(InfoExtractor):
@@ -28,16 +29,6 @@ class SpiegelIE(InfoExtractor):
'description': 'md5:c2322b65e58f385a820c10fa03b2d088',
'duration': 983,
},
- }, {
- 'url': 'http://www.spiegel.de/video/johann-westhauser-videobotschaft-des-hoehlenforschers-video-1502367.html',
- 'md5': '54f58ba0e752e3c07bc2a26222dd0acf',
- 'info_dict': {
- 'id': '1502367',
- 'ext': 'mp4',
- 'title': 'Videobotschaft: Höhlenforscher Westhauser dankt seinen Rettern',
- 'description': 'md5:c6f1ec11413ebd1088b6813943e5fc91',
- 'duration': 42,
- },
}]
def _real_extract(self, url):
@@ -82,3 +73,34 @@ class SpiegelIE(InfoExtractor):
'duration': duration,
'formats': formats,
}
+
+
+class SpiegelArticleIE(InfoExtractor):
+ _VALID_URL = 'https?://www\.spiegel\.de/(?!video/)[^?#]*?-(?P<id>[0-9]+)\.html'
+ IE_NAME = 'Spiegel:Article'
+ IE_DESC = 'Articles on spiegel.de'
+ _TEST = {
+ 'url': 'http://www.spiegel.de/sport/sonst/badminton-wm-die-randsportart-soll-populaerer-werden-a-987092.html',
+ 'info_dict': {
+ 'id': '1516455',
+ 'ext': 'mp4',
+ 'title': 'Faszination Badminton: Nennt es bloß nicht Federball',
+ 'description': 're:^Patrick Kämnitz gehört.{100,}',
+ },
+ }
+
+ def _real_extract(self, url):
+ m = re.match(self._VALID_URL, url)
+ video_id = m.group('id')
+
+ webpage = self._download_webpage(url, video_id)
+ video_link = self._search_regex(
+ r'<a href="([^"]+)" onclick="return spOpenVideo\(this,', webpage,
+ 'video page URL')
+ video_url = compat_urlparse.urljoin(
+ self.http_scheme() + '//spiegel.de/', video_link)
+
+ return {
+ '_type': 'url',
+ 'url': video_url,
+ }
diff --git a/youtube_dl/extractor/swrmediathek.py b/youtube_dl/extractor/swrmediathek.py
index 5d9d70367..13c6ea677 100644
--- a/youtube_dl/extractor/swrmediathek.py
+++ b/youtube_dl/extractor/swrmediathek.py
@@ -52,20 +52,6 @@ class SWRMediathekIE(InfoExtractor):
'uploader': 'SWR 2',
'uploader_id': '284670',
}
- }, {
- 'url': 'http://swrmediathek.de/content/player.htm?show=52dc7e00-15c5-11e4-84bc-0026b975f2e6',
- 'md5': '881531487d0633080a8cc88d31ef896f',
- 'info_dict': {
- 'id': '52dc7e00-15c5-11e4-84bc-0026b975f2e6',
- 'ext': 'mp4',
- 'title': 'Familienspaß am Bodensee',
- 'description': 'md5:0b591225a32cfde7be1629ed49fe4315',
- 'thumbnail': 're:http://.*\.jpg',
- 'duration': 1784,
- 'upload_date': '20140727',
- 'uploader': 'SWR Fernsehen BW',
- 'uploader_id': '281130',
- }
}]
def _real_extract(self, url):
diff --git a/youtube_dl/extractor/telemb.py b/youtube_dl/extractor/telemb.py
index cf5bb89b1..1bbd0e7bd 100644
--- a/youtube_dl/extractor/telemb.py
+++ b/youtube_dl/extractor/telemb.py
@@ -23,6 +23,7 @@ class TeleMBIE(InfoExtractor):
}
},
{
+ # non-ASCII characters in download URL
'url': 'http://telemb.be/les-reportages-havre-incendie-mortel_d_13514.html',
'md5': '6e9682736e5ccd4eab7f21e855350733',
'info_dict': {
diff --git a/youtube_dl/extractor/tumblr.py b/youtube_dl/extractor/tumblr.py
index 2882c1809..306fe8974 100644
--- a/youtube_dl/extractor/tumblr.py
+++ b/youtube_dl/extractor/tumblr.py
@@ -10,7 +10,7 @@ from ..utils import (
class TumblrIE(InfoExtractor):
- _VALID_URL = r'http://(?P<blog_name>.*?)\.tumblr\.com/((post)|(video))/(?P<id>\d*)($|/)'
+ _VALID_URL = r'http://(?P<blog_name>.*?)\.tumblr\.com/(?:post|video)/(?P<id>[0-9]+)(?:$|[/?#])'
_TESTS = [{
'url': 'http://tatianamaslanydaily.tumblr.com/post/54196191430/orphan-black-dvd-extra-behind-the-scenes',
'md5': '479bb068e5b16462f5176a6828829767',
@@ -56,13 +56,15 @@ class TumblrIE(InfoExtractor):
# The only place where you can get a title, it's not complete,
# but searching in other places doesn't work for all videos
- video_title = self._html_search_regex(r'<title>(?P<title>.*?)(?: \| Tumblr)?</title>',
- webpage, 'title', flags=re.DOTALL)
+ video_title = self._html_search_regex(
+ r'(?s)<title>(?P<title>.*?)(?: \| Tumblr)?</title>',
+ webpage, 'title')
- return [{'id': video_id,
- 'url': video_url,
- 'title': video_title,
- 'description': self._html_search_meta('description', webpage),
- 'thumbnail': video_thumbnail,
- 'ext': ext
- }]
+ return {
+ 'id': video_id,
+ 'url': video_url,
+ 'title': video_title,
+ 'description': self._html_search_meta('description', webpage),
+ 'thumbnail': video_thumbnail,
+ 'ext': ext,
+ }
diff --git a/youtube_dl/extractor/vporn.py b/youtube_dl/extractor/vporn.py
index 426369c51..2d23effcc 100644
--- a/youtube_dl/extractor/vporn.py
+++ b/youtube_dl/extractor/vporn.py
@@ -11,22 +11,48 @@ from ..utils import (
class VpornIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?vporn\.com/[^/]+/(?P<display_id>[^/]+)/(?P<id>\d+)'
- _TEST = {
- 'url': 'http://www.vporn.com/masturbation/violet-on-her-th-birthday/497944/',
- 'md5': 'facf37c1b86546fa0208058546842c55',
- 'info_dict': {
- 'id': '497944',
- 'display_id': 'violet-on-her-th-birthday',
- 'ext': 'mp4',
- 'title': 'Violet on her 19th birthday',
- 'description': 'Violet dances in front of the camera which is sure to get you horny.',
- 'thumbnail': 're:^https?://.*\.jpg$',
- 'uploader': 'kileyGrope',
- 'categories': ['Masturbation', 'Teen'],
- 'duration': 393,
- 'age_limit': 18,
- }
- }
+ _TESTS = [
+ {
+ 'url': 'http://www.vporn.com/masturbation/violet-on-her-th-birthday/497944/',
+ 'md5': 'facf37c1b86546fa0208058546842c55',
+ 'info_dict': {
+ 'id': '497944',
+ 'display_id': 'violet-on-her-th-birthday',
+ 'ext': 'mp4',
+ 'title': 'Violet on her 19th birthday',
+ 'description': 'Violet dances in front of the camera which is sure to get you horny.',
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ 'uploader': 'kileyGrope',
+ 'categories': ['Masturbation', 'Teen'],
+ 'duration': 393,
+ 'age_limit': 18,
+ 'view_count': int,
+ 'like_count': int,
+ 'dislike_count': int,
+ 'comment_count': int,
+ }
+ },
+ {
+ 'url': 'http://www.vporn.com/female/hana-shower/523564/',
+ 'md5': 'ced35a4656198a1664cf2cda1575a25f',
+ 'info_dict': {
+ 'id': '523564',
+ 'display_id': 'hana-shower',
+ 'ext': 'mp4',
+ 'title': 'Hana Shower',
+ 'description': 'Hana showers at the bathroom.',
+ 'thumbnail': 're:^https?://.*\.jpg$',
+ 'uploader': 'Hmmmmm',
+ 'categories': ['Big Boobs', 'Erotic', 'Teen', 'Female'],
+ 'duration': 588,
+ 'age_limit': 18,
+ 'view_count': int,
+ 'like_count': int,
+ 'dislike_count': int,
+ 'comment_count': int,
+ }
+ },
+ ]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
@@ -64,7 +90,7 @@ class VpornIE(InfoExtractor):
formats = []
- for video in re.findall(r'flashvars\.videoUrl([^=]+?)\s*=\s*"([^"]+)"', webpage):
+ for video in re.findall(r'flashvars\.videoUrl([^=]+?)\s*=\s*"(https?://[^"]+)"', webpage):
video_url = video[1]
fmt = {
'url': video_url,
diff --git a/youtube_dl/extractor/xhamster.py b/youtube_dl/extractor/xhamster.py
index 00b6d1eba..4e8fbde8d 100644
--- a/youtube_dl/extractor/xhamster.py
+++ b/youtube_dl/extractor/xhamster.py
@@ -18,7 +18,6 @@ class XHamsterIE(InfoExtractor):
_TESTS = [
{
'url': 'http://xhamster.com/movies/1509445/femaleagent_shy_beauty_takes_the_bait.html',
- 'md5': '8281348b8d3c53d39fffb377d24eac4e',
'info_dict': {
'id': '1509445',
'ext': 'mp4',
@@ -31,7 +30,6 @@ class XHamsterIE(InfoExtractor):
},
{
'url': 'http://xhamster.com/movies/2221348/britney_spears_sexy_booty.html?hd',
- 'md5': '4cbd8d56708ecb4fb4124c23e4acb81a',
'info_dict': {
'id': '2221348',
'ext': 'mp4',
diff --git a/youtube_dl/extractor/youporn.py b/youtube_dl/extractor/youporn.py
index d456c4da5..7bfda45e7 100644
--- a/youtube_dl/extractor/youporn.py
+++ b/youtube_dl/extractor/youporn.py
@@ -23,7 +23,6 @@ class YouPornIE(InfoExtractor):
_VALID_URL = r'^(?P<proto>https?://)(?:www\.)?(?P<url>youporn\.com/watch/(?P<videoid>[0-9]+)/(?P<title>[^/]+))'
_TEST = {
'url': 'http://www.youporn.com/watch/505835/sex-ed-is-it-safe-to-masturbate-daily/',
- 'md5': '71ec5fcfddacf80f495efa8b6a8d9a89',
'info_dict': {
'id': '505835',
'ext': 'mp4',
diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py
index c77f09aac..b54c69122 100644
--- a/youtube_dl/extractor/youtube.py
+++ b/youtube_dl/extractor/youtube.py
@@ -1,5 +1,8 @@
# coding: utf-8
+from __future__ import unicode_literals
+
+
import itertools
import json
import os.path
@@ -69,29 +72,29 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
return
galx = self._search_regex(r'(?s)<input.+?name="GALX".+?value="(.+?)"',
- login_page, u'Login GALX parameter')
+ login_page, 'Login GALX parameter')
# Log in
login_form_strs = {
- u'continue': u'https://www.youtube.com/signin?action_handle_signin=true&feature=sign_in_button&hl=en_US&nomobiletemp=1',
- u'Email': username,
- u'GALX': galx,
- u'Passwd': password,
-
- u'PersistentCookie': u'yes',
- u'_utf8': u'霱',
- u'bgresponse': u'js_disabled',
- u'checkConnection': u'',
- u'checkedDomains': u'youtube',
- u'dnConn': u'',
- u'pstMsg': u'0',
- u'rmShown': u'1',
- u'secTok': u'',
- u'signIn': u'Sign in',
- u'timeStmp': u'',
- u'service': u'youtube',
- u'uilel': u'3',
- u'hl': u'en_US',
+ 'continue': 'https://www.youtube.com/signin?action_handle_signin=true&feature=sign_in_button&hl=en_US&nomobiletemp=1',
+ 'Email': username,
+ 'GALX': galx,
+ 'Passwd': password,
+
+ 'PersistentCookie': 'yes',
+ '_utf8': '霱',
+ 'bgresponse': 'js_disabled',
+ 'checkConnection': '',
+ 'checkedDomains': 'youtube',
+ 'dnConn': '',
+ 'pstMsg': '0',
+ 'rmShown': '1',
+ 'secTok': '',
+ 'signIn': 'Sign in',
+ 'timeStmp': '',
+ 'service': 'youtube',
+ 'uilel': '3',
+ 'hl': 'en_US',
}
# Convert to UTF-8 *before* urlencode because Python 2.x's urlencode
@@ -132,19 +135,19 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
timeStmp = match.group(1)
tfa_form_strs = {
- u'continue': u'https://www.youtube.com/signin?action_handle_signin=true&feature=sign_in_button&hl=en_US&nomobiletemp=1',
- u'smsToken': u'',
- u'smsUserPin': tfa_code,
- u'smsVerifyPin': u'Verify',
-
- u'PersistentCookie': u'yes',
- u'checkConnection': u'',
- u'checkedDomains': u'youtube',
- u'pstMsg': u'1',
- u'secTok': secTok,
- u'timeStmp': timeStmp,
- u'service': u'youtube',
- u'hl': u'en_US',
+ 'continue': 'https://www.youtube.com/signin?action_handle_signin=true&feature=sign_in_button&hl=en_US&nomobiletemp=1',
+ 'smsToken': '',
+ 'smsUserPin': tfa_code,
+ 'smsVerifyPin': 'Verify',
+
+ 'PersistentCookie': 'yes',
+ 'checkConnection': '',
+ 'checkedDomains': 'youtube',
+ 'pstMsg': '1',
+ 'secTok': secTok,
+ 'timeStmp': timeStmp,
+ 'service': 'youtube',
+ 'hl': 'en_US',
}
tfa_form = dict((k.encode('utf-8'), v.encode('utf-8')) for k,v in tfa_form_strs.items())
tfa_data = compat_urllib_parse.urlencode(tfa_form).encode('ascii')
@@ -196,7 +199,7 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
- IE_DESC = u'YouTube.com'
+ IE_DESC = 'YouTube.com'
_VALID_URL = r"""(?x)^
(
(?:https?://|//) # http(s):// or protocol-independent URL
@@ -221,6 +224,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
)
)? # all until now is optional -> you can pass the naked ID
([0-9A-Za-z_-]{11}) # here is it! the YouTube video ID
+ (?!.*?&list=) # combined list/video URLs are handled by the playlist IE
(?(1).+)? # if we found the ID, everything can follow
$"""
_NEXT_URL_RE = r'[\?&]next_url=([^&]+)'
@@ -300,7 +304,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
'_rtmp': {'protocol': 'rtmp'},
}
- IE_NAME = u'youtube'
+ IE_NAME = 'youtube'
_TESTS = [
{
u"url": u"http://www.youtube.com/watch?v=BaW_jenozKc",
@@ -359,7 +363,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
u"info_dict": {
u"upload_date": "20121002",
u"uploader_id": "8KVIDEO",
- u"description": "No description available.",
+ u"description": '',
u"uploader": "8KVIDEO",
u"title": "UHDTV TEST 8K VIDEO.mp4"
},
@@ -370,30 +374,23 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
},
# DASH manifest with encrypted signature
{
- u'url': u'https://www.youtube.com/watch?v=IB3lcPjvWLA',
- u'info_dict': {
- u'id': u'IB3lcPjvWLA',
- u'ext': u'm4a',
- u'title': u'Afrojack - The Spark ft. Spree Wilson',
- u'description': u'md5:9717375db5a9a3992be4668bbf3bc0a8',
- u'uploader': u'AfrojackVEVO',
- u'uploader_id': u'AfrojackVEVO',
- u'upload_date': u'20131011',
+ 'url': 'https://www.youtube.com/watch?v=IB3lcPjvWLA',
+ 'info_dict': {
+ 'id': 'IB3lcPjvWLA',
+ 'ext': 'm4a',
+ 'title': 'Afrojack - The Spark ft. Spree Wilson',
+ 'description': 'md5:9717375db5a9a3992be4668bbf3bc0a8',
+ 'uploader': 'AfrojackVEVO',
+ 'uploader_id': 'AfrojackVEVO',
+ 'upload_date': '20131011',
},
u"params": {
- u'youtube_include_dash_manifest': True,
- u'format': '141',
+ 'youtube_include_dash_manifest': True,
+ 'format': '141',
},
},
]
-
- @classmethod
- def suitable(cls, url):
- """Receives a URL and returns True if suitable for this IE."""
- if YoutubePlaylistIE.suitable(url): return False
- return re.match(cls._VALID_URL, url) is not None
-
def __init__(self, *args, **kwargs):
super(YoutubeIE, self).__init__(*args, **kwargs)
self._player_cache = {}
@@ -416,7 +413,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
def _signature_cache_id(self, example_sig):
""" Return a string representation of a signature """
- return u'.'.join(compat_str(len(part)) for part in example_sig.split('.'))
+ return '.'.join(compat_str(len(part)) for part in example_sig.split('.'))
def _extract_signature_function(self, video_id, player_url, example_sig):
id_m = re.match(
@@ -434,7 +431,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
cache_spec = self._downloader.cache.load(u'youtube-sigfuncs', func_id)
if cache_spec is not None:
- return lambda s: u''.join(s[i] for i in cache_spec)
+ return lambda s: ''.join(s[i] for i in cache_spec)
if player_type == 'js':
code = self._download_webpage(
@@ -453,7 +450,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
assert False, 'Invalid player type %r' % player_type
if cache_spec is None:
- test_string = u''.join(map(compat_chr, range(len(example_sig))))
+ test_string = ''.join(map(compat_chr, range(len(example_sig))))
cache_res = res(test_string)
cache_spec = [ord(c) for c in cache_res]
@@ -463,10 +460,10 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
def _print_sig_code(self, func, example_sig):
def gen_sig_code(idxs):
def _genslice(start, end, step):
- starts = u'' if start == 0 else str(start)
- ends = (u':%d' % (end+step)) if end + step >= 0 else u':'
- steps = u'' if step == 1 else (u':%d' % step)
- return u's[%s%s%s]' % (starts, ends, steps)
+ starts = '' if start == 0 else str(start)
+ ends = (u':%d' % (end+step)) if end + step >= 0 else ':'
+ steps = '' if step == 1 else (u':%d' % step)
+ return 's[%s%s%s]' % (starts, ends, steps)
step = None
start = '(Never used)' # Quelch pyflakes warnings - start will be
@@ -483,26 +480,26 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
start = prev
continue
else:
- yield u's[%d]' % prev
+ yield 's[%d]' % prev
if step is None:
- yield u's[%d]' % i
+ yield 's[%d]' % i
else:
yield _genslice(start, i, step)
- test_string = u''.join(map(compat_chr, range(len(example_sig))))
+ test_string = ''.join(map(compat_chr, range(len(example_sig))))
cache_res = func(test_string)
cache_spec = [ord(c) for c in cache_res]
- expr_code = u' + '.join(gen_sig_code(cache_spec))
+ expr_code = ' + '.join(gen_sig_code(cache_spec))
signature_id_tuple = '(%s)' % (
', '.join(compat_str(len(p)) for p in example_sig.split('.')))
code = (u'if tuple(len(p) for p in s.split(\'.\')) == %s:\n'
- u' return %s\n') % (signature_id_tuple, expr_code)
+ ' return %s\n') % (signature_id_tuple, expr_code)
self.to_screen(u'Extracted signature function:\n' + code)
def _parse_sig_js(self, jscode):
funcname = self._search_regex(
r'signature=([$a-zA-Z]+)', jscode,
- u'Initial JS player signature function name')
+ 'Initial JS player signature function name')
jsi = JSInterpreter(jscode)
initial_function = jsi.extract_function(funcname)
@@ -510,9 +507,9 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
def _parse_sig_swf(self, file_contents):
swfi = SWFInterpreter(file_contents)
- TARGET_CLASSNAME = u'SignatureDecipher'
+ TARGET_CLASSNAME = 'SignatureDecipher'
searched_class = swfi.extract_class(TARGET_CLASSNAME)
- initial_function = swfi.extract_function(searched_class, u'decipher')
+ initial_function = swfi.extract_function(searched_class, 'decipher')
return lambda s: initial_function([s])
def _decrypt_signature(self, s, video_id, player_url, age_gate=False):
@@ -522,7 +519,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
raise ExtractorError(u'Cannot decrypt signature without player_url')
if player_url.startswith(u'//'):
- player_url = u'https:' + player_url
+ player_url = 'https:' + player_url
try:
player_id = (player_url, self._signature_cache_id(s))
if player_id not in self._player_cache:
@@ -537,7 +534,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
except Exception as e:
tb = traceback.format_exc()
raise ExtractorError(
- u'Signature extraction failed: ' + tb, cause=e)
+ 'Signature extraction failed: ' + tb, cause=e)
def _get_available_subtitles(self, video_id, webpage):
try:
@@ -560,7 +557,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
'fmt': self._downloader.params.get('subtitlesformat', 'srt'),
'name': unescapeHTML(l[0]).encode('utf-8'),
})
- url = u'https://www.youtube.com/api/timedtext?' + params
+ url = 'https://www.youtube.com/api/timedtext?' + params
sub_lang_list[lang] = url
if not sub_lang_list:
self._downloader.report_warning(u'video doesn\'t have subtitles')
@@ -573,7 +570,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
sub_format = self._downloader.params.get('subtitlesformat', 'srt')
self.to_screen(u'%s: Looking for automatic captions' % video_id)
mobj = re.search(r';ytplayer.config = ({.*?});', webpage)
- err_msg = u'Couldn\'t find automatic captions for %s' % video_id
+ err_msg = 'Couldn\'t find automatic captions for %s' % video_id
if mobj is None:
self._downloader.report_warning(err_msg)
return {}
@@ -629,7 +626,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
urls = filter(lambda l: l and not l.startswith('#'),
lines)
return urls
- manifest = self._download_webpage(manifest_url, video_id, u'Downloading formats manifest')
+ manifest = self._download_webpage(manifest_url, video_id, 'Downloading formats manifest')
formats_urls = _get_urls(manifest)
for format_url in formats_urls:
itag = self._search_regex(r'itag/(\d+?)/', format_url, 'itag')
@@ -642,8 +639,8 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
def _real_extract(self, url):
proto = (
- u'http' if self._downloader.params.get('prefer_insecure', False)
- else u'https')
+ 'http' if self._downloader.params.get('prefer_insecure', False)
+ else 'https')
# Extract original video URL from URL with redirection, like age verification, using next_url parameter
mobj = re.search(self._NEXT_URL_RE, url)
@@ -694,11 +691,11 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
if 'token' not in video_info:
if 'reason' in video_info:
raise ExtractorError(
- u'YouTube said: %s' % video_info['reason'][0],
+ 'YouTube said: %s' % video_info['reason'][0],
expected=True, video_id=video_id)
else:
raise ExtractorError(
- u'"token" parameter not in video info for unknown reason',
+ '"token" parameter not in video info for unknown reason',
video_id=video_id)
if 'view_count' in video_info:
@@ -731,7 +728,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
video_title = video_info['title'][0]
else:
self._downloader.report_warning(u'Unable to extract video title')
- video_title = u'_'
+ video_title = '_'
# thumbnail image
# We try first to get a high quality image:
@@ -785,7 +782,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
if fd_mobj:
video_description = unescapeHTML(fd_mobj.group(1))
else:
- video_description = u''
+ video_description = ''
def _extract_count(count_name):
count = self._search_regex(
@@ -832,7 +829,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
if m_s is not None:
self.to_screen(u'%s: Encrypted signatures detected.' % video_id)
video_info['url_encoded_fmt_stream_map'] = [args['url_encoded_fmt_stream_map']]
- m_s = re_signature.search(args.get('adaptive_fmts', u''))
+ m_s = re_signature.search(args.get('adaptive_fmts', ''))
if m_s is not None:
if 'adaptive_fmts' in video_info:
video_info['adaptive_fmts'][0] += ',' + args['adaptive_fmts']
@@ -882,12 +879,12 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
if not age_gate:
jsplayer_url_json = self._search_regex(
r'"assets":.+?"js":\s*("[^"]+")',
- video_webpage, u'JS player URL')
+ video_webpage, 'JS player URL')
player_url = json.loads(jsplayer_url_json)
if player_url is None:
player_url_json = self._search_regex(
r'ytplayer\.config.*?"url"\s*:\s*("[^"]+")',
- video_webpage, u'age gate player URL')
+ video_webpage, 'age gate player URL')
player_url = json.loads(player_url_json)
if self._downloader.params.get('verbose'):
@@ -898,14 +895,14 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
if player_url.endswith('swf'):
player_version = self._search_regex(
r'-(.+?)(?:/watch_as3)?\.swf$', player_url,
- u'flash player', fatal=False)
+ 'flash player', fatal=False)
player_desc = 'flash player %s' % player_version
else:
player_version = self._search_regex(
r'html5player-([^/]+?)(?:/html5player)?\.js',
player_url,
'html5 player', fatal=False)
- player_desc = u'html5 player %s' % player_version
+ player_desc = 'html5 player %s' % player_version
parts_sizes = self._signature_cache_id(encrypted_sig)
self.to_screen(u'{%s} signature length %s, %s' %
@@ -997,7 +994,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
}
class YoutubePlaylistIE(YoutubeBaseInfoExtractor):
- IE_DESC = u'YouTube.com playlists'
+ IE_DESC = 'YouTube.com playlists'
_VALID_URL = r"""(?x)(?:
(?:https?://)?
(?:\w+\.)?
@@ -1019,7 +1016,47 @@ class YoutubePlaylistIE(YoutubeBaseInfoExtractor):
_TEMPLATE_URL = 'https://www.youtube.com/playlist?list=%s'
_MORE_PAGES_INDICATOR = r'data-link-type="next"'
_VIDEO_RE = r'href="\s*/watch\?v=(?P<id>[0-9A-Za-z_-]{11})&amp;[^"]*?index=(?P<index>\d+)'
- IE_NAME = u'youtube:playlist'
+ IE_NAME = 'youtube:playlist'
+ _TESTS = [{
+ 'url': 'https://www.youtube.com/playlist?list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re',
+ 'info_dict': {
+ 'title': 'ytdl test PL',
+ },
+ 'playlist_count': 3,
+ }, {
+ 'url': 'https://www.youtube.com/playlist?list=PLtPgu7CB4gbZDA7i_euNxn75ISqxwZPYx',
+ 'info_dict': {
+ 'title': 'YDL_Empty_List',
+ },
+ 'playlist_count': 0,
+ }, {
+ 'note': 'Playlist with deleted videos (#651). As a bonus, the video #51 is also twice in this list.',
+ 'url': 'https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
+ 'info_dict': {
+ 'title': '29C3: Not my department',
+ },
+ 'playlist_count': 95,
+ }, {
+ 'note': 'issue #673',
+ 'url': 'PLBB231211A4F62143',
+ 'info_dict': {
+ 'title': 'Team Fortress 2 (Class-based LP)',
+ },
+ 'playlist_mincount': 26,
+ }, {
+ 'note': 'Large playlist',
+ 'url': 'https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q',
+ 'info_dict': {
+ 'title': 'Uploads from Cauchemar',
+ },
+ 'playlist_mincount': 799,
+ }, {
+ 'url': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
+ 'info_dict': {
+ 'title': 'YDL_safe_search',
+ },
+ 'playlist_count': 2,
+ }]
def _real_initialize(self):
self._login()
@@ -1034,7 +1071,7 @@ class YoutubePlaylistIE(YoutubeBaseInfoExtractor):
# the id of the playlist is just 'RD' + video_id
url = 'https://youtube.com/watch?v=%s&list=%s' % (playlist_id[-11:], playlist_id)
webpage = self._download_webpage(
- url, playlist_id, u'Downloading Youtube mix')
+ url, playlist_id, 'Downloading Youtube mix')
search_title = lambda class_name: get_element_by_attribute('class', class_name, webpage)
title_span = (
search_title('playlist-title') or
@@ -1071,7 +1108,7 @@ class YoutubePlaylistIE(YoutubeBaseInfoExtractor):
return self._extract_mix(playlist_id)
if playlist_id.startswith('TL'):
raise ExtractorError(u'For downloading YouTube.com top lists, use '
- u'the "yttoplist" keyword, for example "youtube-dl \'yttoplist:music:Top Tracks\'"', expected=True)
+ 'the "yttoplist" keyword, for example "youtube-dl \'yttoplist:music:Top Tracks\'"', expected=True)
url = self._TEMPLATE_URL % playlist_id
page = self._download_webpage(url, playlist_id)
@@ -1080,7 +1117,7 @@ class YoutubePlaylistIE(YoutubeBaseInfoExtractor):
# Check if the playlist exists or is private
if re.search(r'<div class="yt-alert-message">[^<]*?(The|This) playlist (does not exist|is private)[^<]*?</div>', page) is not None:
raise ExtractorError(
- u'The playlist doesn\'t exist or is private, use --username or '
+ 'The playlist doesn\'t exist or is private, use --username or '
'--netrc to access it.',
expected=True)
@@ -1107,17 +1144,18 @@ class YoutubePlaylistIE(YoutubeBaseInfoExtractor):
playlist_title = self._html_search_regex(
r'(?s)<h1 class="pl-header-title[^"]*">\s*(.*?)\s*</h1>',
- page, u'title')
+ page, 'title')
url_results = self._ids_to_results(ids)
return self.playlist_result(url_results, playlist_id, playlist_title)
class YoutubeTopListIE(YoutubePlaylistIE):
- IE_NAME = u'youtube:toplist'
+ IE_NAME = 'youtube:toplist'
IE_DESC = (u'YouTube.com top lists, "yttoplist:{channel}:{list title}"'
- u' (Example: "yttoplist:music:Top Tracks")')
+ ' (Example: "yttoplist:music:Top Tracks")')
_VALID_URL = r'yttoplist:(?P<chann>.*?):(?P<title>.*?)$'
+ _TESTS = []
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
@@ -1126,7 +1164,7 @@ class YoutubeTopListIE(YoutubePlaylistIE):
query = compat_urllib_parse.urlencode({'title': title})
playlist_re = 'href="([^"]+?%s.*?)"' % re.escape(query)
channel_page = self._download_webpage('https://www.youtube.com/%s' % channel, title)
- link = self._html_search_regex(playlist_re, channel_page, u'list')
+ link = self._html_search_regex(playlist_re, channel_page, 'list')
url = compat_urlparse.urljoin('https://www.youtube.com/', link)
video_re = r'data-index="\d+".*?data-video-id="([0-9A-Za-z_-]{11})"'
@@ -1134,7 +1172,7 @@ class YoutubeTopListIE(YoutubePlaylistIE):
# sometimes the webpage doesn't contain the videos
# retry until we get them
for i in itertools.count(0):
- msg = u'Downloading Youtube mix'
+ msg = 'Downloading Youtube mix'
if i > 0:
msg += ', retry #%d' % i
@@ -1147,11 +1185,11 @@ class YoutubeTopListIE(YoutubePlaylistIE):
class YoutubeChannelIE(InfoExtractor):
- IE_DESC = u'YouTube.com channels'
+ IE_DESC = 'YouTube.com channels'
_VALID_URL = r"^(?:https?://)?(?:youtu\.be|(?:\w+\.)?youtube(?:-nocookie)?\.com)/channel/([0-9A-Za-z_-]+)"
_MORE_PAGES_INDICATOR = 'yt-uix-load-more'
_MORE_PAGES_URL = 'https://www.youtube.com/c4_browse_ajax?action_load_more_videos=1&flow=list&paging=%s&view=0&sort=da&channel_id=%s'
- IE_NAME = u'youtube:channel'
+ IE_NAME = 'youtube:channel'
def extract_videos_from_page(self, page):
ids_in_page = []
@@ -1203,12 +1241,12 @@ class YoutubeChannelIE(InfoExtractor):
class YoutubeUserIE(InfoExtractor):
- IE_DESC = u'YouTube.com user videos (URL or "ytuser" keyword)'
+ IE_DESC = 'YouTube.com user videos (URL or "ytuser" keyword)'
_VALID_URL = r'(?:(?:(?:https?://)?(?:\w+\.)?youtube\.com/(?:user/)?(?!(?:attribution_link|watch|results)(?:$|[^a-z_A-Z0-9-])))|ytuser:)(?!feed/)([A-Za-z0-9_-]+)'
_TEMPLATE_URL = 'https://gdata.youtube.com/feeds/api/users/%s'
_GDATA_PAGE_SIZE = 50
_GDATA_URL = 'https://gdata.youtube.com/feeds/api/users/%s/uploads?max-results=%d&start-index=%d&alt=json'
- IE_NAME = u'youtube:user'
+ IE_NAME = 'youtube:user'
@classmethod
def suitable(cls, url):
@@ -1237,7 +1275,7 @@ class YoutubeUserIE(InfoExtractor):
gdata_url = self._GDATA_URL % (username, self._GDATA_PAGE_SIZE, start_index)
page = self._download_webpage(
gdata_url, username,
- u'Downloading video ids from %d to %d' % (
+ 'Downloading video ids from %d to %d' % (
start_index, start_index + self._GDATA_PAGE_SIZE))
try:
@@ -1265,10 +1303,10 @@ class YoutubeUserIE(InfoExtractor):
class YoutubeSearchIE(SearchInfoExtractor):
- IE_DESC = u'YouTube.com searches'
- _API_URL = u'https://gdata.youtube.com/feeds/api/videos?q=%s&start-index=%i&max-results=50&v=2&alt=jsonc'
+ IE_DESC = 'YouTube.com searches'
+ _API_URL = 'https://gdata.youtube.com/feeds/api/videos?q=%s&start-index=%i&max-results=50&v=2&alt=jsonc'
_MAX_RESULTS = 1000
- IE_NAME = u'youtube:search'
+ IE_NAME = 'youtube:search'
_SEARCH_KEY = 'ytsearch'
def _get_n_results(self, query, n):
@@ -1292,7 +1330,7 @@ class YoutubeSearchIE(SearchInfoExtractor):
if 'items' not in api_response:
raise ExtractorError(
- u'[youtube] No video results', expected=True)
+ '[youtube] No video results', expected=True)
new_ids = list(video['id'] for video in api_response['items'])
video_ids += new_ids
@@ -1311,12 +1349,12 @@ class YoutubeSearchDateIE(YoutubeSearchIE):
IE_NAME = YoutubeSearchIE.IE_NAME + ':date'
_API_URL = 'https://gdata.youtube.com/feeds/api/videos?q=%s&start-index=%i&max-results=50&v=2&alt=jsonc&orderby=published'
_SEARCH_KEY = 'ytsearchdate'
- IE_DESC = u'YouTube.com searches, newest videos first'
+ IE_DESC = 'YouTube.com searches, newest videos first'
class YoutubeSearchURLIE(InfoExtractor):
- IE_DESC = u'YouTube.com search URLs'
- IE_NAME = u'youtube:search_url'
+ IE_DESC = 'YouTube.com search URLs'
+ IE_NAME = 'youtube:search_url'
_VALID_URL = r'https?://(?:www\.)?youtube\.com/results\?(.*?&)?search_query=(?P<query>[^&]+)(?:[&]|$)'
def _real_extract(self, url):
@@ -1325,7 +1363,7 @@ class YoutubeSearchURLIE(InfoExtractor):
webpage = self._download_webpage(url, query)
result_code = self._search_regex(
- r'(?s)<ol class="item-section"(.*?)</ol>', webpage, u'result HTML')
+ r'(?s)<ol class="item-section"(.*?)</ol>', webpage, 'result HTML')
part_codes = re.findall(
r'(?s)<h3 class="yt-lockup-title">(.*?)</h3>', result_code)
@@ -1351,14 +1389,14 @@ class YoutubeSearchURLIE(InfoExtractor):
class YoutubeShowIE(InfoExtractor):
- IE_DESC = u'YouTube.com (multi-season) shows'
+ IE_DESC = 'YouTube.com (multi-season) shows'
_VALID_URL = r'https?://www\.youtube\.com/show/(.*)'
- IE_NAME = u'youtube:show'
+ IE_NAME = 'youtube:show'
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
show_name = mobj.group(1)
- webpage = self._download_webpage(url, show_name, u'Downloading show webpage')
+ webpage = self._download_webpage(url, show_name, 'Downloading show webpage')
# There's one playlist for each season of the show
m_seasons = list(re.finditer(r'href="(/playlist\?list=.*?)"', webpage))
self.to_screen(u'%s: Found %s seasons' % (show_name, len(m_seasons)))
@@ -1384,7 +1422,7 @@ class YoutubeFeedsInfoExtractor(YoutubeBaseInfoExtractor):
@property
def IE_NAME(self):
- return u'youtube:%s' % self._FEED_NAME
+ return 'youtube:%s' % self._FEED_NAME
def _real_initialize(self):
self._login()
@@ -1394,9 +1432,10 @@ class YoutubeFeedsInfoExtractor(YoutubeBaseInfoExtractor):
paging = 0
for i in itertools.count(1):
info = self._download_json(self._FEED_TEMPLATE % paging,
- u'%s feed' % self._FEED_NAME,
- u'Downloading page %s' % i)
+ '%s feed' % self._FEED_NAME,
+ 'Downloading page %s' % i)
feed_html = info.get('feed_html') or info.get('content_html')
+ load_more_widget_html = info.get('load_more_widget_html') or feed_html
m_ids = re.finditer(r'"/watch\?v=(.*?)["&]', feed_html)
ids = orderedSet(m.group(1) for m in m_ids)
feed_entries.extend(
@@ -1404,51 +1443,52 @@ class YoutubeFeedsInfoExtractor(YoutubeBaseInfoExtractor):
for video_id in ids)
mobj = re.search(
r'data-uix-load-more-href="/?[^"]+paging=(?P<paging>\d+)',
- feed_html)
+ load_more_widget_html)
if mobj is None:
break
paging = mobj.group('paging')
return self.playlist_result(feed_entries, playlist_title=self._PLAYLIST_TITLE)
class YoutubeRecommendedIE(YoutubeFeedsInfoExtractor):
- IE_DESC = u'YouTube.com recommended videos, "ytrec" keyword (requires authentication)'
+ IE_DESC = 'YouTube.com recommended videos, "ytrec" keyword (requires authentication)'
_VALID_URL = r'https?://www\.youtube\.com/feed/recommended|:ytrec(?:ommended)?'
_FEED_NAME = 'recommended'
- _PLAYLIST_TITLE = u'Youtube Recommended videos'
+ _PLAYLIST_TITLE = 'Youtube Recommended videos'
class YoutubeWatchLaterIE(YoutubeFeedsInfoExtractor):
- IE_DESC = u'Youtube watch later list, "ytwatchlater" keyword (requires authentication)'
+ IE_DESC = 'Youtube watch later list, "ytwatchlater" keyword (requires authentication)'
_VALID_URL = r'https?://www\.youtube\.com/feed/watch_later|:ytwatchlater'
_FEED_NAME = 'watch_later'
- _PLAYLIST_TITLE = u'Youtube Watch Later'
+ _PLAYLIST_TITLE = 'Youtube Watch Later'
_PERSONAL_FEED = True
class YoutubeHistoryIE(YoutubeFeedsInfoExtractor):
- IE_DESC = u'Youtube watch history, "ythistory" keyword (requires authentication)'
- _VALID_URL = u'https?://www\.youtube\.com/feed/history|:ythistory'
+ IE_DESC = 'Youtube watch history, "ythistory" keyword (requires authentication)'
+ _VALID_URL = 'https?://www\.youtube\.com/feed/history|:ythistory'
_FEED_NAME = 'history'
_PERSONAL_FEED = True
- _PLAYLIST_TITLE = u'Youtube Watch History'
+ _PLAYLIST_TITLE = 'Youtube Watch History'
class YoutubeFavouritesIE(YoutubeBaseInfoExtractor):
- IE_NAME = u'youtube:favorites'
- IE_DESC = u'YouTube.com favourite videos, "ytfav" keyword (requires authentication)'
+ IE_NAME = 'youtube:favorites'
+ IE_DESC = 'YouTube.com favourite videos, "ytfav" keyword (requires authentication)'
_VALID_URL = r'https?://www\.youtube\.com/my_favorites|:ytfav(?:ou?rites)?'
_LOGIN_REQUIRED = True
def _real_extract(self, url):
webpage = self._download_webpage('https://www.youtube.com/my_favorites', 'Youtube Favourites videos')
- playlist_id = self._search_regex(r'list=(.+?)["&]', webpage, u'favourites playlist id')
+ playlist_id = self._search_regex(r'list=(.+?)["&]', webpage, 'favourites playlist id')
return self.url_result(playlist_id, 'YoutubePlaylist')
class YoutubeSubscriptionsIE(YoutubePlaylistIE):
- IE_NAME = u'youtube:subscriptions'
- IE_DESC = u'YouTube.com subscriptions feed, "ytsubs" keyword (requires authentication)'
+ IE_NAME = 'youtube:subscriptions'
+ IE_DESC = 'YouTube.com subscriptions feed, "ytsubs" keyword (requires authentication)'
_VALID_URL = r'https?://www\.youtube\.com/feed/subscriptions|:ytsubs(?:criptions)?'
+ _TESTS = []
def _real_extract(self, url):
- title = u'Youtube Subscriptions'
+ title = 'Youtube Subscriptions'
page = self._download_webpage('https://www.youtube.com/feed/subscriptions', title)
# The extraction process is the same as for playlists, but the regex
@@ -1500,9 +1540,9 @@ class YoutubeTruncatedURLIE(InfoExtractor):
def _real_extract(self, url):
raise ExtractorError(
- u'Did you forget to quote the URL? Remember that & is a meta '
- u'character in most shells, so you want to put the URL in quotes, '
- u'like youtube-dl '
- u'"http://www.youtube.com/watch?feature=foo&v=BaW_jenozKc" '
- u' or simply youtube-dl BaW_jenozKc .',
+ 'Did you forget to quote the URL? Remember that & is a meta '
+ 'character in most shells, so you want to put the URL in quotes, '
+ 'like youtube-dl '
+ '"http://www.youtube.com/watch?feature=foo&v=BaW_jenozKc" '
+ ' or simply youtube-dl BaW_jenozKc .',
expected=True)
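
For reference, a minimal sketch (URLs invented for the example, assuming this youtube_dl checkout is importable) of the effect of the new `(?!.*?&list=)` lookahead that replaces the removed suitable() override: a plain watch URL still matches YoutubeIE, while a combined watch-plus-list URL is left to the playlist extractor, as the in-regex comment states:

    from youtube_dl.extractor.youtube import YoutubeIE, YoutubePlaylistIE

    plain = 'https://www.youtube.com/watch?v=BaW_jenozKc'
    combined = 'https://www.youtube.com/watch?v=BaW_jenozKc&list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re'

    print(YoutubeIE.suitable(plain))             # True  - single video
    print(YoutubeIE.suitable(combined))          # False - the lookahead rejects &list= URLs
    print(YoutubePlaylistIE.suitable(combined))  # True  - handled by the playlist IE
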
diff --git a/youtube_dl/options.py b/youtube_dl/options.py
new file mode 100644
index 000000000..31baab469
--- /dev/null
+++ b/youtube_dl/options.py
@@ -0,0 +1,481 @@
+from __future__ import unicode_literals
+
+import os.path
+import optparse
+import shlex
+import sys
+
+from .utils import (
+ get_term_width,
+ write_string,
+)
+from .version import __version__
+
+
+def parseOpts(overrideArguments=None):
+ def _readOptions(filename_bytes, default=[]):
+ try:
+ optionf = open(filename_bytes)
+ except IOError:
+ return default # silently skip if file is not present
+ try:
+ res = []
+ for l in optionf:
+ res += shlex.split(l, comments=True)
+ finally:
+ optionf.close()
+ return res
+
+ def _readUserConf():
+ xdg_config_home = os.environ.get('XDG_CONFIG_HOME')
+ if xdg_config_home:
+ userConfFile = os.path.join(xdg_config_home, 'youtube-dl', 'config')
+ if not os.path.isfile(userConfFile):
+ userConfFile = os.path.join(xdg_config_home, 'youtube-dl.conf')
+ else:
+ userConfFile = os.path.join(os.path.expanduser('~'), '.config', 'youtube-dl', 'config')
+ if not os.path.isfile(userConfFile):
+ userConfFile = os.path.join(os.path.expanduser('~'), '.config', 'youtube-dl.conf')
+ userConf = _readOptions(userConfFile, None)
+
+ if userConf is None:
+ appdata_dir = os.environ.get('appdata')
+ if appdata_dir:
+ userConf = _readOptions(
+ os.path.join(appdata_dir, 'youtube-dl', 'config'),
+ default=None)
+ if userConf is None:
+ userConf = _readOptions(
+ os.path.join(appdata_dir, 'youtube-dl', 'config.txt'),
+ default=None)
+
+ if userConf is None:
+ userConf = _readOptions(
+ os.path.join(os.path.expanduser('~'), 'youtube-dl.conf'),
+ default=None)
+ if userConf is None:
+ userConf = _readOptions(
+ os.path.join(os.path.expanduser('~'), 'youtube-dl.conf.txt'),
+ default=None)
+
+ if userConf is None:
+ userConf = []
+
+ return userConf
+
+ def _format_option_string(option):
+ ''' ('-o', '--option') -> -o, --format METAVAR'''
+
+ opts = []
+
+ if option._short_opts:
+ opts.append(option._short_opts[0])
+ if option._long_opts:
+ opts.append(option._long_opts[0])
+ if len(opts) > 1:
+ opts.insert(1, ', ')
+
+ if option.takes_value(): opts.append(' %s' % option.metavar)
+
+ return "".join(opts)
+
+ def _comma_separated_values_options_callback(option, opt_str, value, parser):
+ setattr(parser.values, option.dest, value.split(','))
+
+ def _hide_login_info(opts):
+ opts = list(opts)
+ for private_opt in ['-p', '--password', '-u', '--username', '--video-password']:
+ try:
+ i = opts.index(private_opt)
+ opts[i+1] = '<PRIVATE>'
+ except ValueError:
+ pass
+ return opts
+
+ max_width = 80
+ max_help_position = 80
+
+ # No need to wrap help messages if we're on a wide console
+ columns = get_term_width()
+ if columns: max_width = columns
+
+ fmt = optparse.IndentedHelpFormatter(width=max_width, max_help_position=max_help_position)
+ fmt.format_option_strings = _format_option_string
+
+ kw = {
+ 'version' : __version__,
+ 'formatter' : fmt,
+ 'usage' : '%prog [options] url [url...]',
+ 'conflict_handler' : 'resolve',
+ }
+
+ parser = optparse.OptionParser(**kw)
+
+ # option groups
+ general = optparse.OptionGroup(parser, 'General Options')
+ selection = optparse.OptionGroup(parser, 'Video Selection')
+ authentication = optparse.OptionGroup(parser, 'Authentication Options')
+ video_format = optparse.OptionGroup(parser, 'Video Format Options')
+ subtitles = optparse.OptionGroup(parser, 'Subtitle Options')
+ downloader = optparse.OptionGroup(parser, 'Download Options')
+ postproc = optparse.OptionGroup(parser, 'Post-processing Options')
+ filesystem = optparse.OptionGroup(parser, 'Filesystem Options')
+ workarounds = optparse.OptionGroup(parser, 'Workarounds')
+ verbosity = optparse.OptionGroup(parser, 'Verbosity / Simulation Options')
+
+ general.add_option('-h', '--help',
+ action='help', help='print this help text and exit')
+ general.add_option('-v', '--version',
+ action='version', help='print program version and exit')
+ general.add_option('-U', '--update',
+ action='store_true', dest='update_self', help='update this program to latest version. Make sure that you have sufficient permissions (run with sudo if needed)')
+ general.add_option('-i', '--ignore-errors',
+ action='store_true', dest='ignoreerrors', help='continue on download errors, for example to skip unavailable videos in a playlist', default=False)
+ general.add_option('--abort-on-error',
+ action='store_false', dest='ignoreerrors',
+ help='Abort downloading of further videos (in the playlist or the command line) if an error occurs')
+ general.add_option('--dump-user-agent',
+ action='store_true', dest='dump_user_agent',
+ help='display the current browser identification', default=False)
+ general.add_option('--list-extractors',
+ action='store_true', dest='list_extractors',
+ help='List all supported extractors and the URLs they would handle', default=False)
+ general.add_option('--extractor-descriptions',
+ action='store_true', dest='list_extractor_descriptions',
+ help='Output descriptions of all supported extractors', default=False)
+ general.add_option(
+ '--proxy', dest='proxy', default=None, metavar='URL',
+ help='Use the specified HTTP/HTTPS proxy. Pass in an empty string (--proxy "") for direct connection')
+ general.add_option(
+ '--socket-timeout', dest='socket_timeout',
+ type=float, default=None, help=u'Time to wait before giving up, in seconds')
+ general.add_option(
+ '--default-search',
+ dest='default_search', metavar='PREFIX',
+ help='Use this prefix for unqualified URLs. For example "gvsearch2:" downloads two videos from google videos for youtube-dl "large apple". Use the value "auto" to let youtube-dl guess ("auto_warning" to emit a warning when guessing). "error" just throws an error. The default value "fixup_error" repairs broken URLs, but emits an error if this is not possible instead of searching.')
+ general.add_option(
+ '--ignore-config',
+ action='store_true',
+ help='Do not read configuration files. When given in the global configuration file /etc/youtube-dl.conf: do not read the user configuration in ~/.config/youtube-dl.conf (%APPDATA%/youtube-dl/config.txt on Windows)')
+
+ selection.add_option(
+ '--playlist-start',
+ dest='playliststart', metavar='NUMBER', default=1, type=int,
+ help='playlist video to start at (default is %default)')
+ selection.add_option(
+ '--playlist-end',
+ dest='playlistend', metavar='NUMBER', default=None, type=int,
+ help='playlist video to end at (default is last)')
+ selection.add_option('--match-title', dest='matchtitle', metavar='REGEX',help='download only matching titles (regex or caseless sub-string)')
+ selection.add_option('--reject-title', dest='rejecttitle', metavar='REGEX',help='skip download for matching titles (regex or caseless sub-string)')
+ selection.add_option('--max-downloads', metavar='NUMBER',
+ dest='max_downloads', type=int, default=None,
+ help='Abort after downloading NUMBER files')
+ selection.add_option('--min-filesize', metavar='SIZE', dest='min_filesize', help="Do not download any videos smaller than SIZE (e.g. 50k or 44.6m)", default=None)
+ selection.add_option('--max-filesize', metavar='SIZE', dest='max_filesize', help="Do not download any videos larger than SIZE (e.g. 50k or 44.6m)", default=None)
+ selection.add_option('--date', metavar='DATE', dest='date', help='download only videos uploaded in this date', default=None)
+ selection.add_option(
+ '--datebefore', metavar='DATE', dest='datebefore', default=None,
+ help='download only videos uploaded on or before this date (i.e. inclusive)')
+ selection.add_option(
+ '--dateafter', metavar='DATE', dest='dateafter', default=None,
+ help='download only videos uploaded on or after this date (i.e. inclusive)')
+ selection.add_option(
+ '--min-views', metavar='COUNT', dest='min_views',
+ default=None, type=int,
+ help="Do not download any videos with less than COUNT views",)
+ selection.add_option(
+ '--max-views', metavar='COUNT', dest='max_views',
+ default=None, type=int,
+ help="Do not download any videos with more than COUNT views",)
+ selection.add_option('--no-playlist', action='store_true', dest='noplaylist', help='download only the currently playing video', default=False)
+ selection.add_option('--age-limit', metavar='YEARS', dest='age_limit',
+ help='download only videos suitable for the given age',
+ default=None, type=int)
+ selection.add_option('--download-archive', metavar='FILE',
+ dest='download_archive',
+ help='Download only videos not listed in the archive file. Record the IDs of all downloaded videos in it.')
+ selection.add_option(
+ '--include-ads', dest='include_ads',
+ action='store_true',
+ help='Download advertisements as well (experimental)')
+ selection.add_option(
+ '--youtube-include-dash-manifest', action='store_true',
+ dest='youtube_include_dash_manifest', default=False,
+ help='Try to download the DASH manifest on YouTube videos (experimental)')
+
+ authentication.add_option('-u', '--username',
+ dest='username', metavar='USERNAME', help='account username')
+ authentication.add_option('-p', '--password',
+ dest='password', metavar='PASSWORD', help='account password')
+ authentication.add_option('-2', '--twofactor',
+ dest='twofactor', metavar='TWOFACTOR', help='two-factor auth code')
+ authentication.add_option('-n', '--netrc',
+ action='store_true', dest='usenetrc', help='use .netrc authentication data', default=False)
+ authentication.add_option('--video-password',
+ dest='videopassword', metavar='PASSWORD', help='video password (vimeo, smotri)')
+
+
+ video_format.add_option('-f', '--format',
+ action='store', dest='format', metavar='FORMAT', default=None,
+ help='video format code, specify the order of preference using slashes: "-f 22/17/18". "-f mp4" and "-f flv" are also supported. You can also use the special names "best", "bestvideo", "bestaudio", "worst", "worstvideo" and "worstaudio". By default, youtube-dl will pick the best quality.')
+ video_format.add_option('--all-formats',
+ action='store_const', dest='format', help='download all available video formats', const='all')
+ video_format.add_option('--prefer-free-formats',
+ action='store_true', dest='prefer_free_formats', default=False, help='prefer free video formats unless a specific one is requested')
+ video_format.add_option('--max-quality',
+ action='store', dest='format_limit', metavar='FORMAT', help='highest quality format to download')
+ video_format.add_option('-F', '--list-formats',
+ action='store_true', dest='listformats', help='list all available formats')
+
+ subtitles.add_option('--write-sub', '--write-srt',
+ action='store_true', dest='writesubtitles',
+ help='write subtitle file', default=False)
+ subtitles.add_option('--write-auto-sub', '--write-automatic-sub',
+ action='store_true', dest='writeautomaticsub',
+ help='write automatic subtitle file (youtube only)', default=False)
+ subtitles.add_option('--all-subs',
+ action='store_true', dest='allsubtitles',
+ help='downloads all the available subtitles of the video', default=False)
+ subtitles.add_option('--list-subs',
+ action='store_true', dest='listsubtitles',
+ help='lists all available subtitles for the video', default=False)
+ subtitles.add_option('--sub-format',
+ action='store', dest='subtitlesformat', metavar='FORMAT',
+ help='subtitle format (default=srt) ([sbv/vtt] youtube only)', default='srt')
+ subtitles.add_option('--sub-lang', '--sub-langs', '--srt-lang',
+ action='callback', dest='subtitleslangs', metavar='LANGS', type='str',
+ default=[], callback=_comma_separated_values_options_callback,
+ help='languages of the subtitles to download (optional) separated by commas, use IETF language tags like \'en,pt\'')
+
+ downloader.add_option('-r', '--rate-limit',
+ dest='ratelimit', metavar='LIMIT', help='maximum download rate in bytes per second (e.g. 50K or 4.2M)')
+ downloader.add_option('-R', '--retries',
+ dest='retries', metavar='RETRIES', help='number of retries (default is %default)', default=10)
+ downloader.add_option('--buffer-size',
+ dest='buffersize', metavar='SIZE', help='size of download buffer (e.g. 1024 or 16K) (default is %default)', default="1024")
+ downloader.add_option('--no-resize-buffer',
+ action='store_true', dest='noresizebuffer',
+ help='do not automatically adjust the buffer size. By default, the buffer size is automatically resized from an initial value of SIZE.', default=False)
+ downloader.add_option('--test', action='store_true', dest='test', default=False, help=optparse.SUPPRESS_HELP)
+
+ workarounds.add_option(
+ '--encoding', dest='encoding', metavar='ENCODING',
+ help='Force the specified encoding (experimental)')
+ workarounds.add_option(
+ '--no-check-certificate', action='store_true',
+ dest='no_check_certificate', default=False,
+ help='Suppress HTTPS certificate validation.')
+ workarounds.add_option(
+ '--prefer-insecure', '--prefer-unsecure', action='store_true', dest='prefer_insecure',
+ help='Use an unencrypted connection to retrieve information about the video. (Currently supported only for YouTube)')
+ workarounds.add_option(
+ '--user-agent', metavar='UA',
+ dest='user_agent', help='specify a custom user agent')
+ workarounds.add_option(
+ '--referer', metavar='REF',
+ dest='referer', default=None,
+ help='specify a custom referer, use if the video access is restricted to one domain',
+ )
+ workarounds.add_option(
+ '--add-header', metavar='FIELD:VALUE',
+ dest='headers', action='append',
+ help='specify a custom HTTP header and its value, separated by a colon \':\'. You can use this option multiple times',
+ )
+ workarounds.add_option(
+ '--bidi-workaround', dest='bidi_workaround', action='store_true',
+ help=u'Work around terminals that lack bidirectional text support. Requires bidiv or fribidi executable in PATH')
+
+ verbosity.add_option('-q', '--quiet',
+ action='store_true', dest='quiet', help='activates quiet mode', default=False)
+ verbosity.add_option(
+ '--no-warnings',
+ dest='no_warnings', action='store_true', default=False,
+ help='Ignore warnings')
+ verbosity.add_option('-s', '--simulate',
+ action='store_true', dest='simulate', help='do not download the video and do not write anything to disk', default=False)
+ verbosity.add_option('--skip-download',
+ action='store_true', dest='skip_download', help='do not download the video', default=False)
+ verbosity.add_option('-g', '--get-url',
+ action='store_true', dest='geturl', help='simulate, quiet but print URL', default=False)
+ verbosity.add_option('-e', '--get-title',
+ action='store_true', dest='gettitle', help='simulate, quiet but print title', default=False)
+ verbosity.add_option('--get-id',
+ action='store_true', dest='getid', help='simulate, quiet but print id', default=False)
+ verbosity.add_option('--get-thumbnail',
+ action='store_true', dest='getthumbnail',
+ help='simulate, quiet but print thumbnail URL', default=False)
+ verbosity.add_option('--get-description',
+ action='store_true', dest='getdescription',
+ help='simulate, quiet but print video description', default=False)
+ verbosity.add_option('--get-duration',
+ action='store_true', dest='getduration',
+ help='simulate, quiet but print video length', default=False)
+ verbosity.add_option('--get-filename',
+ action='store_true', dest='getfilename',
+ help='simulate, quiet but print output filename', default=False)
+ verbosity.add_option('--get-format',
+ action='store_true', dest='getformat',
+ help='simulate, quiet but print output format', default=False)
+ verbosity.add_option('-j', '--dump-json',
+ action='store_true', dest='dumpjson',
+ help='simulate, quiet but print JSON information. See --output for a description of available keys.', default=False)
+ verbosity.add_option('--newline',
+ action='store_true', dest='progress_with_newline', help='output progress bar as new lines', default=False)
+ verbosity.add_option('--no-progress',
+ action='store_true', dest='noprogress', help='do not print progress bar', default=False)
+ verbosity.add_option('--console-title',
+ action='store_true', dest='consoletitle',
+ help='display progress in console titlebar', default=False)
+ verbosity.add_option('-v', '--verbose',
+ action='store_true', dest='verbose', help='print various debugging information', default=False)
+ verbosity.add_option('--dump-intermediate-pages',
+ action='store_true', dest='dump_intermediate_pages', default=False,
+ help='print downloaded pages to debug problems (very verbose)')
+ verbosity.add_option('--write-pages',
+ action='store_true', dest='write_pages', default=False,
+ help='Write downloaded intermediary pages to files in the current directory to debug problems')
+ verbosity.add_option('--youtube-print-sig-code',
+ action='store_true', dest='youtube_print_sig_code', default=False,
+ help=optparse.SUPPRESS_HELP)
+ verbosity.add_option('--print-traffic',
+ dest='debug_printtraffic', action='store_true', default=False,
+ help='Display sent and read HTTP traffic')
+
+
+ filesystem.add_option('-a', '--batch-file',
+ dest='batchfile', metavar='FILE', help='file containing URLs to download (\'-\' for stdin)')
+ filesystem.add_option('--id',
+ action='store_true', dest='useid', help='use only video ID in file name', default=False)
+ filesystem.add_option('-A', '--auto-number',
+ action='store_true', dest='autonumber',
+ help='number downloaded files starting from 00000', default=False)
+ filesystem.add_option('-o', '--output',
+ dest='outtmpl', metavar='TEMPLATE',
+ help=('output filename template. Use %(title)s to get the title, '
+ '%(uploader)s for the uploader name, %(uploader_id)s for the uploader nickname if different, '
+ '%(autonumber)s to get an automatically incremented number, '
+ '%(ext)s for the filename extension, '
+ '%(format)s for the format description (like "22 - 1280x720" or "HD"), '
+ '%(format_id)s for the unique id of the format (like Youtube\'s itags: "137"), '
+ '%(upload_date)s for the upload date (YYYYMMDD), '
+ '%(extractor)s for the provider (youtube, metacafe, etc), '
+ '%(id)s for the video id, %(playlist)s for the playlist the video is in, '
+ '%(playlist_index)s for the position in the playlist and %% for a literal percent. '
+ '%(height)s and %(width)s for the width and height of the video format. '
+ '%(resolution)s for a textual description of the resolution of the video format. '
+ 'Use - to output to stdout. Can also be used to download to a different directory, '
+ 'for example with -o \'/my/downloads/%(uploader)s/%(title)s-%(id)s.%(ext)s\' .'))
+ filesystem.add_option('--autonumber-size',
+ dest='autonumber_size', metavar='NUMBER',
+ help='Specifies the number of digits in %(autonumber)s when it is present in output filename template or --auto-number option is given')
+ filesystem.add_option('--restrict-filenames',
+ action='store_true', dest='restrictfilenames',
+ help='Restrict filenames to only ASCII characters, and avoid "&" and spaces in filenames', default=False)
+ filesystem.add_option('-t', '--title',
+ action='store_true', dest='usetitle', help='[deprecated] use title in file name (default)', default=False)
+ filesystem.add_option('-l', '--literal',
+ action='store_true', dest='usetitle', help='[deprecated] alias of --title', default=False)
+ filesystem.add_option('-w', '--no-overwrites',
+ action='store_true', dest='nooverwrites', help='do not overwrite files', default=False)
+ filesystem.add_option('-c', '--continue',
+ action='store_true', dest='continue_dl', help='force resume of partially downloaded files. By default, youtube-dl will resume downloads if possible.', default=True)
+ filesystem.add_option('--no-continue',
+ action='store_false', dest='continue_dl',
+ help='do not resume partially downloaded files (restart from beginning)')
+ filesystem.add_option('--no-part',
+ action='store_true', dest='nopart', help='do not use .part files', default=False)
+ filesystem.add_option('--no-mtime',
+ action='store_false', dest='updatetime',
+ help='do not use the Last-modified header to set the file modification time', default=True)
+ filesystem.add_option('--write-description',
+ action='store_true', dest='writedescription',
+ help='write video description to a .description file', default=False)
+ filesystem.add_option('--write-info-json',
+ action='store_true', dest='writeinfojson',
+ help='write video metadata to a .info.json file', default=False)
+ filesystem.add_option('--write-annotations',
+ action='store_true', dest='writeannotations',
+ help='write video annotations to a .annotation file', default=False)
+ filesystem.add_option('--write-thumbnail',
+ action='store_true', dest='writethumbnail',
+ help='write thumbnail image to disk', default=False)
+ filesystem.add_option('--load-info',
+ dest='load_info_filename', metavar='FILE',
+ help='json file containing the video information (created with the "--write-json" option)')
+ filesystem.add_option('--cookies',
+ dest='cookiefile', metavar='FILE', help='file to read cookies from and dump cookie jar in')
+ filesystem.add_option(
+ '--cache-dir', dest='cachedir', default=None, metavar='DIR',
+ help='Location in the filesystem where youtube-dl can store some downloaded information permanently. By default $XDG_CACHE_HOME/youtube-dl or ~/.cache/youtube-dl . At the moment, only YouTube player files (for videos with obfuscated signatures) are cached, but that may change.')
+ filesystem.add_option(
+ '--no-cache-dir', action='store_const', const=False, dest='cachedir',
+ help='Disable filesystem caching')
+ filesystem.add_option(
+ '--rm-cache-dir', action='store_true', dest='rm_cachedir',
+ help='Delete all filesystem cache files')
+
+
+ postproc.add_option('-x', '--extract-audio', action='store_true', dest='extractaudio', default=False,
+ help='convert video files to audio-only files (requires ffmpeg or avconv and ffprobe or avprobe)')
+ postproc.add_option('--audio-format', metavar='FORMAT', dest='audioformat', default='best',
+ help='"best", "aac", "vorbis", "mp3", "m4a", "opus", or "wav"; best by default')
+ postproc.add_option('--audio-quality', metavar='QUALITY', dest='audioquality', default='5',
+ help='ffmpeg/avconv audio quality specification, insert a value between 0 (better) and 9 (worse) for VBR or a specific bitrate like 128K (default 5)')
+ postproc.add_option('--recode-video', metavar='FORMAT', dest='recodevideo', default=None,
+ help='Encode the video to another format if necessary (currently supported: mp4|flv|ogg|webm|mkv)')
+ postproc.add_option('-k', '--keep-video', action='store_true', dest='keepvideo', default=False,
+ help='keeps the video file on disk after the post-processing; the video is erased by default')
+ postproc.add_option('--no-post-overwrites', action='store_true', dest='nopostoverwrites', default=False,
+ help='do not overwrite post-processed files; the post-processed files are overwritten by default')
+ postproc.add_option('--embed-subs', action='store_true', dest='embedsubtitles', default=False,
+ help='embed subtitles in the video (only for mp4 videos)')
+ postproc.add_option('--embed-thumbnail', action='store_true', dest='embedthumbnail', default=False,
+ help='embed thumbnail in the audio as cover art')
+ postproc.add_option('--add-metadata', action='store_true', dest='addmetadata', default=False,
+ help='write metadata to the video file')
+ postproc.add_option('--xattrs', action='store_true', dest='xattrs', default=False,
+ help='write metadata to the video file\'s xattrs (using dublin core and xdg standards)')
+ postproc.add_option('--prefer-avconv', action='store_false', dest='prefer_ffmpeg',
+ help='Prefer avconv over ffmpeg for running the postprocessors (default)')
+ postproc.add_option('--prefer-ffmpeg', action='store_true', dest='prefer_ffmpeg',
+ help='Prefer ffmpeg over avconv for running the postprocessors')
+ postproc.add_option(
+ '--exec', metavar='CMD', dest='exec_cmd',
+ help='Execute a command on the file after downloading, similar to find\'s -exec syntax. Example: --exec \'adb push {} /sdcard/Music/ && rm {}\'' )
+
+ parser.add_option_group(general)
+ parser.add_option_group(selection)
+ parser.add_option_group(downloader)
+ parser.add_option_group(filesystem)
+ parser.add_option_group(verbosity)
+ parser.add_option_group(workarounds)
+ parser.add_option_group(video_format)
+ parser.add_option_group(subtitles)
+ parser.add_option_group(authentication)
+ parser.add_option_group(postproc)
+
+ if overrideArguments is not None:
+ opts, args = parser.parse_args(overrideArguments)
+ if opts.verbose:
+ write_string(u'[debug] Override config: ' + repr(overrideArguments) + '\n')
+ else:
+ commandLineConf = sys.argv[1:]
+ if '--ignore-config' in commandLineConf:
+ systemConf = []
+ userConf = []
+ else:
+ systemConf = _readOptions('/etc/youtube-dl.conf')
+ if '--ignore-config' in systemConf:
+ userConf = []
+ else:
+ userConf = _readUserConf()
+ argv = systemConf + userConf + commandLineConf
+
+ opts, args = parser.parse_args(argv)
+ if opts.verbose:
+ write_string(u'[debug] System config: ' + repr(_hide_login_info(systemConf)) + '\n')
+ write_string(u'[debug] User config: ' + repr(_hide_login_info(userConf)) + '\n')
+ write_string(u'[debug] Command-line args: ' + repr(_hide_login_info(commandLineConf)) + '\n')
+
+ return parser, opts, args
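
As a usage sketch (argument values invented for the example), the new module exposes the same parseOpts entry point that __init__.py now imports; passing overrideArguments bypasses the system and user config files entirely:

    # a minimal sketch, assuming this youtube_dl checkout is on the path
    from youtube_dl.options import parseOpts

    parser, opts, args = parseOpts(['--simulate', '--get-title', 'https://example.com/some-video'])

    print(opts.simulate)   # True
    print(opts.gettitle)   # True
    print(args)            # ['https://example.com/some-video']
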
diff --git a/youtube_dl/utils.py b/youtube_dl/utils.py
index e924b1688..b644f4e92 100644
--- a/youtube_dl/utils.py
+++ b/youtube_dl/utils.py
@@ -280,6 +280,11 @@ if sys.version_info >= (2, 7):
return node.find(expr)
else:
def find_xpath_attr(node, xpath, key, val):
+ # Here comes the crazy part: In 2.6, if the xpath is a unicode,
+ # .//node does not match if a node is a direct child of . !
+ if isinstance(xpath, unicode):
+ xpath = xpath.encode('ascii')
+
for f in node.findall(xpath):
if f.attrib.get(key) == val:
return f
@@ -299,6 +304,20 @@ def xpath_with_ns(path, ns_map):
return '/'.join(replaced)
+def xpath_text(node, xpath, name=None, fatal=False):
+ if sys.version_info < (2, 7): # Crazy 2.6
+ xpath = xpath.encode('ascii')
+
+ n = node.find(xpath)
+ if n is None:
+ if fatal:
+ name = xpath if name is None else name
+ raise ExtractorError('Could not find XML element %s' % name)
+ else:
+ return None
+ return n.text
+
+
compat_html_parser.locatestarttagend = re.compile(r"""<[a-zA-Z][-.a-zA-Z0-9:_]*(?:\s+(?:(?<=['"\s])[^\s/>][^\s/=>]*(?:\s*=+\s*(?:'[^']*'|"[^"]*"|(?!['"])[^>\s]*))?\s*)*)?\s*""", re.VERBOSE) # backport bugfix
class BaseHTMLParser(compat_html_parser.HTMLParser):
def __init(self):
@@ -1570,3 +1589,13 @@ except AttributeError:
if ret:
raise subprocess.CalledProcessError(ret, p.args, output=output)
return output
+
+
+def limit_length(s, length):
+ """ Add ellipses to overly long strings """
+ if s is None:
+ return None
+ ELLIPSES = '...'
+ if len(s) > length:
+ return s[:length - len(ELLIPSES)] + ELLIPSES
+ return s
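
A minimal sketch of the two new utils helpers, using made-up sample data:

    import xml.etree.ElementTree as ET
    from youtube_dl.utils import xpath_text, limit_length

    doc = ET.fromstring('<video><title>Example clip</title></video>')

    print(xpath_text(doc, './title'))    # 'Example clip'
    print(xpath_text(doc, './missing'))  # None (fatal=False by default)
    # xpath_text(doc, './missing', fatal=True) would raise ExtractorError instead

    print(limit_length('A very long description that will not fit', 20))  # 20 chars, ends in '...'
    print(limit_length(None, 20))        # None is passed through unchanged
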
diff --git a/youtube_dl/version.py b/youtube_dl/version.py
index 7496e9296..cf0d862da 100644
--- a/youtube_dl/version.py
+++ b/youtube_dl/version.py
@@ -1,2 +1,2 @@
-__version__ = '2014.09.12'
+__version__ = '2014.09.15.1'