Diffstat (limited to 'node_modules/bl')
34 files changed, 5229 insertions, 0 deletions
diff --git a/node_modules/bl/.jshintrc b/node_modules/bl/.jshintrc new file mode 100644 index 000000000..c8ef3ca40 --- /dev/null +++ b/node_modules/bl/.jshintrc @@ -0,0 +1,59 @@ +{ + "predef": [ ] + , "bitwise": false + , "camelcase": false + , "curly": false + , "eqeqeq": false + , "forin": false + , "immed": false + , "latedef": false + , "noarg": true + , "noempty": true + , "nonew": true + , "plusplus": false + , "quotmark": true + , "regexp": false + , "undef": true + , "unused": true + , "strict": false + , "trailing": true + , "maxlen": 120 + , "asi": true + , "boss": true + , "debug": true + , "eqnull": true + , "esnext": true + , "evil": true + , "expr": true + , "funcscope": false + , "globalstrict": false + , "iterator": false + , "lastsemic": true + , "laxbreak": true + , "laxcomma": true + , "loopfunc": true + , "multistr": false + , "onecase": false + , "proto": false + , "regexdash": false + , "scripturl": true + , "smarttabs": false + , "shadow": false + , "sub": true + , "supernew": false + , "validthis": true + , "browser": true + , "couch": false + , "devel": false + , "dojo": false + , "mootools": false + , "node": true + , "nonstandard": true + , "prototypejs": false + , "rhino": false + , "worker": true + , "wsh": false + , "nomen": false + , "onevar": false + , "passfail": false +}
\ No newline at end of file diff --git a/node_modules/bl/.npmignore b/node_modules/bl/.npmignore new file mode 100644 index 000000000..40b878db5 --- /dev/null +++ b/node_modules/bl/.npmignore @@ -0,0 +1 @@ +node_modules/
\ No newline at end of file diff --git a/node_modules/bl/.travis.yml b/node_modules/bl/.travis.yml new file mode 100644 index 000000000..5cb0480b4 --- /dev/null +++ b/node_modules/bl/.travis.yml @@ -0,0 +1,13 @@ +sudo: false +language: node_js +node_js: + - '0.10' + - '0.12' + - '4' + - '5' +branches: + only: + - master +notifications: + email: + - rod@vagg.org diff --git a/node_modules/bl/LICENSE.md b/node_modules/bl/LICENSE.md new file mode 100644 index 000000000..ccb24797c --- /dev/null +++ b/node_modules/bl/LICENSE.md @@ -0,0 +1,13 @@ +The MIT License (MIT) +===================== + +Copyright (c) 2014 bl contributors +---------------------------------- + +*bl contributors listed at <https://github.com/rvagg/bl#contributors>* + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/node_modules/bl/README.md b/node_modules/bl/README.md new file mode 100644 index 000000000..f7044db26 --- /dev/null +++ b/node_modules/bl/README.md @@ -0,0 +1,200 @@ +# bl *(BufferList)* + +[![Build Status](https://travis-ci.org/rvagg/bl.svg?branch=master)](https://travis-ci.org/rvagg/bl) + +**A Node.js Buffer list collector, reader and streamer thingy.** + +[![NPM](https://nodei.co/npm/bl.png?downloads=true&downloadRank=true)](https://nodei.co/npm/bl/) +[![NPM](https://nodei.co/npm-dl/bl.png?months=6&height=3)](https://nodei.co/npm/bl/) + +**bl** is a storage object for collections of Node Buffers, exposing them with the main Buffer readable API. Also works as a duplex stream so you can collect buffers from a stream that emits them and emit buffers to a stream that consumes them! + +The original buffers are kept intact and copies are only done as necessary. Any reads that require the use of a single original buffer will return a slice of that buffer only (which references the same memory as the original buffer). Reads that span buffers perform concatenation as required and return the results transparently. + +```js +const BufferList = require('bl') + +var bl = new BufferList() +bl.append(new Buffer('abcd')) +bl.append(new Buffer('efg')) +bl.append('hi') // bl will also accept & convert Strings +bl.append(new Buffer('j')) +bl.append(new Buffer([ 0x3, 0x4 ])) + +console.log(bl.length) // 12 + +console.log(bl.slice(0, 10).toString('ascii')) // 'abcdefghij' +console.log(bl.slice(3, 10).toString('ascii')) // 'defghij' +console.log(bl.slice(3, 6).toString('ascii')) // 'def' +console.log(bl.slice(3, 8).toString('ascii')) // 'defgh' +console.log(bl.slice(5, 10).toString('ascii')) // 'fghij' + +// or just use toString! 
+console.log(bl.toString()) // 'abcdefghij\u0003\u0004' +console.log(bl.toString('ascii', 3, 8)) // 'defgh' +console.log(bl.toString('ascii', 5, 10)) // 'fghij' + +// other standard Buffer readables +console.log(bl.readUInt16BE(10)) // 0x0304 +console.log(bl.readUInt16LE(10)) // 0x0403 +``` + +Give it a callback in the constructor and use it just like **[concat-stream](https://github.com/maxogden/node-concat-stream)**: + +```js +const bl = require('bl') + , fs = require('fs') + +fs.createReadStream('README.md') + .pipe(bl(function (err, data) { // note 'new' isn't strictly required + // `data` is a complete Buffer object containing the full data + console.log(data.toString()) + })) +``` + +Note that when you use the *callback* method like this, the resulting `data` parameter is a concatenation of all `Buffer` objects in the list. If you want to avoid the overhead of this concatenation (in cases of extreme performance consciousness), then avoid the *callback* method and just listen to `'end'` instead, like a standard Stream. + +Or to fetch a URL using [hyperquest](https://github.com/substack/hyperquest) (should work with [request](http://github.com/mikeal/request) and even plain Node http too!): +```js +const hyperquest = require('hyperquest') + , bl = require('bl') + , url = 'https://raw.github.com/rvagg/bl/master/README.md' + +hyperquest(url).pipe(bl(function (err, data) { + console.log(data.toString()) +})) +``` + +Or, use it as a readable stream to recompose a list of Buffers to an output source: + +```js +const BufferList = require('bl') + , fs = require('fs') + +var bl = new BufferList() +bl.append(new Buffer('abcd')) +bl.append(new Buffer('efg')) +bl.append(new Buffer('hi')) +bl.append(new Buffer('j')) + +bl.pipe(fs.createWriteStream('gibberish.txt')) +``` + +## API + + * <a href="#ctor"><code><b>new BufferList([ callback ])</b></code></a> + * <a href="#length"><code>bl.<b>length</b></code></a> + * <a href="#append"><code>bl.<b>append(buffer)</b></code></a> + * <a href="#get"><code>bl.<b>get(index)</b></code></a> + * <a href="#slice"><code>bl.<b>slice([ start[, end ] ])</b></code></a> + * <a href="#copy"><code>bl.<b>copy(dest, [ destStart, [ srcStart [, srcEnd ] ] ])</b></code></a> + * <a href="#duplicate"><code>bl.<b>duplicate()</b></code></a> + * <a href="#consume"><code>bl.<b>consume(bytes)</b></code></a> + * <a href="#toString"><code>bl.<b>toString([encoding, [ start, [ end ]]])</b></code></a> + * <a href="#readXX"><code>bl.<b>readDoubleBE()</b></code>, <code>bl.<b>readDoubleLE()</b></code>, <code>bl.<b>readFloatBE()</b></code>, <code>bl.<b>readFloatLE()</b></code>, <code>bl.<b>readInt32BE()</b></code>, <code>bl.<b>readInt32LE()</b></code>, <code>bl.<b>readUInt32BE()</b></code>, <code>bl.<b>readUInt32LE()</b></code>, <code>bl.<b>readInt16BE()</b></code>, <code>bl.<b>readInt16LE()</b></code>, <code>bl.<b>readUInt16BE()</b></code>, <code>bl.<b>readUInt16LE()</b></code>, <code>bl.<b>readInt8()</b></code>, <code>bl.<b>readUInt8()</b></code></a> + * <a href="#streams">Streams</a> + +-------------------------------------------------------- +<a name="ctor"></a> +### new BufferList([ callback | Buffer | Buffer array | BufferList | BufferList array | String ]) +The constructor takes an optional callback; if supplied, the callback will be called with an error argument followed by a reference to the **bl** instance when `bl.end()` is called (i.e. from a piped stream).
This is a convenient method of collecting the entire contents of a stream, particularly when the stream is *chunky*, such as a network stream. + +Normally, no arguments are required for the constructor, but you can initialise the list by passing in a single `Buffer` object or an array of `Buffer` objects. + +`new` is not strictly required; if you don't instantiate a new object, it will be done automatically for you, so you can create a new instance simply with: + +```js +var bl = require('bl') +var myinstance = bl() + +// equivalent to: + +var BufferList = require('bl') +var myinstance = new BufferList() +``` + +-------------------------------------------------------- +<a name="length"></a> +### bl.length +Get the length of the list in bytes. This is the sum of the lengths of all of the buffers contained in the list, minus any initial offset for a semi-consumed buffer at the beginning. It should accurately represent the total number of bytes that can be read from the list. + +-------------------------------------------------------- +<a name="append"></a> +### bl.append(Buffer | Buffer array | BufferList | BufferList array | String) +`append(buffer)` adds an additional buffer or BufferList to the internal list. `this` is returned so it can be chained. + +-------------------------------------------------------- +<a name="get"></a> +### bl.get(index) +`get()` will return the byte at the specified index. + +-------------------------------------------------------- +<a name="slice"></a> +### bl.slice([ start, [ end ] ]) +`slice()` returns a new `Buffer` object containing the bytes within the range specified. Both `start` and `end` are optional and will default to the beginning and end of the list respectively. + +If the requested range spans a single internal buffer then a slice of that buffer will be returned which shares the original memory range of that Buffer. If the range spans multiple buffers then copy operations will likely occur to give you a uniform Buffer. + +-------------------------------------------------------- +<a name="copy"></a> +### bl.copy(dest, [ destStart, [ srcStart [, srcEnd ] ] ]) +`copy()` copies the content of the list into the `dest` buffer, starting from `destStart` and containing the bytes within the range specified by `srcStart` to `srcEnd`. `destStart`, `srcStart` and `srcEnd` are optional and will default to the beginning of the `dest` buffer, and the beginning and end of the list respectively. + +-------------------------------------------------------- +<a name="duplicate"></a> +### bl.duplicate() +`duplicate()` performs a **shallow-copy** of the list. The internal Buffers remain the same, so if you change the underlying Buffers, the change will be reflected in both the original and the duplicate. This method is needed if you want to call `consume()` or `pipe()` and still keep the original list. Example: + +```js +var bl = new BufferList() + +bl.append('hello') +bl.append(' world') +bl.append('\n') + +bl.duplicate().pipe(process.stdout, { end: false }) + +console.log(bl.toString()) +``` + +-------------------------------------------------------- +<a name="consume"></a> +### bl.consume(bytes) +`consume()` will shift bytes *off the start of the list*. The number of bytes consumed doesn't need to line up with the sizes of the internal Buffers: initial offsets will be calculated accordingly in order to give you a consistent view of the data.
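+
+As an illustrative sketch added here for clarity (not part of the original README), `consume()` just moves the logical start of the list; note how it can cut into and across internal Buffer boundaries:
+
+```js
+var bl = new BufferList([ new Buffer('abc'), new Buffer('def') ])
+
+bl.consume(2) // drops 'ab', slicing into the first internal buffer
+console.log(bl.length) // 4
+console.log(bl.toString('ascii')) // 'cdef'
+
+bl.consume(2) // crosses the boundary between the two internal buffers
+console.log(bl.toString('ascii')) // 'ef'
+```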
+ +-------------------------------------------------------- +<a name="toString"></a> +### bl.toString([encoding, [ start, [ end ]]]) +`toString()` will return a string representation of the buffer. The optional `start` and `end` arguments are passed on to `slice()`, while the `encoding` is passed on to `toString()` of the resulting Buffer. See the [Buffer#toString()](http://nodejs.org/docs/latest/api/buffer.html#buffer_buf_tostring_encoding_start_end) documentation for more information. + +-------------------------------------------------------- +<a name="readXX"></a> +### bl.readDoubleBE(), bl.readDoubleLE(), bl.readFloatBE(), bl.readFloatLE(), bl.readInt32BE(), bl.readInt32LE(), bl.readUInt32BE(), bl.readUInt32LE(), bl.readInt16BE(), bl.readInt16LE(), bl.readUInt16BE(), bl.readUInt16LE(), bl.readInt8(), bl.readUInt8() + +All of the standard byte-reading methods of the `Buffer` interface are implemented and will operate across internal Buffer boundaries transparently. + +See the <b><code>[Buffer](http://nodejs.org/docs/latest/api/buffer.html)</code></b> documentation for how these work. + +-------------------------------------------------------- +<a name="streams"></a> +### Streams +**bl** is a Node **[Duplex Stream](http://nodejs.org/docs/latest/api/stream.html#stream_class_stream_duplex)**, so it can be read from and written to like a standard Node stream. You can also `pipe()` to and from a **bl** instance. + +-------------------------------------------------------- + +## Contributors + +**bl** is brought to you by the following hackers: + + * [Rod Vagg](https://github.com/rvagg) + * [Matteo Collina](https://github.com/mcollina) + * [Jarett Cruger](https://github.com/jcrugzz) + +<a name="license"></a> +## License & copyright + +Copyright (c) 2013-2014 bl contributors (listed above). + +bl is licensed under the MIT license. All rights not explicitly granted in the MIT license are reserved. See the included LICENSE.md file for more details.
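+
+To round out the byte-reading methods described above, a brief editorial illustration (not in the original README): multi-byte reads are stitched together across internal Buffer boundaries, so the caller never sees where one buffer ends and the next begins:
+
+```js
+var bl = new BufferList([ new Buffer([ 0x01 ]), new Buffer([ 0x02 ]) ])
+console.log(bl.readUInt16BE(0)) // 258 (0x0102): the read spans both internal buffers
+```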
diff --git a/node_modules/bl/bl.js b/node_modules/bl/bl.js new file mode 100644 index 000000000..f585df172 --- /dev/null +++ b/node_modules/bl/bl.js @@ -0,0 +1,243 @@ +var DuplexStream = require('readable-stream/duplex') + , util = require('util') + + +function BufferList (callback) { + if (!(this instanceof BufferList)) + return new BufferList(callback) + + this._bufs = [] + this.length = 0 + + if (typeof callback == 'function') { + this._callback = callback + + var piper = function piper (err) { + if (this._callback) { + this._callback(err) + this._callback = null + } + }.bind(this) + + this.on('pipe', function onPipe (src) { + src.on('error', piper) + }) + this.on('unpipe', function onUnpipe (src) { + src.removeListener('error', piper) + }) + } else { + this.append(callback) + } + + DuplexStream.call(this) +} + + +util.inherits(BufferList, DuplexStream) + + +BufferList.prototype._offset = function _offset (offset) { + var tot = 0, i = 0, _t + for (; i < this._bufs.length; i++) { + _t = tot + this._bufs[i].length + if (offset < _t) + return [ i, offset - tot ] + tot = _t + } +} + + +BufferList.prototype.append = function append (buf) { + var i = 0 + , newBuf + + if (Array.isArray(buf)) { + for (; i < buf.length; i++) + this.append(buf[i]) + } else if (buf instanceof BufferList) { + // unwrap argument into individual BufferLists + for (; i < buf._bufs.length; i++) + this.append(buf._bufs[i]) + } else if (buf != null) { + // coerce number arguments to strings, since Buffer(number) does + // uninitialized memory allocation + if (typeof buf == 'number') + buf = buf.toString() + + newBuf = Buffer.isBuffer(buf) ? buf : new Buffer(buf) + this._bufs.push(newBuf) + this.length += newBuf.length + } + + return this +} + + +BufferList.prototype._write = function _write (buf, encoding, callback) { + this.append(buf) + + if (typeof callback == 'function') + callback() +} + + +BufferList.prototype._read = function _read (size) { + if (!this.length) + return this.push(null) + + size = Math.min(size, this.length) + this.push(this.slice(0, size)) + this.consume(size) +} + + +BufferList.prototype.end = function end (chunk) { + DuplexStream.prototype.end.call(this, chunk) + + if (this._callback) { + this._callback(null, this.slice()) + this._callback = null + } +} + + +BufferList.prototype.get = function get (index) { + return this.slice(index, index + 1)[0] +} + + +BufferList.prototype.slice = function slice (start, end) { + return this.copy(null, 0, start, end) +} + + +BufferList.prototype.copy = function copy (dst, dstStart, srcStart, srcEnd) { + if (typeof srcStart != 'number' || srcStart < 0) + srcStart = 0 + if (typeof srcEnd != 'number' || srcEnd > this.length) + srcEnd = this.length + if (srcStart >= this.length) + return dst || new Buffer(0) + if (srcEnd <= 0) + return dst || new Buffer(0) + + var copy = !!dst + , off = this._offset(srcStart) + , len = srcEnd - srcStart + , bytes = len + , bufoff = (copy && dstStart) || 0 + , start = off[1] + , l + , i + + // copy/slice everything + if (srcStart === 0 && srcEnd == this.length) { + if (!copy) // slice, just return a full concat + return Buffer.concat(this._bufs) + + // copy, need to copy individual buffers + for (i = 0; i < this._bufs.length; i++) { + this._bufs[i].copy(dst, bufoff) + bufoff += this._bufs[i].length + } + + return dst + } + + // easy, cheap case where it's a subset of one of the buffers + if (bytes <= this._bufs[off[0]].length - start) { + return copy + ? 
this._bufs[off[0]].copy(dst, dstStart, start, start + bytes) + : this._bufs[off[0]].slice(start, start + bytes) + } + + if (!copy) // a slice, we need something to copy in to + dst = new Buffer(len) + + for (i = off[0]; i < this._bufs.length; i++) { + l = this._bufs[i].length - start + + if (bytes > l) { + this._bufs[i].copy(dst, bufoff, start) + } else { + this._bufs[i].copy(dst, bufoff, start, start + bytes) + break + } + + bufoff += l + bytes -= l + + if (start) + start = 0 + } + + return dst +} + +BufferList.prototype.toString = function toString (encoding, start, end) { + return this.slice(start, end).toString(encoding) +} + +BufferList.prototype.consume = function consume (bytes) { + while (this._bufs.length) { + if (bytes >= this._bufs[0].length) { + bytes -= this._bufs[0].length + this.length -= this._bufs[0].length + this._bufs.shift() + } else { + this._bufs[0] = this._bufs[0].slice(bytes) + this.length -= bytes + break + } + } + return this +} + + +BufferList.prototype.duplicate = function duplicate () { + var i = 0 + , copy = new BufferList() + + for (; i < this._bufs.length; i++) + copy.append(this._bufs[i]) + + return copy +} + + +BufferList.prototype.destroy = function destroy () { + this._bufs.length = 0 + this.length = 0 + this.push(null) +} + + +;(function () { + var methods = { + 'readDoubleBE' : 8 + , 'readDoubleLE' : 8 + , 'readFloatBE' : 4 + , 'readFloatLE' : 4 + , 'readInt32BE' : 4 + , 'readInt32LE' : 4 + , 'readUInt32BE' : 4 + , 'readUInt32LE' : 4 + , 'readInt16BE' : 2 + , 'readInt16LE' : 2 + , 'readUInt16BE' : 2 + , 'readUInt16LE' : 2 + , 'readInt8' : 1 + , 'readUInt8' : 1 + } + + for (var m in methods) { + (function (m) { + BufferList.prototype[m] = function (offset) { + return this.slice(offset, offset + methods[m])[m](0) + } + }(m)) + } +}()) + + +module.exports = BufferList diff --git a/node_modules/bl/node_modules/isarray/.npmignore b/node_modules/bl/node_modules/isarray/.npmignore new file mode 100644 index 000000000..3c3629e64 --- /dev/null +++ b/node_modules/bl/node_modules/isarray/.npmignore @@ -0,0 +1 @@ +node_modules diff --git a/node_modules/bl/node_modules/isarray/.travis.yml b/node_modules/bl/node_modules/isarray/.travis.yml new file mode 100644 index 000000000..cc4dba29d --- /dev/null +++ b/node_modules/bl/node_modules/isarray/.travis.yml @@ -0,0 +1,4 @@ +language: node_js +node_js: + - "0.8" + - "0.10" diff --git a/node_modules/bl/node_modules/isarray/Makefile b/node_modules/bl/node_modules/isarray/Makefile new file mode 100644 index 000000000..787d56e1e --- /dev/null +++ b/node_modules/bl/node_modules/isarray/Makefile @@ -0,0 +1,6 @@ + +test: + @node_modules/.bin/tape test.js + +.PHONY: test + diff --git a/node_modules/bl/node_modules/isarray/README.md b/node_modules/bl/node_modules/isarray/README.md new file mode 100644 index 000000000..16d2c59c6 --- /dev/null +++ b/node_modules/bl/node_modules/isarray/README.md @@ -0,0 +1,60 @@ + +# isarray + +`Array#isArray` for older browsers. 
+ +[![build status](https://secure.travis-ci.org/juliangruber/isarray.svg)](http://travis-ci.org/juliangruber/isarray) +[![downloads](https://img.shields.io/npm/dm/isarray.svg)](https://www.npmjs.org/package/isarray) + +[![browser support](https://ci.testling.com/juliangruber/isarray.png) +](https://ci.testling.com/juliangruber/isarray) + +## Usage + +```js +var isArray = require('isarray'); + +console.log(isArray([])); // => true +console.log(isArray({})); // => false +``` + +## Installation + +With [npm](http://npmjs.org) do + +```bash +$ npm install isarray +``` + +Then bundle for the browser with +[browserify](https://github.com/substack/browserify). + +With [component](http://component.io) do + +```bash +$ component install juliangruber/isarray +``` + +## License + +(MIT) + +Copyright (c) 2013 Julian Gruber <julian@juliangruber.com> + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/node_modules/bl/node_modules/isarray/component.json b/node_modules/bl/node_modules/isarray/component.json new file mode 100644 index 000000000..9e31b6838 --- /dev/null +++ b/node_modules/bl/node_modules/isarray/component.json @@ -0,0 +1,19 @@ +{ + "name" : "isarray", + "description" : "Array#isArray for older browsers", + "version" : "0.0.1", + "repository" : "juliangruber/isarray", + "homepage": "https://github.com/juliangruber/isarray", + "main" : "index.js", + "scripts" : [ + "index.js" + ], + "dependencies" : {}, + "keywords": ["browser","isarray","array"], + "author": { + "name": "Julian Gruber", + "email": "mail@juliangruber.com", + "url": "http://juliangruber.com" + }, + "license": "MIT" +} diff --git a/node_modules/bl/node_modules/isarray/index.js b/node_modules/bl/node_modules/isarray/index.js new file mode 100644 index 000000000..a57f63495 --- /dev/null +++ b/node_modules/bl/node_modules/isarray/index.js @@ -0,0 +1,5 @@ +var toString = {}.toString; + +module.exports = Array.isArray || function (arr) { + return toString.call(arr) == '[object Array]'; +}; diff --git a/node_modules/bl/node_modules/isarray/package.json b/node_modules/bl/node_modules/isarray/package.json new file mode 100644 index 000000000..5f2ceb1ff --- /dev/null +++ b/node_modules/bl/node_modules/isarray/package.json @@ -0,0 +1,104 @@ +{ + "_args": [ + [ + { + "raw": "isarray@~1.0.0", + "scope": null, + "escapedName": "isarray", + "name": "isarray", + "rawSpec": "~1.0.0", + "spec": ">=1.0.0 <1.1.0", + "type": "range" + }, + "/home/dold/repos/taler/wallet-webex/node_modules/bl/node_modules/readable-stream" + ] + ], + "_from": "isarray@>=1.0.0 <1.1.0", + "_id": "isarray@1.0.0", + "_inCache": true, + "_location": "/bl/isarray", + "_nodeVersion": "5.1.0", + "_npmUser": { + "name": "juliangruber", + "email": "julian@juliangruber.com" + }, + "_npmVersion": "3.3.12", + "_phantomChildren": {}, + "_requested": { + "raw": "isarray@~1.0.0", + "scope": null, + "escapedName": "isarray", + "name": "isarray", + "rawSpec": "~1.0.0", + "spec": ">=1.0.0 <1.1.0", + "type": "range" + }, + "_requiredBy": [ + "/bl/readable-stream" + ], + "_resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", + "_shasum": "bb935d48582cba168c06834957a54a3e07124f11", + "_shrinkwrap": null, + "_spec": "isarray@~1.0.0", + "_where": "/home/dold/repos/taler/wallet-webex/node_modules/bl/node_modules/readable-stream", + "author": { + "name": "Julian Gruber", + "email": "mail@juliangruber.com", + "url": "http://juliangruber.com" + }, + "bugs": { + "url": "https://github.com/juliangruber/isarray/issues" + }, + "dependencies": {}, + "description": "Array#isArray for older browsers", + "devDependencies": { + "tape": "~2.13.4" + }, + "directories": {}, + "dist": { + "shasum": "bb935d48582cba168c06834957a54a3e07124f11", + "tarball": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz" + }, + "gitHead": "2a23a281f369e9ae06394c0fb4d2381355a6ba33", + "homepage": "https://github.com/juliangruber/isarray", + "keywords": [ + "browser", + "isarray", + "array" + ], + "license": "MIT", + "main": "index.js", + "maintainers": [ + { + "name": "juliangruber", + "email": "julian@juliangruber.com" + } + ], + "name": "isarray", + "optionalDependencies": {}, + "readme": "ERROR: No README data found!", + "repository": { + "type": "git", + "url": "git://github.com/juliangruber/isarray.git" + }, + "scripts": { + "test": "tape test.js" + }, + "testling": { + "files": "test.js", + "browsers": [ + "ie/8..latest", + "firefox/17..latest", + "firefox/nightly", 
+ "chrome/22..latest", + "chrome/canary", + "opera/12..latest", + "opera/next", + "safari/5.1..latest", + "ipad/6.0..latest", + "iphone/6.0..latest", + "android-browser/4.2..latest" + ] + }, + "version": "1.0.0" +} diff --git a/node_modules/bl/node_modules/isarray/test.js b/node_modules/bl/node_modules/isarray/test.js new file mode 100644 index 000000000..e0c3444d8 --- /dev/null +++ b/node_modules/bl/node_modules/isarray/test.js @@ -0,0 +1,20 @@ +var isArray = require('./'); +var test = require('tape'); + +test('is array', function(t){ + t.ok(isArray([])); + t.notOk(isArray({})); + t.notOk(isArray(null)); + t.notOk(isArray(false)); + + var obj = {}; + obj[0] = true; + t.notOk(isArray(obj)); + + var arr = []; + arr.foo = 'bar'; + t.ok(isArray(arr)); + + t.end(); +}); + diff --git a/node_modules/bl/node_modules/readable-stream/.npmignore b/node_modules/bl/node_modules/readable-stream/.npmignore new file mode 100644 index 000000000..38344f87a --- /dev/null +++ b/node_modules/bl/node_modules/readable-stream/.npmignore @@ -0,0 +1,5 @@ +build/ +test/ +examples/ +fs.js +zlib.js
\ No newline at end of file diff --git a/node_modules/bl/node_modules/readable-stream/.travis.yml b/node_modules/bl/node_modules/readable-stream/.travis.yml new file mode 100644 index 000000000..1b8211846 --- /dev/null +++ b/node_modules/bl/node_modules/readable-stream/.travis.yml @@ -0,0 +1,52 @@ +sudo: false +language: node_js +before_install: + - npm install -g npm@2 + - npm install -g npm +notifications: + email: false +matrix: + fast_finish: true + allow_failures: + - env: TASK=browser BROWSER_NAME=ipad BROWSER_VERSION="6.0..latest" + - env: TASK=browser BROWSER_NAME=iphone BROWSER_VERSION="6.0..latest" + include: + - node_js: '0.8' + env: TASK=test + - node_js: '0.10' + env: TASK=test + - node_js: '0.11' + env: TASK=test + - node_js: '0.12' + env: TASK=test + - node_js: 1 + env: TASK=test + - node_js: 2 + env: TASK=test + - node_js: 3 + env: TASK=test + - node_js: 4 + env: TASK=test + - node_js: 5 + env: TASK=test + - node_js: 5 + env: TASK=browser BROWSER_NAME=android BROWSER_VERSION="4.0..latest" + - node_js: 5 + env: TASK=browser BROWSER_NAME=ie BROWSER_VERSION="9..latest" + - node_js: 5 + env: TASK=browser BROWSER_NAME=opera BROWSER_VERSION="11..latest" + - node_js: 5 + env: TASK=browser BROWSER_NAME=chrome BROWSER_VERSION="-3..latest" + - node_js: 5 + env: TASK=browser BROWSER_NAME=firefox BROWSER_VERSION="-3..latest" + - node_js: 5 + env: TASK=browser BROWSER_NAME=ipad BROWSER_VERSION="6.0..latest" + - node_js: 5 + env: TASK=browser BROWSER_NAME=iphone BROWSER_VERSION="6.0..latest" + - node_js: 5 + env: TASK=browser BROWSER_NAME=safari BROWSER_VERSION="5..latest" +script: "npm run $TASK" +env: + global: + - secure: rE2Vvo7vnjabYNULNyLFxOyt98BoJexDqsiOnfiD6kLYYsiQGfr/sbZkPMOFm9qfQG7pjqx+zZWZjGSswhTt+626C0t/njXqug7Yps4c3dFblzGfreQHp7wNX5TFsvrxd6dAowVasMp61sJcRnB2w8cUzoe3RAYUDHyiHktwqMc= + - secure: g9YINaKAdMatsJ28G9jCGbSaguXCyxSTy+pBO6Ch0Cf57ZLOTka3HqDj8p3nV28LUIHZ3ut5WO43CeYKwt4AUtLpBS3a0dndHdY6D83uY6b2qh5hXlrcbeQTq2cvw2y95F7hm4D1kwrgZ7ViqaKggRcEupAL69YbJnxeUDKWEdI= diff --git a/node_modules/bl/node_modules/readable-stream/.zuul.yml b/node_modules/bl/node_modules/readable-stream/.zuul.yml new file mode 100644 index 000000000..96d9cfbd3 --- /dev/null +++ b/node_modules/bl/node_modules/readable-stream/.zuul.yml @@ -0,0 +1 @@ +ui: tape diff --git a/node_modules/bl/node_modules/readable-stream/LICENSE b/node_modules/bl/node_modules/readable-stream/LICENSE new file mode 100644 index 000000000..e3d4e695a --- /dev/null +++ b/node_modules/bl/node_modules/readable-stream/LICENSE @@ -0,0 +1,18 @@ +Copyright Joyent, Inc. and other Node contributors. All rights reserved. +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to +deal in the Software without restriction, including without limitation the +rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +sell copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +IN THE SOFTWARE. diff --git a/node_modules/bl/node_modules/readable-stream/README.md b/node_modules/bl/node_modules/readable-stream/README.md new file mode 100644 index 000000000..86b95a3bf --- /dev/null +++ b/node_modules/bl/node_modules/readable-stream/README.md @@ -0,0 +1,36 @@ +# readable-stream + +***Node-core v5.8.0 streams for userland*** [![Build Status](https://travis-ci.org/nodejs/readable-stream.svg?branch=master)](https://travis-ci.org/nodejs/readable-stream) + + +[![NPM](https://nodei.co/npm/readable-stream.png?downloads=true&downloadRank=true)](https://nodei.co/npm/readable-stream/) +[![NPM](https://nodei.co/npm-dl/readable-stream.png?&months=6&height=3)](https://nodei.co/npm/readable-stream/) + + +[![Sauce Test Status](https://saucelabs.com/browser-matrix/readable-stream.svg)](https://saucelabs.com/u/readable-stream) + +```bash +npm install --save readable-stream +``` + +***Node-core streams for userland*** + +This package is a mirror of the Streams2 and Streams3 implementations in +Node-core, including [documentation](doc/stream.markdown). + +If you want to guarantee a stable streams base, regardless of what version of +Node you, or the users of your libraries, are using, use **readable-stream** *only* and avoid the *"stream"* module in Node-core; for background, see [this blogpost](http://r.va.gg/2014/06/why-i-dont-use-nodes-core-stream-module.html). + +As of version 2.0.0 **readable-stream** uses semantic versioning. + +# Streams WG Team Members + +* **Chris Dickinson** ([@chrisdickinson](https://github.com/chrisdickinson)) <christopher.s.dickinson@gmail.com> + - Release GPG key: 9554F04D7259F04124DE6B476D5A82AC7E37093B +* **Calvin Metcalf** ([@calvinmetcalf](https://github.com/calvinmetcalf)) <calvin.metcalf@gmail.com> + - Release GPG key: F3EF5F62A87FC27A22E643F714CE4FF5015AA242 +* **Rod Vagg** ([@rvagg](https://github.com/rvagg)) <rod@vagg.org> + - Release GPG key: DD8F2338BAE7501E3DD5AC78C273792F7D83545D +* **Sam Newman** ([@sonewman](https://github.com/sonewman)) <newmansam@outlook.com> +* **Mathias Buus** ([@mafintosh](https://github.com/mafintosh)) <mathiasbuus@gmail.com> +* **Domenic Denicola** ([@domenic](https://github.com/domenic)) <d@domenic.me> diff --git a/node_modules/bl/node_modules/readable-stream/doc/stream.markdown b/node_modules/bl/node_modules/readable-stream/doc/stream.markdown new file mode 100644 index 000000000..0bc3819e6 --- /dev/null +++ b/node_modules/bl/node_modules/readable-stream/doc/stream.markdown @@ -0,0 +1,1760 @@ +# Stream + + Stability: 2 - Stable + +A stream is an abstract interface implemented by various objects in +Node.js. For example, a [request to an HTTP server][http-incoming-message] is a +stream, as is [`process.stdout`][]. Streams are readable, writable, or both. All +streams are instances of [`EventEmitter`][]. + +You can load the Stream base classes by doing `require('stream')`. +There are base classes provided for [Readable][] streams, [Writable][] +streams, [Duplex][] streams, and [Transform][] streams. + +This document is split up into 3 sections: + +1. The first section explains the parts of the API that you need to be + aware of to use streams in your programs. +2. 
The second section explains the parts of the API that you need to + use if you implement your own custom streams yourself. The API is designed to + make this easy for you to do. +3. The third section goes into more depth about how streams work, + including some of the internal mechanisms and functions that you + should probably not modify unless you definitely know what you are + doing. + + +## API for Stream Consumers + +<!--type=misc--> + +Streams can be either [Readable][], [Writable][], or both ([Duplex][]). + +All streams are EventEmitters, but they also have other custom methods +and properties depending on whether they are Readable, Writable, or +Duplex. + +If a stream is both Readable and Writable, then it implements all of +the methods and events. So, a [Duplex][] or [Transform][] stream is +fully described by this API, though their implementation may be +somewhat different. + +It is not necessary to implement Stream interfaces in order to consume +streams in your programs. If you **are** implementing streaming +interfaces in your own program, please also refer to +[API for Stream Implementors][]. + +Almost all Node.js programs, no matter how simple, use Streams in some +way. Here is an example of using Streams in a Node.js program: + +```js +const http = require('http'); + +var server = http.createServer((req, res) => { + // req is an http.IncomingMessage, which is a Readable Stream + // res is an http.ServerResponse, which is a Writable Stream + + var body = ''; + // we want to get the data as utf8 strings + // If you don't set an encoding, then you'll get Buffer objects + req.setEncoding('utf8'); + + // Readable streams emit 'data' events once a listener is added + req.on('data', (chunk) => { + body += chunk; + }); + + // the end event tells you that you have the entire body + req.on('end', () => { + try { + var data = JSON.parse(body); + } catch (er) { + // uh oh! bad json! + res.statusCode = 400; + return res.end(`error: ${er.message}`); + } + + // write back something interesting to the user: + res.write(typeof data); + res.end(); + }); +}); + +server.listen(1337); + +// $ curl localhost:1337 -d '{}' +// object +// $ curl localhost:1337 -d '"foo"' +// string +// $ curl localhost:1337 -d 'not json' +// error: Unexpected token o +``` + +### Class: stream.Duplex + +Duplex streams are streams that implement both the [Readable][] and +[Writable][] interfaces. + +Examples of Duplex streams include: + +* [TCP sockets][] +* [zlib streams][zlib] +* [crypto streams][crypto] + +### Class: stream.Readable + +<!--type=class--> + +The Readable stream interface is the abstraction for a *source* of +data that you are reading from. In other words, data comes *out* of a +Readable stream. + +A Readable stream will not start emitting data until you indicate that +you are ready to receive it. + +Readable streams have two "modes": a **flowing mode** and a **paused +mode**. When in flowing mode, data is read from the underlying system +and provided to your program as fast as possible. In paused mode, you +must explicitly call [`stream.read()`][stream-read] to get chunks of data out. +Streams start out in paused mode. + +**Note**: If no data event handlers are attached, and there are no +[`stream.pipe()`][] destinations, and the stream is switched into flowing +mode, then data will be lost. + +You can switch to flowing mode by doing any of the following: + +* Adding a [`'data'`][] event handler to listen for data. +* Calling the [`stream.resume()`][stream-resume] method to explicitly open the + flow.
+* Calling the [`stream.pipe()`][] method to send the data to a [Writable][]. + +You can switch back to paused mode by doing either of the following: + +* If there are no pipe destinations, by calling the + [`stream.pause()`][stream-pause] method. +* If there are pipe destinations, by removing any [`'data'`][] event + handlers, and removing all pipe destinations by calling the + [`stream.unpipe()`][] method. + +Note that, for backwards compatibility reasons, removing [`'data'`][] +event handlers will **not** automatically pause the stream. Also, if +there are piped destinations, then calling [`stream.pause()`][stream-pause] will +not guarantee that the stream will *remain* paused once those +destinations drain and ask for more data. + +Examples of readable streams include: + +* [HTTP responses, on the client][http-incoming-message] +* [HTTP requests, on the server][http-incoming-message] +* [fs read streams][] +* [zlib streams][zlib] +* [crypto streams][crypto] +* [TCP sockets][] +* [child process stdout and stderr][] +* [`process.stdin`][] + +#### Event: 'close' + +Emitted when the stream and any of its underlying resources (a file +descriptor, for example) have been closed. The event indicates that +no more events will be emitted, and no further computation will occur. + +Not all streams will emit the `'close'` event. + +#### Event: 'data' + +* `chunk` {Buffer|String} The chunk of data. + +Attaching a `'data'` event listener to a stream that has not been +explicitly paused will switch the stream into flowing mode. Data will +then be passed as soon as it is available. + +If you just want to get all the data out of the stream as fast as +possible, this is the best way to do so. + +```js +var readable = getReadableStreamSomehow(); +readable.on('data', (chunk) => { + console.log('got %d bytes of data', chunk.length); +}); +``` + +#### Event: 'end' + +This event fires when there will be no more data to read. + +Note that the `'end'` event **will not fire** unless the data is +completely consumed. This can be done by switching into flowing mode, +or by calling [`stream.read()`][stream-read] repeatedly until you get to the +end. + +```js +var readable = getReadableStreamSomehow(); +readable.on('data', (chunk) => { + console.log('got %d bytes of data', chunk.length); +}); +readable.on('end', () => { + console.log('there will be no more data.'); +}); +``` + +#### Event: 'error' + +* {Error Object} + +Emitted if there was an error receiving data. + +#### Event: 'readable' + +When a chunk of data can be read from the stream, it will emit a +`'readable'` event. + +In some cases, listening for a `'readable'` event will cause some data +to be read into the internal buffer from the underlying system, if it +hadn't already. + +```javascript +var readable = getReadableStreamSomehow(); +readable.on('readable', () => { + // there is some data to read now +}); +``` + +Once the internal buffer is drained, a `'readable'` event will fire +again when more data is available. + +The `'readable'` event is not emitted in the "flowing" mode with the +sole exception of the last one, on end-of-stream. + +The `'readable'` event indicates that the stream has new information: +either new data is available or the end of the stream has been reached. +In the former case, [`stream.read()`][stream-read] will return that data. In the +latter case, [`stream.read()`][stream-read] will return null. 
For instance, in +the following example, `foo.txt` is an empty file: + +```js +const fs = require('fs'); +var rr = fs.createReadStream('foo.txt'); +rr.on('readable', () => { + console.log('readable:', rr.read()); +}); +rr.on('end', () => { + console.log('end'); +}); +``` + +The output of running this script is: + +``` +$ node test.js +readable: null +end +``` + +#### readable.isPaused() + +* Return: {Boolean} + +This method returns whether or not the `readable` has been **explicitly** +paused by client code (using [`stream.pause()`][stream-pause] without a +corresponding [`stream.resume()`][stream-resume]). + +```js +var readable = new stream.Readable + +readable.isPaused() // === false +readable.pause() +readable.isPaused() // === true +readable.resume() +readable.isPaused() // === false +``` + +#### readable.pause() + +* Return: `this` + +This method will cause a stream in flowing mode to stop emitting +[`'data'`][] events, switching out of flowing mode. Any data that becomes +available will remain in the internal buffer. + +```js +var readable = getReadableStreamSomehow(); +readable.on('data', (chunk) => { + console.log('got %d bytes of data', chunk.length); + readable.pause(); + console.log('there will be no more data for 1 second'); + setTimeout(() => { + console.log('now data will start flowing again'); + readable.resume(); + }, 1000); +}); +``` + +#### readable.pipe(destination[, options]) + +* `destination` {stream.Writable} The destination for writing data +* `options` {Object} Pipe options + * `end` {Boolean} End the writer when the reader ends. Default = `true` + +This method pulls all the data out of a readable stream, and writes it +to the supplied destination, automatically managing the flow so that +the destination is not overwhelmed by a fast readable stream. + +Multiple destinations can be piped to safely. + +```js +var readable = getReadableStreamSomehow(); +var writable = fs.createWriteStream('file.txt'); +// All the data from readable goes into 'file.txt' +readable.pipe(writable); +``` + +This function returns the destination stream, so you can set up pipe +chains like so: + +```js +var r = fs.createReadStream('file.txt'); +var z = zlib.createGzip(); +var w = fs.createWriteStream('file.txt.gz'); +r.pipe(z).pipe(w); +``` + +For example, emulating the Unix `cat` command: + +```js +process.stdin.pipe(process.stdout); +``` + +By default [`stream.end()`][stream-end] is called on the destination when the +source stream emits [`'end'`][], so that `destination` is no longer writable. +Pass `{ end: false }` as `options` to keep the destination stream open. + +This keeps `writer` open so that "Goodbye" can be written at the +end. + +```js +reader.pipe(writer, { end: false }); +reader.on('end', () => { + writer.end('Goodbye\n'); +}); +``` + +Note that [`process.stderr`][] and [`process.stdout`][] are never closed until +the process exits, regardless of the specified options. + +#### readable.read([size]) + +* `size` {Number} Optional argument to specify how much data to read. +* Return {String|Buffer|Null} + +The `read()` method pulls some data out of the internal buffer and +returns it. If there is no data available, then it will return +`null`. + +If you pass in a `size` argument, then it will return that many +bytes. If `size` bytes are not available, then it will return `null`, +unless we've ended, in which case it will return the data remaining +in the buffer. + +If you do not specify a `size` argument, then it will return all the +data in the internal buffer. 
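+
+To make the `size` semantics above concrete, here is a small illustrative sketch (an editorial addition, not part of the original documentation; it supplies a no-op `read` implementation purely so the stream can be driven by manual `push()` calls):
+
+```js
+const Readable = require('stream').Readable;
+
+var r = new Readable({ read: function () {} }); // no-op _read; data is pushed manually
+r.push('abc');
+console.log(r.read(5)); // null: only 3 bytes are buffered and the stream hasn't ended
+r.push(null); // signal EOF
+console.log(r.read(5).toString()); // 'abc': once ended, the remaining data is returned
+```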
+ +This method should only be called in paused mode. In flowing mode, +this method is called automatically until the internal buffer is +drained. + +```js +var readable = getReadableStreamSomehow(); +readable.on('readable', () => { + var chunk; + while (null !== (chunk = readable.read())) { + console.log('got %d bytes of data', chunk.length); + } +}); +``` + +If this method returns a data chunk, then it will also trigger the +emission of a [`'data'`][] event. + +Note that calling [`stream.read([size])`][stream-read] after the [`'end'`][] +event has been triggered will return `null`. No runtime error will be raised. + +#### readable.resume() + +* Return: `this` + +This method will cause the readable stream to resume emitting [`'data'`][] +events. + +This method will switch the stream into flowing mode. If you do *not* +want to consume the data from a stream, but you *do* want to get to +its [`'end'`][] event, you can call [`stream.resume()`][stream-resume] to open +the flow of data. + +```js +var readable = getReadableStreamSomehow(); +readable.resume(); +readable.on('end', () => { + console.log('got to the end, but did not read anything'); +}); +``` + +#### readable.setEncoding(encoding) + +* `encoding` {String} The encoding to use. +* Return: `this` + +Call this function to cause the stream to return strings of the specified +encoding instead of Buffer objects. For example, if you do +`readable.setEncoding('utf8')`, then the output data will be interpreted as +UTF-8 data, and returned as strings. If you do `readable.setEncoding('hex')`, +then the data will be encoded in hexadecimal string format. + +This properly handles multi-byte characters that would otherwise be +potentially mangled if you simply pulled the Buffers directly and +called [`buf.toString(encoding)`][] on them. If you want to read the data +as strings, always use this method. + +Also you can disable any encoding at all with `readable.setEncoding(null)`. +This approach is very useful if you deal with binary data or with large +multi-byte strings spread out over multiple chunks. + +```js +var readable = getReadableStreamSomehow(); +readable.setEncoding('utf8'); +readable.on('data', (chunk) => { + assert.equal(typeof chunk, 'string'); + console.log('got %d characters of string data', chunk.length); +}); +``` + +#### readable.unpipe([destination]) + +* `destination` {stream.Writable} Optional specific stream to unpipe + +This method will remove the hooks set up for a previous [`stream.pipe()`][] +call. + +If the destination is not specified, then all pipes are removed. + +If the destination is specified, but no pipe is set up for it, then +this is a no-op. + +```js +var readable = getReadableStreamSomehow(); +var writable = fs.createWriteStream('file.txt'); +// All the data from readable goes into 'file.txt', +// but only for the first second +readable.pipe(writable); +setTimeout(() => { + console.log('stop writing to file.txt'); + readable.unpipe(writable); + console.log('manually close the file stream'); + writable.end(); +}, 1000); +``` + +#### readable.unshift(chunk) + +* `chunk` {Buffer|String} Chunk of data to unshift onto the read queue + +This is useful in certain cases where a stream is being consumed by a +parser, which needs to "un-consume" some data that it has +optimistically pulled out of the source, so that the stream can be +passed on to some other party. + +Note that `stream.unshift(chunk)` cannot be called after the [`'end'`][] event +has been triggered; a runtime error will be raised. 
+ +If you find that you must often call `stream.unshift(chunk)` in your +programs, consider implementing a [Transform][] stream instead. (See [API +for Stream Implementors][].) + +```js +// Pull off a header delimited by \n\n +// use unshift() if we get too much +// Call the callback with (error, header, stream) +const StringDecoder = require('string_decoder').StringDecoder; +function parseHeader(stream, callback) { + stream.on('error', callback); + stream.on('readable', onReadable); + var decoder = new StringDecoder('utf8'); + var header = ''; + function onReadable() { + var chunk; + while (null !== (chunk = stream.read())) { + var str = decoder.write(chunk); + if (str.match(/\n\n/)) { + // found the header boundary + var split = str.split(/\n\n/); + header += split.shift(); + var remaining = split.join('\n\n'); + var buf = new Buffer(remaining, 'utf8'); + if (buf.length) + stream.unshift(buf); + stream.removeListener('error', callback); + stream.removeListener('readable', onReadable); + // now the body of the message can be read from the stream. + callback(null, header, stream); + } else { + // still reading the header. + header += str; + } + } + } +} +``` + +Note that, unlike [`stream.push(chunk)`][stream-push], `stream.unshift(chunk)` +will not end the reading process by resetting the internal reading state of the +stream. This can cause unexpected results if `unshift()` is called during a +read (i.e. from within a [`stream._read()`][stream-_read] implementation on a +custom stream). Following the call to `unshift()` with an immediate +[`stream.push('')`][stream-push] will reset the reading state appropriately, +however it is best to simply avoid calling `unshift()` while in the process of +performing a read. + +#### readable.wrap(stream) + +* `stream` {Stream} An "old style" readable stream + +Versions of Node.js prior to v0.10 had streams that did not implement the +entire Streams API as it is today. (See [Compatibility][] for +more information.) + +If you are using an older Node.js library that emits [`'data'`][] events and +has a [`stream.pause()`][stream-pause] method that is advisory only, then you +can use the `wrap()` method to create a [Readable][] stream that uses the old +stream as its data source. + +You will very rarely ever need to call this function, but it exists +as a convenience for interacting with old Node.js programs and libraries. + +For example: + +```js +const OldReader = require('./old-api-module.js').OldReader; +const Readable = require('stream').Readable; +const oreader = new OldReader; +const myReader = new Readable().wrap(oreader); + +myReader.on('readable', () => { + myReader.read(); // etc. +}); +``` + +### Class: stream.Transform + +Transform streams are [Duplex][] streams where the output is in some way +computed from the input. They implement both the [Readable][] and +[Writable][] interfaces. + +Examples of Transform streams include: + +* [zlib streams][zlib] +* [crypto streams][crypto] + +### Class: stream.Writable + +<!--type=class--> + +The Writable stream interface is an abstraction for a *destination* +that you are writing data *to*. 
+ +Examples of writable streams include: + +* [HTTP requests, on the client][] +* [HTTP responses, on the server][] +* [fs write streams][] +* [zlib streams][zlib] +* [crypto streams][crypto] +* [TCP sockets][] +* [child process stdin][] +* [`process.stdout`][], [`process.stderr`][] + +#### Event: 'drain' + +If a [`stream.write(chunk)`][stream-write] call returns `false`, then the +`'drain'` event will indicate when it is appropriate to begin writing more data +to the stream. + +```js +// Write the data to the supplied writable stream one million times. +// Be attentive to back-pressure. +function writeOneMillionTimes(writer, data, encoding, callback) { + var i = 1000000; + write(); + function write() { + var ok = true; + do { + i -= 1; + if (i === 0) { + // last time! + writer.write(data, encoding, callback); + } else { + // see if we should continue, or wait + // don't pass the callback, because we're not done yet. + ok = writer.write(data, encoding); + } + } while (i > 0 && ok); + if (i > 0) { + // had to stop early! + // write some more once it drains + writer.once('drain', write); + } + } +} +``` + +#### Event: 'error' + +* {Error} + +Emitted if there was an error when writing or piping data. + +#### Event: 'finish' + +When the [`stream.end()`][stream-end] method has been called, and all data has +been flushed to the underlying system, this event is emitted. + +```javascript +var writer = getWritableStreamSomehow(); +for (var i = 0; i < 100; i++) { + writer.write(`hello, #${i}!\n`); +} +writer.end('this is the end\n'); +writer.on('finish', () => { + console.error('all writes are now complete.'); +}); +``` + +#### Event: 'pipe' + +* `src` {stream.Readable} source stream that is piping to this writable + +This is emitted whenever the [`stream.pipe()`][] method is called on a readable +stream, adding this writable to its set of destinations. + +```js +var writer = getWritableStreamSomehow(); +var reader = getReadableStreamSomehow(); +writer.on('pipe', (src) => { + console.error('something is piping into the writer'); + assert.equal(src, reader); +}); +reader.pipe(writer); +``` + +#### Event: 'unpipe' + +* `src` {[Readable][] Stream} The source stream that + [unpiped][`stream.unpipe()`] this writable + +This is emitted whenever the [`stream.unpipe()`][] method is called on a +readable stream, removing this writable from its set of destinations. + +```js +var writer = getWritableStreamSomehow(); +var reader = getReadableStreamSomehow(); +writer.on('unpipe', (src) => { + console.error('something has stopped piping into the writer'); + assert.equal(src, reader); +}); +reader.pipe(writer); +reader.unpipe(writer); +``` + +#### writable.cork() + +Forces buffering of all writes. + +Buffered data will be flushed when either [`stream.uncork()`][] or +[`stream.end()`][stream-end] is called. + +#### writable.end([chunk][, encoding][, callback]) + +* `chunk` {String|Buffer} Optional data to write +* `encoding` {String} The encoding, if `chunk` is a String +* `callback` {Function} Optional callback for when the stream is finished + +Call this method when no more data will be written to the stream. If supplied, +the callback is attached as a listener on the [`'finish'`][] event. + +Calling [`stream.write()`][stream-write] after calling +[`stream.end()`][stream-end] will raise an error. + +```js +// write 'hello, ' and then end with 'world!' +var file = fs.createWriteStream('example.txt'); +file.write('hello, '); +file.end('world!'); +// writing more now is not allowed!
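+
+// editorial addition (not in the original docs): 'finish' fires once the
+// data written above has been flushed to the underlying system
+file.on('finish', () => console.log('all data flushed'));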
+``` + +#### writable.setDefaultEncoding(encoding) + +* `encoding` {String} The new default encoding + +Sets the default encoding for a writable stream. + +#### writable.uncork() + +Flush all data, buffered since [`stream.cork()`][] call. + +#### writable.write(chunk[, encoding][, callback]) + +* `chunk` {String|Buffer} The data to write +* `encoding` {String} The encoding, if `chunk` is a String +* `callback` {Function} Callback for when this chunk of data is flushed +* Returns: {Boolean} `true` if the data was handled completely. + +This method writes some data to the underlying system, and calls the +supplied callback once the data has been fully handled. + +The return value indicates if you should continue writing right now. +If the data had to be buffered internally, then it will return +`false`. Otherwise, it will return `true`. + +This return value is strictly advisory. You MAY continue to write, +even if it returns `false`. However, writes will be buffered in +memory, so it is best not to do this excessively. Instead, wait for +the [`'drain'`][] event before writing more data. + + +## API for Stream Implementors + +<!--type=misc--> + +To implement any sort of stream, the pattern is the same: + +1. Extend the appropriate parent class in your own subclass. (The + [`util.inherits()`][] method is particularly helpful for this.) +2. Call the appropriate parent class constructor in your constructor, + to be sure that the internal mechanisms are set up properly. +3. Implement one or more specific methods, as detailed below. + +The class to extend and the method(s) to implement depend on the sort +of stream class you are writing: + +<table> + <thead> + <tr> + <th> + <p>Use-case</p> + </th> + <th> + <p>Class</p> + </th> + <th> + <p>Method(s) to implement</p> + </th> + </tr> + </thead> + <tr> + <td> + <p>Reading only</p> + </td> + <td> + <p>[Readable](#stream_class_stream_readable_1)</p> + </td> + <td> + <p><code>[_read][stream-_read]</code></p> + </td> + </tr> + <tr> + <td> + <p>Writing only</p> + </td> + <td> + <p>[Writable](#stream_class_stream_writable_1)</p> + </td> + <td> + <p><code>[_write][stream-_write]</code>, <code>[_writev][stream-_writev]</code></p> + </td> + </tr> + <tr> + <td> + <p>Reading and writing</p> + </td> + <td> + <p>[Duplex](#stream_class_stream_duplex_1)</p> + </td> + <td> + <p><code>[_read][stream-_read]</code>, <code>[_write][stream-_write]</code>, <code>[_writev][stream-_writev]</code></p> + </td> + </tr> + <tr> + <td> + <p>Operate on written data, then read the result</p> + </td> + <td> + <p>[Transform](#stream_class_stream_transform_1)</p> + </td> + <td> + <p><code>[_transform][stream-_transform]</code>, <code>[_flush][stream-_flush]</code></p> + </td> + </tr> +</table> + +In your implementation code, it is very important to never call the methods +described in [API for Stream Consumers][]. Otherwise, you can potentially cause +adverse side effects in programs that consume your streaming interfaces. + +### Class: stream.Duplex + +<!--type=class--> + +A "duplex" stream is one that is both Readable and Writable, such as a TCP +socket connection. + +Note that `stream.Duplex` is an abstract class designed to be extended +with an underlying implementation of the [`stream._read(size)`][stream-_read] +and [`stream._write(chunk, encoding, callback)`][stream-_write] methods as you +would with a Readable or Writable stream class. 
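+
+To ground the Duplex contract in code, here is a minimal illustrative sketch (an editorial addition, not from the original documentation) in which the two sides are independent: the writable side tallies incoming bytes and the readable side reports the count:
+
+```js
+const Duplex = require('stream').Duplex;
+const util = require('util');
+
+util.inherits(ByteCounter, Duplex);
+
+function ByteCounter(options) {
+  Duplex.call(this, options);
+  this._bytes = 0;
+}
+
+// writable side: low-level sink that tallies each chunk, then signals completion
+ByteCounter.prototype._write = function (chunk, encoding, callback) {
+  this._bytes += chunk.length;
+  callback();
+};
+
+// readable side: low-level source that emits a one-off report, then EOF
+ByteCounter.prototype._read = function (size) {
+  this.push('seen ' + this._bytes + ' bytes\n');
+  this.push(null);
+};
+```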
+ +Since JavaScript doesn't have multiple prototypal inheritance, this class +prototypally inherits from Readable, and then parasitically from Writable. It is +thus up to the user to implement both the low-level +[`stream._read(n)`][stream-_read] method as well as the low-level +[`stream._write(chunk, encoding, callback)`][stream-_write] method on extension +duplex classes. + +#### new stream.Duplex(options) + +* `options` {Object} Passed to both Writable and Readable + constructors. Also has the following fields: + * `allowHalfOpen` {Boolean} Default = `true`. If set to `false`, then + the stream will automatically end the readable side when the + writable side ends and vice versa. + * `readableObjectMode` {Boolean} Default = `false`. Sets `objectMode` + for readable side of the stream. Has no effect if `objectMode` + is `true`. + * `writableObjectMode` {Boolean} Default = `false`. Sets `objectMode` + for writable side of the stream. Has no effect if `objectMode` + is `true`. + +In classes that extend the Duplex class, make sure to call the +constructor so that the buffering settings can be properly +initialized. + +### Class: stream.PassThrough + +This is a trivial implementation of a [Transform][] stream that simply +passes the input bytes across to the output. Its purpose is mainly +for examples and testing, but there are occasionally use cases where +it can come in handy as a building block for novel sorts of streams. + +### Class: stream.Readable + +<!--type=class--> + +`stream.Readable` is an abstract class designed to be extended with an +underlying implementation of the [`stream._read(size)`][stream-_read] method. + +Please see [API for Stream Consumers][] for how to consume +streams in your programs. What follows is an explanation of how to +implement Readable streams in your programs. + +#### new stream.Readable([options]) + +* `options` {Object} + * `highWaterMark` {Number} The maximum number of bytes to store in + the internal buffer before ceasing to read from the underlying + resource. Default = `16384` (16kb), or `16` for `objectMode` streams + * `encoding` {String} If specified, then buffers will be decoded to + strings using the specified encoding. Default = `null` + * `objectMode` {Boolean} Whether this stream should behave + as a stream of objects. Meaning that [`stream.read(n)`][stream-read] returns + a single value instead of a Buffer of size n. Default = `false` + * `read` {Function} Implementation for the [`stream._read()`][stream-_read] + method. + +In classes that extend the Readable class, make sure to call the +Readable constructor so that the buffering settings can be properly +initialized. + +#### readable.\_read(size) + +* `size` {Number} Number of bytes to read asynchronously + +Note: **Implement this method, but do NOT call it directly.** + +This method is prefixed with an underscore because it is internal to the +class that defines it and should only be called by the internal Readable +class methods. All Readable stream implementations must provide a \_read +method to fetch data from the underlying resource. + +When `_read()` is called, if data is available from the resource, the `_read()` +implementation should start pushing that data into the read queue by calling +[`this.push(dataChunk)`][stream-push]. `_read()` should continue reading from +the resource and pushing data until push returns `false`, at which point it +should stop reading from the resource. 
Only when `_read()` is called again after
it has stopped should it start reading more data from the resource and pushing
that data onto the queue.

Note: once the `_read()` method is called, it will not be called again until
the [`stream.push()`][stream-push] method is called.

The `size` argument is advisory. Implementations where a "read" is a
single call that returns data can use this to know how much data to
fetch. Implementations where that is not relevant, such as TCP or
TLS, may ignore this argument, and simply provide data whenever it
becomes available. There is no need, for example, to "wait" until
`size` bytes are available before calling [`stream.push(chunk)`][stream-push].

#### readable.push(chunk[, encoding])

* `chunk` {Buffer|Null|String} Chunk of data to push into the read queue
* `encoding` {String} Encoding of String chunks. Must be a valid
  Buffer encoding, such as `'utf8'` or `'ascii'`
* Returns: {Boolean} Whether or not more pushes should be performed

Note: **This method should be called by Readable implementors, NOT
by consumers of Readable streams.**

If a value other than `null` is passed, the `push()` method adds a chunk of data
into the queue for subsequent stream processors to consume. If `null` is
passed, it signals the end of the stream (EOF), after which no more data
can be pushed.

The data added with `push()` can be pulled out by calling the
[`stream.read()`][stream-read] method when the [`'readable'`][] event fires.

This API is designed to be as flexible as possible. For example,
you may be wrapping a lower-level source which has some sort of
pause/resume mechanism, and a data callback. In those cases, you
could wrap the low-level source object by doing something like this:

```js
const Readable = require('stream').Readable;
const util = require('util');

// source is an object with readStop() and readStart() methods,
// and an `ondata` member that gets called when it has data, and
// an `onend` member that gets called when the data is over.

util.inherits(SourceWrapper, Readable);

function SourceWrapper(options) {
  Readable.call(this, options);

  this._source = getLowlevelSourceObject();

  // Every time there's data, we push it into the internal buffer.
  this._source.ondata = (chunk) => {
    // if push() returns false, then we need to stop reading from source
    if (!this.push(chunk))
      this._source.readStop();
  };

  // When the source ends, we push the EOF-signaling `null` chunk
  this._source.onend = () => {
    this.push(null);
  };
}

// _read() will be called when the stream wants to pull more data in.
// The advisory size argument is ignored in this case.
SourceWrapper.prototype._read = function(size) {
  this._source.readStart();
};
```

#### Example: A Counting Stream

<!--type=example-->

This is a basic example of a Readable stream. It emits the numerals
from 1 to 1,000,000 in ascending order, and then ends.

```js
const Readable = require('stream').Readable;
const util = require('util');
util.inherits(Counter, Readable);

function Counter(opt) {
  Readable.call(this, opt);
  this._max = 1000000;
  this._index = 1;
}

Counter.prototype._read = function() {
  var i = this._index++;
  if (i > this._max)
    this.push(null);
  else {
    var str = '' + i;
    var buf = new Buffer(str, 'ascii');
    this.push(buf);
  }
};
```

#### Example: SimpleProtocol v1 (Sub-optimal)

This is similar to the `parseHeader` function described
[here](#stream_readable_unshift_chunk), but implemented as a custom stream.
+Also, note that this implementation does not convert the incoming data to a +string. + +However, this would be better implemented as a [Transform][] stream. See +[SimpleProtocol v2][] for a better implementation. + +```js +// A parser for a simple data protocol. +// The "header" is a JSON object, followed by 2 \n characters, and +// then a message body. +// +// NOTE: This can be done more simply as a Transform stream! +// Using Readable directly for this is sub-optimal. See the +// alternative example below under the Transform section. + +const Readable = require('stream').Readable; +const util = require('util'); + +util.inherits(SimpleProtocol, Readable); + +function SimpleProtocol(source, options) { + if (!(this instanceof SimpleProtocol)) + return new SimpleProtocol(source, options); + + Readable.call(this, options); + this._inBody = false; + this._sawFirstCr = false; + + // source is a readable stream, such as a socket or file + this._source = source; + + var self = this; + source.on('end', () => { + self.push(null); + }); + + // give it a kick whenever the source is readable + // read(0) will not consume any bytes + source.on('readable', () => { + self.read(0); + }); + + this._rawHeader = []; + this.header = null; +} + +SimpleProtocol.prototype._read = function(n) { + if (!this._inBody) { + var chunk = this._source.read(); + + // if the source doesn't have data, we don't have data yet. + if (chunk === null) + return this.push(''); + + // check if the chunk has a \n\n + var split = -1; + for (var i = 0; i < chunk.length; i++) { + if (chunk[i] === 10) { // '\n' + if (this._sawFirstCr) { + split = i; + break; + } else { + this._sawFirstCr = true; + } + } else { + this._sawFirstCr = false; + } + } + + if (split === -1) { + // still waiting for the \n\n + // stash the chunk, and try again. + this._rawHeader.push(chunk); + this.push(''); + } else { + this._inBody = true; + var h = chunk.slice(0, split); + this._rawHeader.push(h); + var header = Buffer.concat(this._rawHeader).toString(); + try { + this.header = JSON.parse(header); + } catch (er) { + this.emit('error', new Error('invalid simple protocol data')); + return; + } + // now, because we got some extra data, unshift the rest + // back into the read queue so that our consumer will see it. + var b = chunk.slice(split); + this.unshift(b); + // calling unshift by itself does not reset the reading state + // of the stream; since we're inside _read, doing an additional + // push('') will reset the state appropriately. + this.push(''); + + // and let them know that we are done parsing the header. + this.emit('header', this.header); + } + } else { + // from there on, just provide the data to our consumer. + // careful not to push(null), since that would indicate EOF. + var chunk = this._source.read(); + if (chunk) this.push(chunk); + } +}; + +// Usage: +// var parser = new SimpleProtocol(source); +// Now parser is a readable stream that will emit 'header' +// with the parsed header data. +``` + +### Class: stream.Transform + +A "transform" stream is a duplex stream where the output is causally +connected in some way to the input, such as a [zlib][] stream or a +[crypto][] stream. + +There is no requirement that the output be the same size as the input, +the same number of chunks, or arrive at the same time. For example, a +Hash stream will only ever have a single chunk of output which is +provided when the input is ended. A zlib stream will produce output +that is either much smaller or much larger than its input. 
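As a small sketch of that point, the hypothetical transform below produces a
single output chunk regardless of how much input it receives; it relies on the
`_transform()` and `_flush()` hooks documented next:

```js
const Transform = require('stream').Transform;
const util = require('util');

util.inherits(ByteCounter, Transform);

// Consumes any amount of input and, like a Hash stream, produces a
// single chunk of output only once the input has ended.
function ByteCounter(options) {
  if (!(this instanceof ByteCounter))
    return new ByteCounter(options);
  Transform.call(this, options);
  this._count = 0;
}

ByteCounter.prototype._transform = function(chunk, encoding, callback) {
  this._count += chunk.length; // no output yet
  callback();
};

ByteCounter.prototype._flush = function(callback) {
  this.push(this._count + ' bytes\n'); // the one and only output chunk
  callback();
};

// Usage:
// fs.createReadStream('file').pipe(new ByteCounter()).pipe(process.stdout);
// prints something like '12345 bytes'
```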
+

Rather than implement the [`stream._read()`][stream-_read] and
[`stream._write()`][stream-_write] methods, Transform classes must implement the
[`stream._transform()`][stream-_transform] method, and may optionally
also implement the [`stream._flush()`][stream-_flush] method. (See below.)

#### new stream.Transform([options])

* `options` {Object} Passed to both Writable and Readable
  constructors. Also has the following fields:
  * `transform` {Function} Implementation for the
    [`stream._transform()`][stream-_transform] method.
  * `flush` {Function} Implementation for the [`stream._flush()`][stream-_flush]
    method.

In classes that extend the Transform class, make sure to call the
constructor so that the buffering settings can be properly
initialized.

#### Events: 'finish' and 'end'

The [`'finish'`][] and [`'end'`][] events come from the parent Writable
and Readable classes, respectively. The `'finish'` event is fired after
[`stream.end()`][stream-end] is called and all chunks have been processed by
[`stream._transform()`][stream-_transform]; `'end'` is fired after all data has
been output, which happens after the callback in
[`stream._flush()`][stream-_flush] has been called.

#### transform.\_flush(callback)

* `callback` {Function} Call this function (optionally with an error
  argument) when you are done flushing any remaining data.

Note: **This function MUST NOT be called directly.** It MAY be implemented
by child classes, and if so, will be called by the internal Transform
class methods only.

In some cases, your transform operation may need to emit a bit more
data at the end of the stream. For example, a `Zlib` compression
stream will store up some internal state so that it can optimally
compress the output. At the end, however, it needs to do the best it
can with what is left, so that the data will be complete.

In those cases, you can implement a `_flush()` method, which will be
called at the very end, after all the written data is consumed, but
before emitting [`'end'`][] to signal the end of the readable side. Just
like with [`stream._transform()`][stream-_transform], call
`transform.push(chunk)` zero or more times, as appropriate, and call `callback`
when the flush operation is complete.

This method is prefixed with an underscore because it is internal to
the class that defines it, and should not be called directly by user
programs. However, you **are** expected to override this method in
your own extension classes.

#### transform.\_transform(chunk, encoding, callback)

* `chunk` {Buffer|String} The chunk to be transformed. Will **always**
  be a buffer unless the `decodeStrings` option was set to `false`.
* `encoding` {String} If the chunk is a string, then this is the
  encoding type. If the chunk is a buffer, then this is the special
  value `'buffer'`; ignore it in this case.
* `callback` {Function} Call this function (optionally with an error
  argument and data) when you are done processing the supplied chunk.

Note: **This function MUST NOT be called directly.** It should be
implemented by child classes, and called by the internal Transform
class methods only.

All Transform stream implementations must provide a `_transform()`
method to accept input and produce output.

`_transform()` should do whatever has to be done in this specific
Transform class, to handle the bytes being written, and pass them off
to the readable portion of the interface. Do asynchronous I/O,
process things, and so on.
+ +Call `transform.push(outputChunk)` 0 or more times to generate output +from this input chunk, depending on how much data you want to output +as a result of this chunk. + +Call the callback function only when the current chunk is completely +consumed. Note that there may or may not be output as a result of any +particular input chunk. If you supply a second argument to the callback +it will be passed to the push method. In other words the following are +equivalent: + +```js +transform.prototype._transform = function (data, encoding, callback) { + this.push(data); + callback(); +}; + +transform.prototype._transform = function (data, encoding, callback) { + callback(null, data); +}; +``` + +This method is prefixed with an underscore because it is internal to +the class that defines it, and should not be called directly by user +programs. However, you **are** expected to override this method in +your own extension classes. + +#### Example: `SimpleProtocol` parser v2 + +The example [here](#stream_example_simpleprotocol_v1_sub_optimal) of a simple +protocol parser can be implemented simply by using the higher level +[Transform][] stream class, similar to the `parseHeader` and `SimpleProtocol +v1` examples. + +In this example, rather than providing the input as an argument, it +would be piped into the parser, which is a more idiomatic Node.js stream +approach. + +```javascript +const util = require('util'); +const Transform = require('stream').Transform; +util.inherits(SimpleProtocol, Transform); + +function SimpleProtocol(options) { + if (!(this instanceof SimpleProtocol)) + return new SimpleProtocol(options); + + Transform.call(this, options); + this._inBody = false; + this._sawFirstCr = false; + this._rawHeader = []; + this.header = null; +} + +SimpleProtocol.prototype._transform = function(chunk, encoding, done) { + if (!this._inBody) { + // check if the chunk has a \n\n + var split = -1; + for (var i = 0; i < chunk.length; i++) { + if (chunk[i] === 10) { // '\n' + if (this._sawFirstCr) { + split = i; + break; + } else { + this._sawFirstCr = true; + } + } else { + this._sawFirstCr = false; + } + } + + if (split === -1) { + // still waiting for the \n\n + // stash the chunk, and try again. + this._rawHeader.push(chunk); + } else { + this._inBody = true; + var h = chunk.slice(0, split); + this._rawHeader.push(h); + var header = Buffer.concat(this._rawHeader).toString(); + try { + this.header = JSON.parse(header); + } catch (er) { + this.emit('error', new Error('invalid simple protocol data')); + return; + } + // and let them know that we are done parsing the header. + this.emit('header', this.header); + + // now, because we got some extra data, emit this first. + this.push(chunk.slice(split)); + } + } else { + // from there on, just provide the data to our consumer as-is. + this.push(chunk); + } + done(); +}; + +// Usage: +// var parser = new SimpleProtocol(); +// source.pipe(parser) +// Now parser is a readable stream that will emit 'header' +// with the parsed header data. +``` + +### Class: stream.Writable + +<!--type=class--> + +`stream.Writable` is an abstract class designed to be extended with an +underlying implementation of the +[`stream._write(chunk, encoding, callback)`][stream-_write] method. + +Please see [API for Stream Consumers][] for how to consume +writable streams in your programs. What follows is an explanation of +how to implement Writable streams in your programs. 
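Before the constructor details, here is a minimal sketch of that pattern;
`MemoryWritable` is an invented name for this example, not a core class:

```js
const Writable = require('stream').Writable;
const util = require('util');

util.inherits(MemoryWritable, Writable);

// Collects every written chunk into an in-memory list.
function MemoryWritable(options) {
  if (!(this instanceof MemoryWritable))
    return new MemoryWritable(options);
  Writable.call(this, options);
  this.chunks = [];
}

MemoryWritable.prototype._write = function(chunk, encoding, callback) {
  this.chunks.push(chunk);
  callback(); // signal that this chunk has been handled
};

// Usage:
// var w = new MemoryWritable();
// w.end('abc'); // w.chunks now holds a single Buffer
```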
+

#### new stream.Writable([options])

* `options` {Object}
  * `highWaterMark` {Number} Buffer level when
    [`stream.write()`][stream-write] starts returning `false`. Default = `16384`
    (16kb), or `16` for `objectMode` streams.
  * `decodeStrings` {Boolean} Whether or not to decode strings into
    Buffers before passing them to [`stream._write()`][stream-_write].
    Default = `true`
  * `objectMode` {Boolean} Whether or not
    [`stream.write(anyObj)`][stream-write] is a valid operation. If set, you can
    write arbitrary data instead of only `Buffer` / `String` data.
    Default = `false`
  * `write` {Function} Implementation for the
    [`stream._write()`][stream-_write] method.
  * `writev` {Function} Implementation for the
    [`stream._writev()`][stream-_writev] method.

In classes that extend the Writable class, make sure to call the
constructor so that the buffering settings can be properly
initialized.

#### writable.\_write(chunk, encoding, callback)

* `chunk` {Buffer|String} The chunk to be written. Will **always**
  be a buffer unless the `decodeStrings` option was set to `false`.
* `encoding` {String} If the chunk is a string, then this is the
  encoding type. If the chunk is a buffer, then this is the special
  value `'buffer'`; ignore it in this case.
* `callback` {Function} Call this function (optionally with an error
  argument) when you are done processing the supplied chunk.

All Writable stream implementations must provide a
[`stream._write()`][stream-_write] method to send data to the underlying
resource.

Note: **This function MUST NOT be called directly.** It should be
implemented by child classes, and called by the internal Writable
class methods only.

Call the callback using the standard `callback(error)` pattern to
signal that the write completed successfully or with an error.

If the `decodeStrings` flag is set to `false` in the constructor options, then
`chunk` may be a string rather than a Buffer, and `encoding` will
indicate the sort of string that it is. This is to support
implementations that have an optimized handling for certain string
data encodings. If you do not explicitly set the `decodeStrings`
option to `false`, then you can safely ignore the `encoding` argument,
and assume that `chunk` will always be a Buffer.

This method is prefixed with an underscore because it is internal to
the class that defines it, and should not be called directly by user
programs. However, you **are** expected to override this method in
your own extension classes.

#### writable.\_writev(chunks, callback)

* `chunks` {Array} The chunks to be written. Each chunk has the following
  format: `{ chunk: ..., encoding: ... }`.
* `callback` {Function} Call this function (optionally with an error
  argument) when you are done processing the supplied chunks.

Note: **This function MUST NOT be called directly.** It may be
implemented by child classes, and called by the internal Writable
class methods only.

This function is completely optional to implement. In most cases it is
unnecessary. If implemented, it will be called with all the chunks
that are buffered in the write queue.


## Simplified Constructor API

<!--type=misc-->

In simple cases there is now the added benefit of being able to construct a
stream without inheritance.
+ +This can be done by passing the appropriate methods as constructor options: + +Examples: + +### Duplex + +```js +var duplex = new stream.Duplex({ + read: function(n) { + // sets this._read under the hood + + // push data onto the read queue, passing null + // will signal the end of the stream (EOF) + this.push(chunk); + }, + write: function(chunk, encoding, next) { + // sets this._write under the hood + + // An optional error can be passed as the first argument + next() + } +}); + +// or + +var duplex = new stream.Duplex({ + read: function(n) { + // sets this._read under the hood + + // push data onto the read queue, passing null + // will signal the end of the stream (EOF) + this.push(chunk); + }, + writev: function(chunks, next) { + // sets this._writev under the hood + + // An optional error can be passed as the first argument + next() + } +}); +``` + +### Readable + +```js +var readable = new stream.Readable({ + read: function(n) { + // sets this._read under the hood + + // push data onto the read queue, passing null + // will signal the end of the stream (EOF) + this.push(chunk); + } +}); +``` + +### Transform + +```js +var transform = new stream.Transform({ + transform: function(chunk, encoding, next) { + // sets this._transform under the hood + + // generate output as many times as needed + // this.push(chunk); + + // call when the current chunk is consumed + next(); + }, + flush: function(done) { + // sets this._flush under the hood + + // generate output as many times as needed + // this.push(chunk); + + done(); + } +}); +``` + +### Writable + +```js +var writable = new stream.Writable({ + write: function(chunk, encoding, next) { + // sets this._write under the hood + + // An optional error can be passed as the first argument + next() + } +}); + +// or + +var writable = new stream.Writable({ + writev: function(chunks, next) { + // sets this._writev under the hood + + // An optional error can be passed as the first argument + next() + } +}); +``` + +## Streams: Under the Hood + +<!--type=misc--> + +### Buffering + +<!--type=misc--> + +Both Writable and Readable streams will buffer data on an internal +object which can be retrieved from `_writableState.getBuffer()` or +`_readableState.buffer`, respectively. + +The amount of data that will potentially be buffered depends on the +`highWaterMark` option which is passed into the constructor. + +Buffering in Readable streams happens when the implementation calls +[`stream.push(chunk)`][stream-push]. If the consumer of the Stream does not +call [`stream.read()`][stream-read], then the data will sit in the internal +queue until it is consumed. + +Buffering in Writable streams happens when the user calls +[`stream.write(chunk)`][stream-write] repeatedly, even when it returns `false`. + +The purpose of streams, especially with the [`stream.pipe()`][] method, is to +limit the buffering of data to acceptable levels, so that sources and +destinations of varying speed will not overwhelm the available memory. + +### Compatibility with Older Node.js Versions + +<!--type=misc--> + +In versions of Node.js prior to v0.10, the Readable stream interface was +simpler, but also less powerful and less useful. + +* Rather than waiting for you to call the [`stream.read()`][stream-read] method, + [`'data'`][] events would start emitting immediately. If you needed to do + some I/O to decide how to handle data, then you had to store the chunks + in some kind of buffer so that they would not be lost. 
+* The [`stream.pause()`][stream-pause] method was advisory, rather than + guaranteed. This meant that you still had to be prepared to receive + [`'data'`][] events even when the stream was in a paused state. + +In Node.js v0.10, the [Readable][] class was added. +For backwards compatibility with older Node.js programs, Readable streams +switch into "flowing mode" when a [`'data'`][] event handler is added, or +when the [`stream.resume()`][stream-resume] method is called. The effect is +that, even if you are not using the new [`stream.read()`][stream-read] method +and [`'readable'`][] event, you no longer have to worry about losing +[`'data'`][] chunks. + +Most programs will continue to function normally. However, this +introduces an edge case in the following conditions: + +* No [`'data'`][] event handler is added. +* The [`stream.resume()`][stream-resume] method is never called. +* The stream is not piped to any writable destination. + +For example, consider the following code: + +```js +// WARNING! BROKEN! +net.createServer((socket) => { + + // we add an 'end' method, but never consume the data + socket.on('end', () => { + // It will never get here. + socket.end('I got your message (but didnt read it)\n'); + }); + +}).listen(1337); +``` + +In versions of Node.js prior to v0.10, the incoming message data would be +simply discarded. However, in Node.js v0.10 and beyond, +the socket will remain paused forever. + +The workaround in this situation is to call the +[`stream.resume()`][stream-resume] method to start the flow of data: + +```js +// Workaround +net.createServer((socket) => { + + socket.on('end', () => { + socket.end('I got your message (but didnt read it)\n'); + }); + + // start the flow of data, discarding it. + socket.resume(); + +}).listen(1337); +``` + +In addition to new Readable streams switching into flowing mode, +pre-v0.10 style streams can be wrapped in a Readable class using the +[`stream.wrap()`][] method. + + +### Object Mode + +<!--type=misc--> + +Normally, Streams operate on Strings and Buffers exclusively. + +Streams that are in **object mode** can emit generic JavaScript values +other than Buffers and Strings. + +A Readable stream in object mode will always return a single item from +a call to [`stream.read(size)`][stream-read], regardless of what the size +argument is. + +A Writable stream in object mode will always ignore the `encoding` +argument to [`stream.write(data, encoding)`][stream-write]. + +The special value `null` still retains its special value for object +mode streams. That is, for object mode readable streams, `null` as a +return value from [`stream.read()`][stream-read] indicates that there is no more +data, and [`stream.push(null)`][stream-push] will signal the end of stream data +(`EOF`). + +No streams in Node.js core are object mode streams. This pattern is only +used by userland streaming libraries. + +You should set `objectMode` in your stream child class constructor on +the options object. Setting `objectMode` mid-stream is not safe. + +For Duplex streams `objectMode` can be set exclusively for readable or +writable side with `readableObjectMode` and `writableObjectMode` +respectively. These options can be used to implement parsers and +serializers with Transform streams. 
+ +```js +const util = require('util'); +const StringDecoder = require('string_decoder').StringDecoder; +const Transform = require('stream').Transform; +util.inherits(JSONParseStream, Transform); + +// Gets \n-delimited JSON string data, and emits the parsed objects +function JSONParseStream() { + if (!(this instanceof JSONParseStream)) + return new JSONParseStream(); + + Transform.call(this, { readableObjectMode : true }); + + this._buffer = ''; + this._decoder = new StringDecoder('utf8'); +} + +JSONParseStream.prototype._transform = function(chunk, encoding, cb) { + this._buffer += this._decoder.write(chunk); + // split on newlines + var lines = this._buffer.split(/\r?\n/); + // keep the last partial line buffered + this._buffer = lines.pop(); + for (var l = 0; l < lines.length; l++) { + var line = lines[l]; + try { + var obj = JSON.parse(line); + } catch (er) { + this.emit('error', er); + return; + } + // push the parsed object out to the readable consumer + this.push(obj); + } + cb(); +}; + +JSONParseStream.prototype._flush = function(cb) { + // Just handle any leftover + var rem = this._buffer.trim(); + if (rem) { + try { + var obj = JSON.parse(rem); + } catch (er) { + this.emit('error', er); + return; + } + // push the parsed object out to the readable consumer + this.push(obj); + } + cb(); +}; +``` + +### `stream.read(0)` + +There are some cases where you want to trigger a refresh of the +underlying readable stream mechanisms, without actually consuming any +data. In that case, you can call `stream.read(0)`, which will always +return null. + +If the internal read buffer is below the `highWaterMark`, and the +stream is not currently reading, then calling `stream.read(0)` will trigger +a low-level [`stream._read()`][stream-_read] call. + +There is almost never a need to do this. However, you will see some +cases in Node.js's internals where this is done, particularly in the +Readable stream class internals. + +### `stream.push('')` + +Pushing a zero-byte string or Buffer (when not in [Object mode][]) has an +interesting side effect. Because it *is* a call to +[`stream.push()`][stream-push], it will end the `reading` process. However, it +does *not* add any data to the readable buffer, so there's nothing for +a user to consume. + +Very rarely, there are cases where you have no data to provide now, +but the consumer of your stream (or, perhaps, another bit of your own +code) will know when to check again, by calling [`stream.read(0)`][stream-read]. +In those cases, you *may* call `stream.push('')`. + +So far, the only use case for this functionality is in the +[`tls.CryptoStream`][] class, which is deprecated in Node.js/io.js v1.0. If you +find that you have to use `stream.push('')`, please consider another +approach, because it almost certainly indicates that something is +horribly wrong. 
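To tie the last two sections together, here is a hedged sketch of the pattern
they describe; `source`, its `pull()` method, and its `'ready'` event are
assumptions for the example, and as noted above this design should be a last
resort:

```js
const Readable = require('stream').Readable;

// `source` is assumed to emit 'ready' when source.pull() may return data.
function wrapPolledSource(source) {
  var stream = new Readable({
    read: function(size) {
      var chunk = source.pull();
      if (chunk === null) {
        // nothing available right now: push('') resets the reading
        // state so the read(0) kick below can trigger _read again
        this.push('');
      } else {
        this.push(chunk);
      }
    }
  });

  source.on('ready', function() {
    // refresh the stream machinery without consuming any data;
    // read(0) always returns null
    stream.read(0);
  });

  return stream;
}
```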
+ +[`'data'`]: #stream_event_data +[`'drain'`]: #stream_event_drain +[`'end'`]: #stream_event_end +[`'finish'`]: #stream_event_finish +[`'readable'`]: #stream_event_readable +[`buf.toString(encoding)`]: https://nodejs.org/docs/v5.8.0/api/buffer.html#buffer_buf_tostring_encoding_start_end +[`EventEmitter`]: https://nodejs.org/docs/v5.8.0/api/events.html#events_class_eventemitter +[`process.stderr`]: https://nodejs.org/docs/v5.8.0/api/process.html#process_process_stderr +[`process.stdin`]: https://nodejs.org/docs/v5.8.0/api/process.html#process_process_stdin +[`process.stdout`]: https://nodejs.org/docs/v5.8.0/api/process.html#process_process_stdout +[`stream.cork()`]: #stream_writable_cork +[`stream.pipe()`]: #stream_readable_pipe_destination_options +[`stream.uncork()`]: #stream_writable_uncork +[`stream.unpipe()`]: #stream_readable_unpipe_destination +[`stream.wrap()`]: #stream_readable_wrap_stream +[`tls.CryptoStream`]: https://nodejs.org/docs/v5.8.0/api/tls.html#tls_class_cryptostream +[`util.inherits()`]: https://nodejs.org/docs/v5.8.0/api/util.html#util_util_inherits_constructor_superconstructor +[API for Stream Consumers]: #stream_api_for_stream_consumers +[API for Stream Implementors]: #stream_api_for_stream_implementors +[child process stdin]: https://nodejs.org/docs/v5.8.0/api/child_process.html#child_process_child_stdin +[child process stdout and stderr]: https://nodejs.org/docs/v5.8.0/api/child_process.html#child_process_child_stdout +[Compatibility]: #stream_compatibility_with_older_node_js_versions +[crypto]: crypto.html +[Duplex]: #stream_class_stream_duplex +[fs read streams]: https://nodejs.org/docs/v5.8.0/api/fs.html#fs_class_fs_readstream +[fs write streams]: https://nodejs.org/docs/v5.8.0/api/fs.html#fs_class_fs_writestream +[HTTP requests, on the client]: https://nodejs.org/docs/v5.8.0/api/http.html#http_class_http_clientrequest +[HTTP responses, on the server]: https://nodejs.org/docs/v5.8.0/api/http.html#http_class_http_serverresponse +[http-incoming-message]: https://nodejs.org/docs/v5.8.0/api/http.html#http_class_http_incomingmessage +[Object mode]: #stream_object_mode +[Readable]: #stream_class_stream_readable +[SimpleProtocol v2]: #stream_example_simpleprotocol_parser_v2 +[stream-_flush]: #stream_transform_flush_callback +[stream-_read]: #stream_readable_read_size_1 +[stream-_transform]: #stream_transform_transform_chunk_encoding_callback +[stream-_write]: #stream_writable_write_chunk_encoding_callback_1 +[stream-_writev]: #stream_writable_writev_chunks_callback +[stream-end]: #stream_writable_end_chunk_encoding_callback +[stream-pause]: #stream_readable_pause +[stream-push]: #stream_readable_push_chunk_encoding +[stream-read]: #stream_readable_read_size +[stream-resume]: #stream_readable_resume +[stream-write]: #stream_writable_write_chunk_encoding_callback +[TCP sockets]: https://nodejs.org/docs/v5.8.0/api/net.html#net_class_net_socket +[Transform]: #stream_class_stream_transform +[Writable]: #stream_class_stream_writable +[zlib]: zlib.html diff --git a/node_modules/bl/node_modules/readable-stream/doc/wg-meetings/2015-01-30.md b/node_modules/bl/node_modules/readable-stream/doc/wg-meetings/2015-01-30.md new file mode 100644 index 000000000..83275f192 --- /dev/null +++ b/node_modules/bl/node_modules/readable-stream/doc/wg-meetings/2015-01-30.md @@ -0,0 +1,60 @@ +# streams WG Meeting 2015-01-30 + +## Links + +* **Google Hangouts Video**: http://www.youtube.com/watch?v=I9nDOSGfwZg +* **GitHub Issue**: https://github.com/iojs/readable-stream/issues/106 +* **Original 
Minutes Google Doc**: https://docs.google.com/document/d/17aTgLnjMXIrfjgNaTUnHQO7m3xgzHR2VXBTmi03Qii4/ + +## Agenda + +Extracted from https://github.com/iojs/readable-stream/labels/wg-agenda prior to meeting. + +* adopt a charter [#105](https://github.com/iojs/readable-stream/issues/105) +* release and versioning strategy [#101](https://github.com/iojs/readable-stream/issues/101) +* simpler stream creation [#102](https://github.com/iojs/readable-stream/issues/102) +* proposal: deprecate implicit flowing of streams [#99](https://github.com/iojs/readable-stream/issues/99) + +## Minutes + +### adopt a charter + +* group: +1's all around + +### What versioning scheme should be adopted? +* group: +1’s 3.0.0 +* domenic+group: pulling in patches from other sources where appropriate +* mikeal: version independently, suggesting versions for io.js +* mikeal+domenic: work with TC to notify in advance of changes +simpler stream creation + +### streamline creation of streams +* sam: streamline creation of streams +* domenic: nice simple solution posted + but, we lose the opportunity to change the model + may not be backwards incompatible (double check keys) + + **action item:** domenic will check + +### remove implicit flowing of streams on(‘data’) +* add isFlowing / isPaused +* mikeal: worrying that we’re documenting polyfill methods – confuses users +* domenic: more reflective API is probably good, with warning labels for users +* new section for mad scientists (reflective stream access) +* calvin: name the “third state” +* mikeal: maybe borrow the name from whatwg? +* domenic: we’re missing the “third state” +* consensus: kind of difficult to name the third state +* mikeal: figure out differences in states / compat +* mathias: always flow on data – eliminates third state + * explore what it breaks + +**action items:** +* ask isaac for ability to list packages by what public io.js APIs they use (esp. Stream) +* ask rod/build for infrastructure +* **chris**: explore the “flow on data” approach +* add isPaused/isFlowing +* add new docs section +* move isPaused to that section + + diff --git a/node_modules/bl/node_modules/readable-stream/duplex.js b/node_modules/bl/node_modules/readable-stream/duplex.js new file mode 100644 index 000000000..ca807af87 --- /dev/null +++ b/node_modules/bl/node_modules/readable-stream/duplex.js @@ -0,0 +1 @@ +module.exports = require("./lib/_stream_duplex.js") diff --git a/node_modules/bl/node_modules/readable-stream/lib/_stream_duplex.js b/node_modules/bl/node_modules/readable-stream/lib/_stream_duplex.js new file mode 100644 index 000000000..736693b84 --- /dev/null +++ b/node_modules/bl/node_modules/readable-stream/lib/_stream_duplex.js @@ -0,0 +1,75 @@ +// a duplex stream is just a stream that is both readable and writable. +// Since JS doesn't have multiple prototypal inheritance, this class +// prototypally inherits from Readable, and then parasitically from +// Writable. 
+ +'use strict'; + +/*<replacement>*/ + +var objectKeys = Object.keys || function (obj) { + var keys = []; + for (var key in obj) { + keys.push(key); + }return keys; +}; +/*</replacement>*/ + +module.exports = Duplex; + +/*<replacement>*/ +var processNextTick = require('process-nextick-args'); +/*</replacement>*/ + +/*<replacement>*/ +var util = require('core-util-is'); +util.inherits = require('inherits'); +/*</replacement>*/ + +var Readable = require('./_stream_readable'); +var Writable = require('./_stream_writable'); + +util.inherits(Duplex, Readable); + +var keys = objectKeys(Writable.prototype); +for (var v = 0; v < keys.length; v++) { + var method = keys[v]; + if (!Duplex.prototype[method]) Duplex.prototype[method] = Writable.prototype[method]; +} + +function Duplex(options) { + if (!(this instanceof Duplex)) return new Duplex(options); + + Readable.call(this, options); + Writable.call(this, options); + + if (options && options.readable === false) this.readable = false; + + if (options && options.writable === false) this.writable = false; + + this.allowHalfOpen = true; + if (options && options.allowHalfOpen === false) this.allowHalfOpen = false; + + this.once('end', onend); +} + +// the no-half-open enforcer +function onend() { + // if we allow half-open state, or if the writable side ended, + // then we're ok. + if (this.allowHalfOpen || this._writableState.ended) return; + + // no more data can be written. + // But allow more writes to happen in this tick. + processNextTick(onEndNT, this); +} + +function onEndNT(self) { + self.end(); +} + +function forEach(xs, f) { + for (var i = 0, l = xs.length; i < l; i++) { + f(xs[i], i); + } +}
\ No newline at end of file diff --git a/node_modules/bl/node_modules/readable-stream/lib/_stream_passthrough.js b/node_modules/bl/node_modules/readable-stream/lib/_stream_passthrough.js new file mode 100644 index 000000000..d06f71f18 --- /dev/null +++ b/node_modules/bl/node_modules/readable-stream/lib/_stream_passthrough.js @@ -0,0 +1,26 @@ +// a passthrough stream. +// basically just the most minimal sort of Transform stream. +// Every written chunk gets output as-is. + +'use strict'; + +module.exports = PassThrough; + +var Transform = require('./_stream_transform'); + +/*<replacement>*/ +var util = require('core-util-is'); +util.inherits = require('inherits'); +/*</replacement>*/ + +util.inherits(PassThrough, Transform); + +function PassThrough(options) { + if (!(this instanceof PassThrough)) return new PassThrough(options); + + Transform.call(this, options); +} + +PassThrough.prototype._transform = function (chunk, encoding, cb) { + cb(null, chunk); +};
\ No newline at end of file diff --git a/node_modules/bl/node_modules/readable-stream/lib/_stream_readable.js b/node_modules/bl/node_modules/readable-stream/lib/_stream_readable.js new file mode 100644 index 000000000..54a9d5c55 --- /dev/null +++ b/node_modules/bl/node_modules/readable-stream/lib/_stream_readable.js @@ -0,0 +1,880 @@ +'use strict'; + +module.exports = Readable; + +/*<replacement>*/ +var processNextTick = require('process-nextick-args'); +/*</replacement>*/ + +/*<replacement>*/ +var isArray = require('isarray'); +/*</replacement>*/ + +/*<replacement>*/ +var Buffer = require('buffer').Buffer; +/*</replacement>*/ + +Readable.ReadableState = ReadableState; + +var EE = require('events'); + +/*<replacement>*/ +var EElistenerCount = function (emitter, type) { + return emitter.listeners(type).length; +}; +/*</replacement>*/ + +/*<replacement>*/ +var Stream; +(function () { + try { + Stream = require('st' + 'ream'); + } catch (_) {} finally { + if (!Stream) Stream = require('events').EventEmitter; + } +})(); +/*</replacement>*/ + +var Buffer = require('buffer').Buffer; + +/*<replacement>*/ +var util = require('core-util-is'); +util.inherits = require('inherits'); +/*</replacement>*/ + +/*<replacement>*/ +var debugUtil = require('util'); +var debug = undefined; +if (debugUtil && debugUtil.debuglog) { + debug = debugUtil.debuglog('stream'); +} else { + debug = function () {}; +} +/*</replacement>*/ + +var StringDecoder; + +util.inherits(Readable, Stream); + +var Duplex; +function ReadableState(options, stream) { + Duplex = Duplex || require('./_stream_duplex'); + + options = options || {}; + + // object stream flag. Used to make read(n) ignore n and to + // make all the buffer merging and length checks go away + this.objectMode = !!options.objectMode; + + if (stream instanceof Duplex) this.objectMode = this.objectMode || !!options.readableObjectMode; + + // the point at which it stops calling _read() to fill the buffer + // Note: 0 is a valid value, means "don't call _read preemptively ever" + var hwm = options.highWaterMark; + var defaultHwm = this.objectMode ? 16 : 16 * 1024; + this.highWaterMark = hwm || hwm === 0 ? hwm : defaultHwm; + + // cast to ints. + this.highWaterMark = ~ ~this.highWaterMark; + + this.buffer = []; + this.length = 0; + this.pipes = null; + this.pipesCount = 0; + this.flowing = null; + this.ended = false; + this.endEmitted = false; + this.reading = false; + + // a flag to be able to tell if the onwrite cb is called immediately, + // or on a later tick. We set this to true at first, because any + // actions that shouldn't happen until "later" should generally also + // not happen before the first write call. + this.sync = true; + + // whenever we return null, then we set a flag to say + // that we're awaiting a 'readable' event emission. + this.needReadable = false; + this.emittedReadable = false; + this.readableListening = false; + this.resumeScheduled = false; + + // Crypto is kind of old and crusty. Historically, its default string + // encoding is 'binary' so we have to make this configurable. + // Everything else in the universe uses 'utf8', though. + this.defaultEncoding = options.defaultEncoding || 'utf8'; + + // when piping, we only care about 'readable' events that happen + // after read()ing all the bytes and not getting any pushback. 
+ this.ranOut = false; + + // the number of writers that are awaiting a drain event in .pipe()s + this.awaitDrain = 0; + + // if true, a maybeReadMore has been scheduled + this.readingMore = false; + + this.decoder = null; + this.encoding = null; + if (options.encoding) { + if (!StringDecoder) StringDecoder = require('string_decoder/').StringDecoder; + this.decoder = new StringDecoder(options.encoding); + this.encoding = options.encoding; + } +} + +var Duplex; +function Readable(options) { + Duplex = Duplex || require('./_stream_duplex'); + + if (!(this instanceof Readable)) return new Readable(options); + + this._readableState = new ReadableState(options, this); + + // legacy + this.readable = true; + + if (options && typeof options.read === 'function') this._read = options.read; + + Stream.call(this); +} + +// Manually shove something into the read() buffer. +// This returns true if the highWaterMark has not been hit yet, +// similar to how Writable.write() returns true if you should +// write() some more. +Readable.prototype.push = function (chunk, encoding) { + var state = this._readableState; + + if (!state.objectMode && typeof chunk === 'string') { + encoding = encoding || state.defaultEncoding; + if (encoding !== state.encoding) { + chunk = new Buffer(chunk, encoding); + encoding = ''; + } + } + + return readableAddChunk(this, state, chunk, encoding, false); +}; + +// Unshift should *always* be something directly out of read() +Readable.prototype.unshift = function (chunk) { + var state = this._readableState; + return readableAddChunk(this, state, chunk, '', true); +}; + +Readable.prototype.isPaused = function () { + return this._readableState.flowing === false; +}; + +function readableAddChunk(stream, state, chunk, encoding, addToFront) { + var er = chunkInvalid(state, chunk); + if (er) { + stream.emit('error', er); + } else if (chunk === null) { + state.reading = false; + onEofChunk(stream, state); + } else if (state.objectMode || chunk && chunk.length > 0) { + if (state.ended && !addToFront) { + var e = new Error('stream.push() after EOF'); + stream.emit('error', e); + } else if (state.endEmitted && addToFront) { + var e = new Error('stream.unshift() after end event'); + stream.emit('error', e); + } else { + var skipAdd; + if (state.decoder && !addToFront && !encoding) { + chunk = state.decoder.write(chunk); + skipAdd = !state.objectMode && chunk.length === 0; + } + + if (!addToFront) state.reading = false; + + // Don't add to the buffer if we've decoded to an empty string chunk and + // we're not in object mode + if (!skipAdd) { + // if we want the data now, just emit it. + if (state.flowing && state.length === 0 && !state.sync) { + stream.emit('data', chunk); + stream.read(0); + } else { + // update the buffer info. + state.length += state.objectMode ? 1 : chunk.length; + if (addToFront) state.buffer.unshift(chunk);else state.buffer.push(chunk); + + if (state.needReadable) emitReadable(stream); + } + } + + maybeReadMore(stream, state); + } + } else if (!addToFront) { + state.reading = false; + } + + return needMoreData(state); +} + +// if it's past the high water mark, we can push in some more. +// Also, if we have no data yet, we can stand some +// more bytes. This is to work around cases where hwm=0, +// such as the repl. Also, if the push() triggered a +// readable event, and the user called read(largeNumber) such that +// needReadable was set, then we ought to push more, so that another +// 'readable' event will be triggered. 
+function needMoreData(state) { + return !state.ended && (state.needReadable || state.length < state.highWaterMark || state.length === 0); +} + +// backwards compatibility. +Readable.prototype.setEncoding = function (enc) { + if (!StringDecoder) StringDecoder = require('string_decoder/').StringDecoder; + this._readableState.decoder = new StringDecoder(enc); + this._readableState.encoding = enc; + return this; +}; + +// Don't raise the hwm > 8MB +var MAX_HWM = 0x800000; +function computeNewHighWaterMark(n) { + if (n >= MAX_HWM) { + n = MAX_HWM; + } else { + // Get the next highest power of 2 + n--; + n |= n >>> 1; + n |= n >>> 2; + n |= n >>> 4; + n |= n >>> 8; + n |= n >>> 16; + n++; + } + return n; +} + +function howMuchToRead(n, state) { + if (state.length === 0 && state.ended) return 0; + + if (state.objectMode) return n === 0 ? 0 : 1; + + if (n === null || isNaN(n)) { + // only flow one buffer at a time + if (state.flowing && state.buffer.length) return state.buffer[0].length;else return state.length; + } + + if (n <= 0) return 0; + + // If we're asking for more than the target buffer level, + // then raise the water mark. Bump up to the next highest + // power of 2, to prevent increasing it excessively in tiny + // amounts. + if (n > state.highWaterMark) state.highWaterMark = computeNewHighWaterMark(n); + + // don't have that much. return null, unless we've ended. + if (n > state.length) { + if (!state.ended) { + state.needReadable = true; + return 0; + } else { + return state.length; + } + } + + return n; +} + +// you can override either this method, or the async _read(n) below. +Readable.prototype.read = function (n) { + debug('read', n); + var state = this._readableState; + var nOrig = n; + + if (typeof n !== 'number' || n > 0) state.emittedReadable = false; + + // if we're doing read(0) to trigger a readable event, but we + // already have a bunch of data in the buffer, then just trigger + // the 'readable' event and move on. + if (n === 0 && state.needReadable && (state.length >= state.highWaterMark || state.ended)) { + debug('read: emitReadable', state.length, state.ended); + if (state.length === 0 && state.ended) endReadable(this);else emitReadable(this); + return null; + } + + n = howMuchToRead(n, state); + + // if we've ended, and we're now clear, then finish it up. + if (n === 0 && state.ended) { + if (state.length === 0) endReadable(this); + return null; + } + + // All the actual chunk generation logic needs to be + // *below* the call to _read. The reason is that in certain + // synthetic stream cases, such as passthrough streams, _read + // may be a completely synchronous operation which may change + // the state of the read buffer, providing enough data when + // before there was *not* enough. + // + // So, the steps are: + // 1. Figure out what the state of things will be after we do + // a read from the buffer. + // + // 2. If that resulting state will trigger a _read, then call _read. + // Note that this may be asynchronous, or synchronous. Yes, it is + // deeply ugly to write APIs this way, but that still doesn't mean + // that the Readable class should behave improperly, as streams are + // designed to be sync/async agnostic. + // Take note if the _read call is sync or async (ie, if the read call + // has returned yet), so that we know whether or not it's safe to emit + // 'readable' etc. + // + // 3. Actually pull the requested chunks out of the buffer and return. + + // if we need a readable event, then we need to do some reading. 
+ var doRead = state.needReadable; + debug('need readable', doRead); + + // if we currently have less than the highWaterMark, then also read some + if (state.length === 0 || state.length - n < state.highWaterMark) { + doRead = true; + debug('length less than watermark', doRead); + } + + // however, if we've ended, then there's no point, and if we're already + // reading, then it's unnecessary. + if (state.ended || state.reading) { + doRead = false; + debug('reading or ended', doRead); + } + + if (doRead) { + debug('do read'); + state.reading = true; + state.sync = true; + // if the length is currently zero, then we *need* a readable event. + if (state.length === 0) state.needReadable = true; + // call internal read method + this._read(state.highWaterMark); + state.sync = false; + } + + // If _read pushed data synchronously, then `reading` will be false, + // and we need to re-evaluate how much data we can return to the user. + if (doRead && !state.reading) n = howMuchToRead(nOrig, state); + + var ret; + if (n > 0) ret = fromList(n, state);else ret = null; + + if (ret === null) { + state.needReadable = true; + n = 0; + } + + state.length -= n; + + // If we have nothing in the buffer, then we want to know + // as soon as we *do* get something into the buffer. + if (state.length === 0 && !state.ended) state.needReadable = true; + + // If we tried to read() past the EOF, then emit end on the next tick. + if (nOrig !== n && state.ended && state.length === 0) endReadable(this); + + if (ret !== null) this.emit('data', ret); + + return ret; +}; + +function chunkInvalid(state, chunk) { + var er = null; + if (!Buffer.isBuffer(chunk) && typeof chunk !== 'string' && chunk !== null && chunk !== undefined && !state.objectMode) { + er = new TypeError('Invalid non-string/buffer chunk'); + } + return er; +} + +function onEofChunk(stream, state) { + if (state.ended) return; + if (state.decoder) { + var chunk = state.decoder.end(); + if (chunk && chunk.length) { + state.buffer.push(chunk); + state.length += state.objectMode ? 1 : chunk.length; + } + } + state.ended = true; + + // emit 'readable' now to make sure it gets picked up. + emitReadable(stream); +} + +// Don't emit readable right away in sync mode, because this can trigger +// another read() call => stack overflow. This way, it might trigger +// a nextTick recursion warning, but that's not so bad. +function emitReadable(stream) { + var state = stream._readableState; + state.needReadable = false; + if (!state.emittedReadable) { + debug('emitReadable', state.flowing); + state.emittedReadable = true; + if (state.sync) processNextTick(emitReadable_, stream);else emitReadable_(stream); + } +} + +function emitReadable_(stream) { + debug('emit readable'); + stream.emit('readable'); + flow(stream); +} + +// at this point, the user has presumably seen the 'readable' event, +// and called read() to consume some data. that may have triggered +// in turn another _read(n) call, in which case reading = true if +// it's in progress. +// However, if we're not ended, or reading, and the length < hwm, +// then go ahead and try to read some more preemptively. 
+function maybeReadMore(stream, state) { + if (!state.readingMore) { + state.readingMore = true; + processNextTick(maybeReadMore_, stream, state); + } +} + +function maybeReadMore_(stream, state) { + var len = state.length; + while (!state.reading && !state.flowing && !state.ended && state.length < state.highWaterMark) { + debug('maybeReadMore read 0'); + stream.read(0); + if (len === state.length) + // didn't get any data, stop spinning. + break;else len = state.length; + } + state.readingMore = false; +} + +// abstract method. to be overridden in specific implementation classes. +// call cb(er, data) where data is <= n in length. +// for virtual (non-string, non-buffer) streams, "length" is somewhat +// arbitrary, and perhaps not very meaningful. +Readable.prototype._read = function (n) { + this.emit('error', new Error('not implemented')); +}; + +Readable.prototype.pipe = function (dest, pipeOpts) { + var src = this; + var state = this._readableState; + + switch (state.pipesCount) { + case 0: + state.pipes = dest; + break; + case 1: + state.pipes = [state.pipes, dest]; + break; + default: + state.pipes.push(dest); + break; + } + state.pipesCount += 1; + debug('pipe count=%d opts=%j', state.pipesCount, pipeOpts); + + var doEnd = (!pipeOpts || pipeOpts.end !== false) && dest !== process.stdout && dest !== process.stderr; + + var endFn = doEnd ? onend : cleanup; + if (state.endEmitted) processNextTick(endFn);else src.once('end', endFn); + + dest.on('unpipe', onunpipe); + function onunpipe(readable) { + debug('onunpipe'); + if (readable === src) { + cleanup(); + } + } + + function onend() { + debug('onend'); + dest.end(); + } + + // when the dest drains, it reduces the awaitDrain counter + // on the source. This would be more elegant with a .once() + // handler in flow(), but adding and removing repeatedly is + // too slow. + var ondrain = pipeOnDrain(src); + dest.on('drain', ondrain); + + var cleanedUp = false; + function cleanup() { + debug('cleanup'); + // cleanup event handlers once the pipe is broken + dest.removeListener('close', onclose); + dest.removeListener('finish', onfinish); + dest.removeListener('drain', ondrain); + dest.removeListener('error', onerror); + dest.removeListener('unpipe', onunpipe); + src.removeListener('end', onend); + src.removeListener('end', cleanup); + src.removeListener('data', ondata); + + cleanedUp = true; + + // if the reader is waiting for a drain event from this + // specific writer, then it would cause it to never start + // flowing again. + // So, if this is awaiting a drain, then we just call it now. + // If we don't know, then assume that we are waiting for one. + if (state.awaitDrain && (!dest._writableState || dest._writableState.needDrain)) ondrain(); + } + + src.on('data', ondata); + function ondata(chunk) { + debug('ondata'); + var ret = dest.write(chunk); + if (false === ret) { + // If the user unpiped during `dest.write()`, it is possible + // to get stuck in a permanently paused state if that write + // also returned false. + if (state.pipesCount === 1 && state.pipes[0] === dest && src.listenerCount('data') === 1 && !cleanedUp) { + debug('false write response, pause', src._readableState.awaitDrain); + src._readableState.awaitDrain++; + } + src.pause(); + } + } + + // if the dest has an error, then stop piping into it. + // however, don't suppress the throwing behavior for this. 
+ function onerror(er) { + debug('onerror', er); + unpipe(); + dest.removeListener('error', onerror); + if (EElistenerCount(dest, 'error') === 0) dest.emit('error', er); + } + // This is a brutally ugly hack to make sure that our error handler + // is attached before any userland ones. NEVER DO THIS. + if (!dest._events || !dest._events.error) dest.on('error', onerror);else if (isArray(dest._events.error)) dest._events.error.unshift(onerror);else dest._events.error = [onerror, dest._events.error]; + + // Both close and finish should trigger unpipe, but only once. + function onclose() { + dest.removeListener('finish', onfinish); + unpipe(); + } + dest.once('close', onclose); + function onfinish() { + debug('onfinish'); + dest.removeListener('close', onclose); + unpipe(); + } + dest.once('finish', onfinish); + + function unpipe() { + debug('unpipe'); + src.unpipe(dest); + } + + // tell the dest that it's being piped to + dest.emit('pipe', src); + + // start the flow if it hasn't been started already. + if (!state.flowing) { + debug('pipe resume'); + src.resume(); + } + + return dest; +}; + +function pipeOnDrain(src) { + return function () { + var state = src._readableState; + debug('pipeOnDrain', state.awaitDrain); + if (state.awaitDrain) state.awaitDrain--; + if (state.awaitDrain === 0 && EElistenerCount(src, 'data')) { + state.flowing = true; + flow(src); + } + }; +} + +Readable.prototype.unpipe = function (dest) { + var state = this._readableState; + + // if we're not piping anywhere, then do nothing. + if (state.pipesCount === 0) return this; + + // just one destination. most common case. + if (state.pipesCount === 1) { + // passed in one, but it's not the right one. + if (dest && dest !== state.pipes) return this; + + if (!dest) dest = state.pipes; + + // got a match. + state.pipes = null; + state.pipesCount = 0; + state.flowing = false; + if (dest) dest.emit('unpipe', this); + return this; + } + + // slow case. multiple pipe destinations. + + if (!dest) { + // remove all. + var dests = state.pipes; + var len = state.pipesCount; + state.pipes = null; + state.pipesCount = 0; + state.flowing = false; + + for (var _i = 0; _i < len; _i++) { + dests[_i].emit('unpipe', this); + }return this; + } + + // try to find the right one. + var i = indexOf(state.pipes, dest); + if (i === -1) return this; + + state.pipes.splice(i, 1); + state.pipesCount -= 1; + if (state.pipesCount === 1) state.pipes = state.pipes[0]; + + dest.emit('unpipe', this); + + return this; +}; + +// set up data events if they are asked for +// Ensure readable listeners eventually get something +Readable.prototype.on = function (ev, fn) { + var res = Stream.prototype.on.call(this, ev, fn); + + // If listening to data, and it has not explicitly been paused, + // then call resume to start the flow of data on the next tick. 
+ if (ev === 'data' && false !== this._readableState.flowing) { + this.resume(); + } + + if (ev === 'readable' && !this._readableState.endEmitted) { + var state = this._readableState; + if (!state.readableListening) { + state.readableListening = true; + state.emittedReadable = false; + state.needReadable = true; + if (!state.reading) { + processNextTick(nReadingNextTick, this); + } else if (state.length) { + emitReadable(this, state); + } + } + } + + return res; +}; +Readable.prototype.addListener = Readable.prototype.on; + +function nReadingNextTick(self) { + debug('readable nexttick read 0'); + self.read(0); +} + +// pause() and resume() are remnants of the legacy readable stream API +// If the user uses them, then switch into old mode. +Readable.prototype.resume = function () { + var state = this._readableState; + if (!state.flowing) { + debug('resume'); + state.flowing = true; + resume(this, state); + } + return this; +}; + +function resume(stream, state) { + if (!state.resumeScheduled) { + state.resumeScheduled = true; + processNextTick(resume_, stream, state); + } +} + +function resume_(stream, state) { + if (!state.reading) { + debug('resume read 0'); + stream.read(0); + } + + state.resumeScheduled = false; + stream.emit('resume'); + flow(stream); + if (state.flowing && !state.reading) stream.read(0); +} + +Readable.prototype.pause = function () { + debug('call pause flowing=%j', this._readableState.flowing); + if (false !== this._readableState.flowing) { + debug('pause'); + this._readableState.flowing = false; + this.emit('pause'); + } + return this; +}; + +function flow(stream) { + var state = stream._readableState; + debug('flow', state.flowing); + if (state.flowing) { + do { + var chunk = stream.read(); + } while (null !== chunk && state.flowing); + } +} + +// wrap an old-style stream as the async data source. +// This is *not* part of the readable stream interface. +// It is an ugly unfortunate mess of history. +Readable.prototype.wrap = function (stream) { + var state = this._readableState; + var paused = false; + + var self = this; + stream.on('end', function () { + debug('wrapped end'); + if (state.decoder && !state.ended) { + var chunk = state.decoder.end(); + if (chunk && chunk.length) self.push(chunk); + } + + self.push(null); + }); + + stream.on('data', function (chunk) { + debug('wrapped data'); + if (state.decoder) chunk = state.decoder.write(chunk); + + // don't skip over falsy values in objectMode + if (state.objectMode && (chunk === null || chunk === undefined)) return;else if (!state.objectMode && (!chunk || !chunk.length)) return; + + var ret = self.push(chunk); + if (!ret) { + paused = true; + stream.pause(); + } + }); + + // proxy all the other methods. + // important when wrapping filters and duplexes. + for (var i in stream) { + if (this[i] === undefined && typeof stream[i] === 'function') { + this[i] = function (method) { + return function () { + return stream[method].apply(stream, arguments); + }; + }(i); + } + } + + // proxy certain important events. + var events = ['error', 'close', 'destroy', 'pause', 'resume']; + forEach(events, function (ev) { + stream.on(ev, self.emit.bind(self, ev)); + }); + + // when we try to consume some more bytes, simply unpause the + // underlying stream. + self._read = function (n) { + debug('wrapped _read', n); + if (paused) { + paused = false; + stream.resume(); + } + }; + + return self; +}; + +// exposed for testing purposes only. +Readable._fromList = fromList; + +// Pluck off n bytes from an array of buffers. 
+// Length is the combined lengths of all the buffers in the list. +function fromList(n, state) { + var list = state.buffer; + var length = state.length; + var stringMode = !!state.decoder; + var objectMode = !!state.objectMode; + var ret; + + // nothing in the list, definitely empty. + if (list.length === 0) return null; + + if (length === 0) ret = null;else if (objectMode) ret = list.shift();else if (!n || n >= length) { + // read it all, truncate the array. + if (stringMode) ret = list.join('');else if (list.length === 1) ret = list[0];else ret = Buffer.concat(list, length); + list.length = 0; + } else { + // read just some of it. + if (n < list[0].length) { + // just take a part of the first list item. + // slice is the same for buffers and strings. + var buf = list[0]; + ret = buf.slice(0, n); + list[0] = buf.slice(n); + } else if (n === list[0].length) { + // first list is a perfect match + ret = list.shift(); + } else { + // complex case. + // we have enough to cover it, but it spans past the first buffer. + if (stringMode) ret = '';else ret = new Buffer(n); + + var c = 0; + for (var i = 0, l = list.length; i < l && c < n; i++) { + var buf = list[0]; + var cpy = Math.min(n - c, buf.length); + + if (stringMode) ret += buf.slice(0, cpy);else buf.copy(ret, c, 0, cpy); + + if (cpy < buf.length) list[0] = buf.slice(cpy);else list.shift(); + + c += cpy; + } + } + } + + return ret; +} + +function endReadable(stream) { + var state = stream._readableState; + + // If we get here before consuming all the bytes, then that is a + // bug in node. Should never happen. + if (state.length > 0) throw new Error('endReadable called on non-empty stream'); + + if (!state.endEmitted) { + state.ended = true; + processNextTick(endReadableNT, state, stream); + } +} + +function endReadableNT(state, stream) { + // Check that we didn't get one last unshift. + if (!state.endEmitted && state.length === 0) { + state.endEmitted = true; + stream.readable = false; + stream.emit('end'); + } +} + +function forEach(xs, f) { + for (var i = 0, l = xs.length; i < l; i++) { + f(xs[i], i); + } +} + +function indexOf(xs, x) { + for (var i = 0, l = xs.length; i < l; i++) { + if (xs[i] === x) return i; + } + return -1; +}
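The readable implementation above leaves `_read(n)` abstract, moves data through `push()`, and throttles `pipe()` sources via the `awaitDrain` counter whenever a destination's `write()` returns false. A minimal sketch of a stream written against that contract, assuming the vendored `readable-stream` is resolvable from the working directory (the `CountStream` name is illustrative):

```js
// Illustrative sketch, not part of the vendored code: a Readable
// that emits the numbers 1..5 and then ends.
var Readable = require('readable-stream').Readable
var util = require('util')

function CountStream (limit) {
  Readable.call(this)
  this._n = 0
  this._limit = limit
}
util.inherits(CountStream, Readable)

// Override the abstract _read() hook shown above; push() queues
// data for consumers, and push(null) signals EOF, which lets
// endReadable() emit 'end' once the buffer drains.
CountStream.prototype._read = function () {
  if (this._n >= this._limit) return this.push(null)
  this.push(String(++this._n) + '\n')
}

new CountStream(5).pipe(process.stdout)
```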
\ No newline at end of file diff --git a/node_modules/bl/node_modules/readable-stream/lib/_stream_transform.js b/node_modules/bl/node_modules/readable-stream/lib/_stream_transform.js new file mode 100644 index 000000000..625cdc176 --- /dev/null +++ b/node_modules/bl/node_modules/readable-stream/lib/_stream_transform.js @@ -0,0 +1,180 @@ +// a transform stream is a readable/writable stream where you do +// something with the data. Sometimes it's called a "filter", +// but that's not a great name for it, since that implies a thing where +// some bits pass through, and others are simply ignored. (That would +// be a valid example of a transform, of course.) +// +// While the output is causally related to the input, it's not a +// necessarily symmetric or synchronous transformation. For example, +// a zlib stream might take multiple plain-text writes(), and then +// emit a single compressed chunk some time in the future. +// +// Here's how this works: +// +// The Transform stream has all the aspects of the readable and writable +// stream classes. When you write(chunk), that calls _write(chunk,cb) +// internally, and returns false if there's a lot of pending writes +// buffered up. When you call read(), that calls _read(n) until +// there's enough pending readable data buffered up. +// +// In a transform stream, the written data is placed in a buffer. When +// _read(n) is called, it transforms the queued up data, calling the +// buffered _write cb's as it consumes chunks. If consuming a single +// written chunk would result in multiple output chunks, then the first +// outputted bit calls the readcb, and subsequent chunks just go into +// the read buffer, and will cause it to emit 'readable' if necessary. +// +// This way, back-pressure is actually determined by the reading side, +// since _read has to be called to start processing a new chunk. However, +// a pathological inflate type of transform can cause excessive buffering +// here. For example, imagine a stream where every byte of input is +// interpreted as an integer from 0-255, and then results in that many +// bytes of output. Writing the 4 bytes {ff,ff,ff,ff} would result in +// 1kb of data being output. In this case, you could write a very small +// amount of input, and end up with a very large amount of output. In +// such a pathological inflating mechanism, there'd be no way to tell +// the system to stop doing the transform. A single 4MB write could +// cause the system to run out of memory. +// +// However, even in such a pathological case, only a single written chunk +// would be consumed, and then the rest would wait (un-transformed) until +// the results of the previous transformed chunk were consumed. 
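The design notes above pin down the backpressure contract: written chunks wait in a buffer, `_transform` runs only when the readable side asks for data, and output leaves through `push()`. A hedged sketch of that contract, again assuming `readable-stream` resolves locally (the `Upcase` name is illustrative):

```js
// Illustrative sketch: a Transform that upper-cases each chunk.
var Transform = require('readable-stream').Transform
var util = require('util')

function Upcase () {
  Transform.call(this)
}
util.inherits(Upcase, Transform)

Upcase.prototype._transform = function (chunk, encoding, cb) {
  // push() hands output to the readable side; cb() releases the
  // buffered write callback so the next chunk can be consumed.
  this.push(chunk.toString('utf8').toUpperCase())
  cb()
}

process.stdin.pipe(new Upcase()).pipe(process.stdout)
```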
+ +'use strict'; + +module.exports = Transform; + +var Duplex = require('./_stream_duplex'); + +/*<replacement>*/ +var util = require('core-util-is'); +util.inherits = require('inherits'); +/*</replacement>*/ + +util.inherits(Transform, Duplex); + +function TransformState(stream) { + this.afterTransform = function (er, data) { + return afterTransform(stream, er, data); + }; + + this.needTransform = false; + this.transforming = false; + this.writecb = null; + this.writechunk = null; + this.writeencoding = null; +} + +function afterTransform(stream, er, data) { + var ts = stream._transformState; + ts.transforming = false; + + var cb = ts.writecb; + + if (!cb) return stream.emit('error', new Error('no writecb in Transform class')); + + ts.writechunk = null; + ts.writecb = null; + + if (data !== null && data !== undefined) stream.push(data); + + cb(er); + + var rs = stream._readableState; + rs.reading = false; + if (rs.needReadable || rs.length < rs.highWaterMark) { + stream._read(rs.highWaterMark); + } +} + +function Transform(options) { + if (!(this instanceof Transform)) return new Transform(options); + + Duplex.call(this, options); + + this._transformState = new TransformState(this); + + // when the writable side finishes, then flush out anything remaining. + var stream = this; + + // start out asking for a readable event once data is transformed. + this._readableState.needReadable = true; + + // we have implemented the _read method, and done the other things + // that Readable wants before the first _read call, so unset the + // sync guard flag. + this._readableState.sync = false; + + if (options) { + if (typeof options.transform === 'function') this._transform = options.transform; + + if (typeof options.flush === 'function') this._flush = options.flush; + } + + this.once('prefinish', function () { + if (typeof this._flush === 'function') this._flush(function (er) { + done(stream, er); + });else done(stream); + }); +} + +Transform.prototype.push = function (chunk, encoding) { + this._transformState.needTransform = false; + return Duplex.prototype.push.call(this, chunk, encoding); +}; + +// This is the part where you do stuff! +// override this function in implementation classes. +// 'chunk' is an input chunk. +// +// Call `push(newChunk)` to pass along transformed output +// to the readable side. You may call 'push' zero or more times. +// +// Call `cb(err)` when you are done with this chunk. If you pass +// an error, then that'll put the hurt on the whole operation. If you +// never call cb(), then you'll never get another chunk. +Transform.prototype._transform = function (chunk, encoding, cb) { + throw new Error('not implemented'); +}; + +Transform.prototype._write = function (chunk, encoding, cb) { + var ts = this._transformState; + ts.writecb = cb; + ts.writechunk = chunk; + ts.writeencoding = encoding; + if (!ts.transforming) { + var rs = this._readableState; + if (ts.needTransform || rs.needReadable || rs.length < rs.highWaterMark) this._read(rs.highWaterMark); + } +}; + +// Doesn't matter what the args are here. +// _transform does all the work. +// That we got here means that the readable side wants more data. +Transform.prototype._read = function (n) { + var ts = this._transformState; + + if (ts.writechunk !== null && ts.writecb && !ts.transforming) { + ts.transforming = true; + this._transform(ts.writechunk, ts.writeencoding, ts.afterTransform); + } else { + // mark that we need a transform, so that any data that comes in + // will get processed, now that we've asked for it. 
+ ts.needTransform = true; + } +}; + +function done(stream, er) { + if (er) return stream.emit('error', er); + + // if there's nothing in the write buffer, then that means + // that nothing more will ever be provided + var ws = stream._writableState; + var ts = stream._transformState; + + if (ws.length) throw new Error('calling transform done when ws.length != 0'); + + if (ts.transforming) throw new Error('calling transform done when still transforming'); + + return stream.push(null); +}
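Note that the constructor above also accepts `transform` and `flush` functions directly in its options, and `_flush` is driven by the 'prefinish' event so that final output can be pushed before `done()` ends the readable side. A sketch of that shorthand (the line-counting logic is illustrative):

```js
// Illustrative sketch: the options shorthand wired up in the
// Transform constructor above.
var Transform = require('readable-stream').Transform

var count = 0
var lineCounter = new Transform({
  transform: function (chunk, encoding, cb) {
    count += chunk.toString('utf8').split('\n').length - 1
    cb() // consume input without emitting anything yet
  },
  flush: function (cb) {
    this.push('lines: ' + count + '\n') // runs on 'prefinish'
    cb()
  }
})

process.stdin.pipe(lineCounter).pipe(process.stdout)
```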
\ No newline at end of file diff --git a/node_modules/bl/node_modules/readable-stream/lib/_stream_writable.js b/node_modules/bl/node_modules/readable-stream/lib/_stream_writable.js new file mode 100644 index 000000000..95916c992 --- /dev/null +++ b/node_modules/bl/node_modules/readable-stream/lib/_stream_writable.js @@ -0,0 +1,516 @@ +// A bit simpler than readable streams. +// Implement an async ._write(chunk, encoding, cb), and it'll handle all +// the drain event emission and buffering. + +'use strict'; + +module.exports = Writable; + +/*<replacement>*/ +var processNextTick = require('process-nextick-args'); +/*</replacement>*/ + +/*<replacement>*/ +var asyncWrite = !process.browser && ['v0.10', 'v0.9.'].indexOf(process.version.slice(0, 5)) > -1 ? setImmediate : processNextTick; +/*</replacement>*/ + +/*<replacement>*/ +var Buffer = require('buffer').Buffer; +/*</replacement>*/ + +Writable.WritableState = WritableState; + +/*<replacement>*/ +var util = require('core-util-is'); +util.inherits = require('inherits'); +/*</replacement>*/ + +/*<replacement>*/ +var internalUtil = { + deprecate: require('util-deprecate') +}; +/*</replacement>*/ + +/*<replacement>*/ +var Stream; +(function () { + try { + Stream = require('st' + 'ream'); + } catch (_) {} finally { + if (!Stream) Stream = require('events').EventEmitter; + } +})(); +/*</replacement>*/ + +var Buffer = require('buffer').Buffer; + +util.inherits(Writable, Stream); + +function nop() {} + +function WriteReq(chunk, encoding, cb) { + this.chunk = chunk; + this.encoding = encoding; + this.callback = cb; + this.next = null; +} + +var Duplex; +function WritableState(options, stream) { + Duplex = Duplex || require('./_stream_duplex'); + + options = options || {}; + + // object stream flag to indicate whether or not this stream + // contains buffers or objects. + this.objectMode = !!options.objectMode; + + if (stream instanceof Duplex) this.objectMode = this.objectMode || !!options.writableObjectMode; + + // the point at which write() starts returning false + // Note: 0 is a valid value, means that we always return false if + // the entire buffer is not flushed immediately on write() + var hwm = options.highWaterMark; + var defaultHwm = this.objectMode ? 16 : 16 * 1024; + this.highWaterMark = hwm || hwm === 0 ? hwm : defaultHwm; + + // cast to ints. + this.highWaterMark = ~ ~this.highWaterMark; + + this.needDrain = false; + // at the start of calling end() + this.ending = false; + // when end() has been called, and returned + this.ended = false; + // when 'finish' is emitted + this.finished = false; + + // should we decode strings into buffers before passing to _write? + // this is here so that some node-core streams can optimize string + // handling at a lower level. + var noDecode = options.decodeStrings === false; + this.decodeStrings = !noDecode; + + // Crypto is kind of old and crusty. Historically, its default string + // encoding is 'binary' so we have to make this configurable. + // Everything else in the universe uses 'utf8', though. + this.defaultEncoding = options.defaultEncoding || 'utf8'; + + // not an actual buffer we keep track of, but a measurement + // of how much we're waiting to get pushed to some underlying + // socket or file. + this.length = 0; + + // a flag to see when we're in the middle of a write. + this.writing = false; + + // when true all writes will be buffered until .uncork() call + this.corked = 0; + + // a flag to be able to tell if the onwrite cb is called immediately, + // or on a later tick. 
We set this to true at first, because any + // actions that shouldn't happen until "later" should generally also + // not happen before the first write call. + this.sync = true; + + // a flag to know if we're processing previously buffered items, which + // may call the _write() callback in the same tick, so that we don't + // end up in an overlapped onwrite situation. + this.bufferProcessing = false; + + // the callback that's passed to _write(chunk,cb) + this.onwrite = function (er) { + onwrite(stream, er); + }; + + // the callback that the user supplies to write(chunk,encoding,cb) + this.writecb = null; + + // the amount that is being written when _write is called. + this.writelen = 0; + + this.bufferedRequest = null; + this.lastBufferedRequest = null; + + // number of pending user-supplied write callbacks + // this must be 0 before 'finish' can be emitted + this.pendingcb = 0; + + // emit prefinish if the only thing we're waiting for is _write cbs + // This is relevant for synchronous Transform streams + this.prefinished = false; + + // True if the error was already emitted and should not be thrown again + this.errorEmitted = false; + + // count buffered requests + this.bufferedRequestCount = 0; + + // create the two objects needed to store the corked requests + // they are not a linked list, as no new elements are inserted in there + this.corkedRequestsFree = new CorkedRequest(this); + this.corkedRequestsFree.next = new CorkedRequest(this); +} + +WritableState.prototype.getBuffer = function writableStateGetBuffer() { + var current = this.bufferedRequest; + var out = []; + while (current) { + out.push(current); + current = current.next; + } + return out; +}; + +(function () { + try { + Object.defineProperty(WritableState.prototype, 'buffer', { + get: internalUtil.deprecate(function () { + return this.getBuffer(); + }, '_writableState.buffer is deprecated. Use _writableState.getBuffer ' + 'instead.') + }); + } catch (_) {} +})(); + +var Duplex; +function Writable(options) { + Duplex = Duplex || require('./_stream_duplex'); + + // Writable ctor is applied to Duplexes, though they're not + // instanceof Writable, they're instanceof Readable. + if (!(this instanceof Writable) && !(this instanceof Duplex)) return new Writable(options); + + this._writableState = new WritableState(options, this); + + // legacy. + this.writable = true; + + if (options) { + if (typeof options.write === 'function') this._write = options.write; + + if (typeof options.writev === 'function') this._writev = options.writev; + } + + Stream.call(this); +} + +// Otherwise people can pipe Writable streams, which is just wrong. +Writable.prototype.pipe = function () { + this.emit('error', new Error('Cannot pipe. Not readable.')); +}; + +function writeAfterEnd(stream, cb) { + var er = new Error('write after end'); + // TODO: defer error events consistently everywhere, not just the cb + stream.emit('error', er); + processNextTick(cb, er); +} + +// If we get something that is not a buffer, string, null, or undefined, +// and we're not in objectMode, then that's an error. +// Otherwise stream chunks are all considered to be of length=1, and the +// watermarks determine how many objects to keep in the buffer, rather than +// how many bytes or characters. 
+function validChunk(stream, state, chunk, cb) { + var valid = true; + + if (!Buffer.isBuffer(chunk) && typeof chunk !== 'string' && chunk !== null && chunk !== undefined && !state.objectMode) { + var er = new TypeError('Invalid non-string/buffer chunk'); + stream.emit('error', er); + processNextTick(cb, er); + valid = false; + } + return valid; +} + +Writable.prototype.write = function (chunk, encoding, cb) { + var state = this._writableState; + var ret = false; + + if (typeof encoding === 'function') { + cb = encoding; + encoding = null; + } + + if (Buffer.isBuffer(chunk)) encoding = 'buffer';else if (!encoding) encoding = state.defaultEncoding; + + if (typeof cb !== 'function') cb = nop; + + if (state.ended) writeAfterEnd(this, cb);else if (validChunk(this, state, chunk, cb)) { + state.pendingcb++; + ret = writeOrBuffer(this, state, chunk, encoding, cb); + } + + return ret; +}; + +Writable.prototype.cork = function () { + var state = this._writableState; + + state.corked++; +}; + +Writable.prototype.uncork = function () { + var state = this._writableState; + + if (state.corked) { + state.corked--; + + if (!state.writing && !state.corked && !state.finished && !state.bufferProcessing && state.bufferedRequest) clearBuffer(this, state); + } +}; + +Writable.prototype.setDefaultEncoding = function setDefaultEncoding(encoding) { + // node::ParseEncoding() requires lower case. + if (typeof encoding === 'string') encoding = encoding.toLowerCase(); + if (!(['hex', 'utf8', 'utf-8', 'ascii', 'binary', 'base64', 'ucs2', 'ucs-2', 'utf16le', 'utf-16le', 'raw'].indexOf((encoding + '').toLowerCase()) > -1)) throw new TypeError('Unknown encoding: ' + encoding); + this._writableState.defaultEncoding = encoding; +}; + +function decodeChunk(state, chunk, encoding) { + if (!state.objectMode && state.decodeStrings !== false && typeof chunk === 'string') { + chunk = new Buffer(chunk, encoding); + } + return chunk; +} + +// if we're already writing something, then just put this +// in the queue, and wait our turn. Otherwise, call _write +// If we return false, then we need a drain event, so set that flag. +function writeOrBuffer(stream, state, chunk, encoding, cb) { + chunk = decodeChunk(state, chunk, encoding); + + if (Buffer.isBuffer(chunk)) encoding = 'buffer'; + var len = state.objectMode ? 1 : chunk.length; + + state.length += len; + + var ret = state.length < state.highWaterMark; + // we must ensure that previous needDrain will not be reset to false. 
+ if (!ret) state.needDrain = true; + + if (state.writing || state.corked) { + var last = state.lastBufferedRequest; + state.lastBufferedRequest = new WriteReq(chunk, encoding, cb); + if (last) { + last.next = state.lastBufferedRequest; + } else { + state.bufferedRequest = state.lastBufferedRequest; + } + state.bufferedRequestCount += 1; + } else { + doWrite(stream, state, false, len, chunk, encoding, cb); + } + + return ret; +} + +function doWrite(stream, state, writev, len, chunk, encoding, cb) { + state.writelen = len; + state.writecb = cb; + state.writing = true; + state.sync = true; + if (writev) stream._writev(chunk, state.onwrite);else stream._write(chunk, encoding, state.onwrite); + state.sync = false; +} + +function onwriteError(stream, state, sync, er, cb) { + --state.pendingcb; + if (sync) processNextTick(cb, er);else cb(er); + + stream._writableState.errorEmitted = true; + stream.emit('error', er); +} + +function onwriteStateUpdate(state) { + state.writing = false; + state.writecb = null; + state.length -= state.writelen; + state.writelen = 0; +} + +function onwrite(stream, er) { + var state = stream._writableState; + var sync = state.sync; + var cb = state.writecb; + + onwriteStateUpdate(state); + + if (er) onwriteError(stream, state, sync, er, cb);else { + // Check if we're actually ready to finish, but don't emit yet + var finished = needFinish(state); + + if (!finished && !state.corked && !state.bufferProcessing && state.bufferedRequest) { + clearBuffer(stream, state); + } + + if (sync) { + /*<replacement>*/ + asyncWrite(afterWrite, stream, state, finished, cb); + /*</replacement>*/ + } else { + afterWrite(stream, state, finished, cb); + } + } +} + +function afterWrite(stream, state, finished, cb) { + if (!finished) onwriteDrain(stream, state); + state.pendingcb--; + cb(); + finishMaybe(stream, state); +} + +// Must force callback to be called on nextTick, so that we don't +// emit 'drain' before the write() consumer gets the 'false' return +// value, and has a chance to attach a 'drain' listener. +function onwriteDrain(stream, state) { + if (state.length === 0 && state.needDrain) { + state.needDrain = false; + stream.emit('drain'); + } +} + +// if there's something in the buffer waiting, then process it +function clearBuffer(stream, state) { + state.bufferProcessing = true; + var entry = state.bufferedRequest; + + if (stream._writev && entry && entry.next) { + // Fast case, write everything using _writev() + var l = state.bufferedRequestCount; + var buffer = new Array(l); + var holder = state.corkedRequestsFree; + holder.entry = entry; + + var count = 0; + while (entry) { + buffer[count] = entry; + entry = entry.next; + count += 1; + } + + doWrite(stream, state, true, state.length, buffer, '', holder.finish); + + // doWrite is always async, defer these to save a bit of time + // as the hot path ends with doWrite + state.pendingcb++; + state.lastBufferedRequest = null; + state.corkedRequestsFree = holder.next; + holder.next = null; + } else { + // Slow case, write chunks one-by-one + while (entry) { + var chunk = entry.chunk; + var encoding = entry.encoding; + var cb = entry.callback; + var len = state.objectMode ? 1 : chunk.length; + + doWrite(stream, state, false, len, chunk, encoding, cb); + entry = entry.next; + // if we didn't call the onwrite immediately, then + // it means that we need to wait until it does. + // also, that means that the chunk and cb are currently + // being processed, so move the buffer counter past them. 
+ if (state.writing) { + break; + } + } + + if (entry === null) state.lastBufferedRequest = null; + } + + state.bufferedRequestCount = 0; + state.bufferedRequest = entry; + state.bufferProcessing = false; +} + +Writable.prototype._write = function (chunk, encoding, cb) { + cb(new Error('not implemented')); +}; + +Writable.prototype._writev = null; + +Writable.prototype.end = function (chunk, encoding, cb) { + var state = this._writableState; + + if (typeof chunk === 'function') { + cb = chunk; + chunk = null; + encoding = null; + } else if (typeof encoding === 'function') { + cb = encoding; + encoding = null; + } + + if (chunk !== null && chunk !== undefined) this.write(chunk, encoding); + + // .end() fully uncorks + if (state.corked) { + state.corked = 1; + this.uncork(); + } + + // ignore unnecessary end() calls. + if (!state.ending && !state.finished) endWritable(this, state, cb); +}; + +function needFinish(state) { + return state.ending && state.length === 0 && state.bufferedRequest === null && !state.finished && !state.writing; +} + +function prefinish(stream, state) { + if (!state.prefinished) { + state.prefinished = true; + stream.emit('prefinish'); + } +} + +function finishMaybe(stream, state) { + var need = needFinish(state); + if (need) { + if (state.pendingcb === 0) { + prefinish(stream, state); + state.finished = true; + stream.emit('finish'); + } else { + prefinish(stream, state); + } + } + return need; +} + +function endWritable(stream, state, cb) { + state.ending = true; + finishMaybe(stream, state); + if (cb) { + if (state.finished) processNextTick(cb);else stream.once('finish', cb); + } + state.ended = true; + stream.writable = false; +} + +// It seems a linked list but it is not +// there will be only 2 of these for each stream +function CorkedRequest(state) { + var _this = this; + + this.next = null; + this.entry = null; + + this.finish = function (err) { + var entry = _this.entry; + _this.entry = null; + while (entry) { + var cb = entry.callback; + state.pendingcb--; + cb(err); + entry = entry.next; + } + if (state.corkedRequestsFree) { + state.corkedRequestsFree.next = _this; + } else { + state.corkedRequestsFree = _this; + } + }; +}
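An implementor of this Writable supplies only `_write` (or `_writev`, which `clearBuffer` above prefers for corked batches); the state machine handles buffering past `highWaterMark`, returns false from `write()` when the buffer is full, and emits 'drain' once `state.length` falls back to zero. A minimal sketch, assuming the vendored `readable-stream` resolves locally:

```js
// Illustrative sketch: a Writable that counts bytes.
var Writable = require('readable-stream').Writable

var total = 0
var sink = new Writable({
  highWaterMark: 8,
  write: function (chunk, encoding, cb) {
    total += chunk.length
    setImmediate(cb) // pretend the underlying flush is asynchronous
  }
})

sink.on('drain', function () { console.log('drained') })
sink.on('finish', function () { console.log('wrote ' + total + ' bytes') })

// 11 bytes exceed the highWaterMark of 8, so write() returns false
// and a well-behaved producer would wait for 'drain'.
console.log(sink.write(new Buffer('hello world'))) // false
sink.end()
```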
\ No newline at end of file diff --git a/node_modules/bl/node_modules/readable-stream/package.json b/node_modules/bl/node_modules/readable-stream/package.json new file mode 100644 index 000000000..538d68c65 --- /dev/null +++ b/node_modules/bl/node_modules/readable-stream/package.json @@ -0,0 +1,113 @@ +{ + "_args": [ + [ + { + "raw": "readable-stream@~2.0.5", + "scope": null, + "escapedName": "readable-stream", + "name": "readable-stream", + "rawSpec": "~2.0.5", + "spec": ">=2.0.5 <2.1.0", + "type": "range" + }, + "/home/dold/repos/taler/wallet-webex/node_modules/bl" + ] + ], + "_from": "readable-stream@>=2.0.5 <2.1.0", + "_id": "readable-stream@2.0.6", + "_inCache": true, + "_location": "/bl/readable-stream", + "_nodeVersion": "5.7.0", + "_npmOperationalInternal": { + "host": "packages-12-west.internal.npmjs.com", + "tmp": "tmp/readable-stream-2.0.6.tgz_1457893507709_0.369257491780445" + }, + "_npmUser": { + "name": "cwmma", + "email": "calvin.metcalf@gmail.com" + }, + "_npmVersion": "3.6.0", + "_phantomChildren": {}, + "_requested": { + "raw": "readable-stream@~2.0.5", + "scope": null, + "escapedName": "readable-stream", + "name": "readable-stream", + "rawSpec": "~2.0.5", + "spec": ">=2.0.5 <2.1.0", + "type": "range" + }, + "_requiredBy": [ + "/bl" + ], + "_resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.0.6.tgz", + "_shasum": "8f90341e68a53ccc928788dacfcd11b36eb9b78e", + "_shrinkwrap": null, + "_spec": "readable-stream@~2.0.5", + "_where": "/home/dold/repos/taler/wallet-webex/node_modules/bl", + "browser": { + "util": false + }, + "bugs": { + "url": "https://github.com/nodejs/readable-stream/issues" + }, + "dependencies": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.1", + "isarray": "~1.0.0", + "process-nextick-args": "~1.0.6", + "string_decoder": "~0.10.x", + "util-deprecate": "~1.0.1" + }, + "description": "Streams3, a user-land copy of the stream library from Node.js", + "devDependencies": { + "tap": "~0.2.6", + "tape": "~4.5.1", + "zuul": "~3.9.0" + }, + "directories": {}, + "dist": { + "shasum": "8f90341e68a53ccc928788dacfcd11b36eb9b78e", + "tarball": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.0.6.tgz" + }, + "gitHead": "01fb5608a970b42c900b96746cadc13d27dd9d7e", + "homepage": "https://github.com/nodejs/readable-stream#readme", + "keywords": [ + "readable", + "stream", + "pipe" + ], + "license": "MIT", + "main": "readable.js", + "maintainers": [ + { + "name": "isaacs", + "email": "isaacs@npmjs.com" + }, + { + "name": "tootallnate", + "email": "nathan@tootallnate.net" + }, + { + "name": "rvagg", + "email": "rod@vagg.org" + }, + { + "name": "cwmma", + "email": "calvin.metcalf@gmail.com" + } + ], + "name": "readable-stream", + "optionalDependencies": {}, + "readme": "ERROR: No README data found!", + "repository": { + "type": "git", + "url": "git://github.com/nodejs/readable-stream.git" + }, + "scripts": { + "browser": "npm run write-zuul && zuul -- test/browser.js", + "test": "tap test/parallel/*.js test/ours/*.js", + "write-zuul": "printf \"ui: tape\nbrowsers:\n - name: $BROWSER_NAME\n version: $BROWSER_VERSION\n\">.zuul.yml" + }, + "version": "2.0.6" +} diff --git a/node_modules/bl/node_modules/readable-stream/passthrough.js b/node_modules/bl/node_modules/readable-stream/passthrough.js new file mode 100644 index 000000000..27e8d8a55 --- /dev/null +++ b/node_modules/bl/node_modules/readable-stream/passthrough.js @@ -0,0 +1 @@ +module.exports = require("./lib/_stream_passthrough.js") diff --git 
a/node_modules/bl/node_modules/readable-stream/readable.js b/node_modules/bl/node_modules/readable-stream/readable.js new file mode 100644 index 000000000..6222a5798 --- /dev/null +++ b/node_modules/bl/node_modules/readable-stream/readable.js @@ -0,0 +1,12 @@ +var Stream = (function (){ + try { + return require('st' + 'ream'); // hack to fix a circular dependency issue when used with browserify + } catch(_){} +}()); +exports = module.exports = require('./lib/_stream_readable.js'); +exports.Stream = Stream || exports; +exports.Readable = exports; +exports.Writable = require('./lib/_stream_writable.js'); +exports.Duplex = require('./lib/_stream_duplex.js'); +exports.Transform = require('./lib/_stream_transform.js'); +exports.PassThrough = require('./lib/_stream_passthrough.js'); diff --git a/node_modules/bl/node_modules/readable-stream/transform.js b/node_modules/bl/node_modules/readable-stream/transform.js new file mode 100644 index 000000000..5d482f078 --- /dev/null +++ b/node_modules/bl/node_modules/readable-stream/transform.js @@ -0,0 +1 @@ +module.exports = require("./lib/_stream_transform.js") diff --git a/node_modules/bl/node_modules/readable-stream/writable.js b/node_modules/bl/node_modules/readable-stream/writable.js new file mode 100644 index 000000000..e1e9efdf3 --- /dev/null +++ b/node_modules/bl/node_modules/readable-stream/writable.js @@ -0,0 +1 @@ +module.exports = require("./lib/_stream_writable.js") diff --git a/node_modules/bl/package.json b/node_modules/bl/package.json new file mode 100644 index 000000000..be0973556 --- /dev/null +++ b/node_modules/bl/package.json @@ -0,0 +1,103 @@ +{ + "_args": [ + [ + { + "raw": "bl@^1.0.0", + "scope": null, + "escapedName": "bl", + "name": "bl", + "rawSpec": "^1.0.0", + "spec": ">=1.0.0 <2.0.0", + "type": "range" + }, + "/home/dold/repos/taler/wallet-webex/node_modules/tar-stream" + ] + ], + "_from": "bl@>=1.0.0 <2.0.0", + "_id": "bl@1.1.2", + "_inCache": true, + "_location": "/bl", + "_nodeVersion": "5.3.0", + "_npmOperationalInternal": { + "host": "packages-9-west.internal.npmjs.com", + "tmp": "tmp/bl-1.1.2.tgz_1455246621698_0.6300242957659066" + }, + "_npmUser": { + "name": "rvagg", + "email": "rod@vagg.org" + }, + "_npmVersion": "3.3.12", + "_phantomChildren": { + "core-util-is": "1.0.2", + "inherits": "2.0.3", + "process-nextick-args": "1.0.7", + "string_decoder": "0.10.31", + "util-deprecate": "1.0.2" + }, + "_requested": { + "raw": "bl@^1.0.0", + "scope": null, + "escapedName": "bl", + "name": "bl", + "rawSpec": "^1.0.0", + "spec": ">=1.0.0 <2.0.0", + "type": "range" + }, + "_requiredBy": [ + "/tar-stream" + ], + "_resolved": "https://registry.npmjs.org/bl/-/bl-1.1.2.tgz", + "_shasum": "fdca871a99713aa00d19e3bbba41c44787a65398", + "_shrinkwrap": null, + "_spec": "bl@^1.0.0", + "_where": "/home/dold/repos/taler/wallet-webex/node_modules/tar-stream", + "authors": [ + "Rod Vagg <rod@vagg.org> (https://github.com/rvagg)", + "Matteo Collina <matteo.collina@gmail.com> (https://github.com/mcollina)", + "Jarett Cruger <jcrugzz@gmail.com> (https://github.com/jcrugzz)" + ], + "bugs": { + "url": "https://github.com/rvagg/bl/issues" + }, + "dependencies": { + "readable-stream": "~2.0.5" + }, + "description": "Buffer List: collect buffers and access with a standard readable Buffer interface, streamable too!", + "devDependencies": { + "faucet": "0.0.1", + "hash_file": "~0.1.1", + "tape": "~4.4.0" + }, + "directories": {}, + "dist": { + "shasum": "fdca871a99713aa00d19e3bbba41c44787a65398", + "tarball": 
"https://registry.npmjs.org/bl/-/bl-1.1.2.tgz" + }, + "gitHead": "ea42021059dc65fc60d7f4b9217c73431f09d23d", + "homepage": "https://github.com/rvagg/bl", + "keywords": [ + "buffer", + "buffers", + "stream", + "awesomesauce" + ], + "license": "MIT", + "main": "bl.js", + "maintainers": [ + { + "name": "rvagg", + "email": "rod@vagg.org" + } + ], + "name": "bl", + "optionalDependencies": {}, + "readme": "ERROR: No README data found!", + "repository": { + "type": "git", + "url": "git+https://github.com/rvagg/bl.git" + }, + "scripts": { + "test": "node test/test.js | faucet" + }, + "version": "1.1.2" +} diff --git a/node_modules/bl/test/test.js b/node_modules/bl/test/test.js new file mode 100644 index 000000000..c95b1ba48 --- /dev/null +++ b/node_modules/bl/test/test.js @@ -0,0 +1,640 @@ +var tape = require('tape') + , crypto = require('crypto') + , fs = require('fs') + , hash = require('hash_file') + , BufferList = require('../') + + , encodings = + ('hex utf8 utf-8 ascii binary base64' + + (process.browser ? '' : ' ucs2 ucs-2 utf16le utf-16le')).split(' ') + +tape('single bytes from single buffer', function (t) { + var bl = new BufferList() + bl.append(new Buffer('abcd')) + + t.equal(bl.length, 4) + + t.equal(bl.get(0), 97) + t.equal(bl.get(1), 98) + t.equal(bl.get(2), 99) + t.equal(bl.get(3), 100) + + t.end() +}) + +tape('single bytes from multiple buffers', function (t) { + var bl = new BufferList() + bl.append(new Buffer('abcd')) + bl.append(new Buffer('efg')) + bl.append(new Buffer('hi')) + bl.append(new Buffer('j')) + + t.equal(bl.length, 10) + + t.equal(bl.get(0), 97) + t.equal(bl.get(1), 98) + t.equal(bl.get(2), 99) + t.equal(bl.get(3), 100) + t.equal(bl.get(4), 101) + t.equal(bl.get(5), 102) + t.equal(bl.get(6), 103) + t.equal(bl.get(7), 104) + t.equal(bl.get(8), 105) + t.equal(bl.get(9), 106) + t.end() +}) + +tape('multi bytes from single buffer', function (t) { + var bl = new BufferList() + bl.append(new Buffer('abcd')) + + t.equal(bl.length, 4) + + t.equal(bl.slice(0, 4).toString('ascii'), 'abcd') + t.equal(bl.slice(0, 3).toString('ascii'), 'abc') + t.equal(bl.slice(1, 4).toString('ascii'), 'bcd') + + t.end() +}) + +tape('multiple bytes from multiple buffers', function (t) { + var bl = new BufferList() + + bl.append(new Buffer('abcd')) + bl.append(new Buffer('efg')) + bl.append(new Buffer('hi')) + bl.append(new Buffer('j')) + + t.equal(bl.length, 10) + + t.equal(bl.slice(0, 10).toString('ascii'), 'abcdefghij') + t.equal(bl.slice(3, 10).toString('ascii'), 'defghij') + t.equal(bl.slice(3, 6).toString('ascii'), 'def') + t.equal(bl.slice(3, 8).toString('ascii'), 'defgh') + t.equal(bl.slice(5, 10).toString('ascii'), 'fghij') + + t.end() +}) + +tape('multiple bytes from multiple buffer lists', function (t) { + var bl = new BufferList() + + bl.append(new BufferList([ new Buffer('abcd'), new Buffer('efg') ])) + bl.append(new BufferList([ new Buffer('hi'), new Buffer('j') ])) + + t.equal(bl.length, 10) + + t.equal(bl.slice(0, 10).toString('ascii'), 'abcdefghij') + + t.equal(bl.slice(3, 10).toString('ascii'), 'defghij') + t.equal(bl.slice(3, 6).toString('ascii'), 'def') + t.equal(bl.slice(3, 8).toString('ascii'), 'defgh') + t.equal(bl.slice(5, 10).toString('ascii'), 'fghij') + + t.end() +}) + +// same data as previous test, just using nested constructors +tape('multiple bytes from crazy nested buffer lists', function (t) { + var bl = new BufferList() + + bl.append(new BufferList([ + new BufferList([ + new BufferList(new Buffer('abc')) + , new Buffer('d') + , new BufferList(new Buffer('efg')) 
+ ]) + , new BufferList([ new Buffer('hi') ]) + , new BufferList(new Buffer('j')) + ])) + + t.equal(bl.length, 10) + + t.equal(bl.slice(0, 10).toString('ascii'), 'abcdefghij') + + t.equal(bl.slice(3, 10).toString('ascii'), 'defghij') + t.equal(bl.slice(3, 6).toString('ascii'), 'def') + t.equal(bl.slice(3, 8).toString('ascii'), 'defgh') + t.equal(bl.slice(5, 10).toString('ascii'), 'fghij') + + t.end() +}) + +tape('append accepts arrays of Buffers', function (t) { + var bl = new BufferList() + bl.append(new Buffer('abc')) + bl.append([ new Buffer('def') ]) + bl.append([ new Buffer('ghi'), new Buffer('jkl') ]) + bl.append([ new Buffer('mnop'), new Buffer('qrstu'), new Buffer('vwxyz') ]) + t.equal(bl.length, 26) + t.equal(bl.slice().toString('ascii'), 'abcdefghijklmnopqrstuvwxyz') + t.end() +}) + +tape('append accepts arrays of BufferLists', function (t) { + var bl = new BufferList() + bl.append(new Buffer('abc')) + bl.append([ new BufferList('def') ]) + bl.append(new BufferList([ new Buffer('ghi'), new BufferList('jkl') ])) + bl.append([ new Buffer('mnop'), new BufferList([ new Buffer('qrstu'), new Buffer('vwxyz') ]) ]) + t.equal(bl.length, 26) + t.equal(bl.slice().toString('ascii'), 'abcdefghijklmnopqrstuvwxyz') + t.end() +}) + +tape('append chainable', function (t) { + var bl = new BufferList() + t.ok(bl.append(new Buffer('abcd')) === bl) + t.ok(bl.append([ new Buffer('abcd') ]) === bl) + t.ok(bl.append(new BufferList(new Buffer('abcd'))) === bl) + t.ok(bl.append([ new BufferList(new Buffer('abcd')) ]) === bl) + t.end() +}) + +tape('append chainable (test results)', function (t) { + var bl = new BufferList('abc') + .append([ new BufferList('def') ]) + .append(new BufferList([ new Buffer('ghi'), new BufferList('jkl') ])) + .append([ new Buffer('mnop'), new BufferList([ new Buffer('qrstu'), new Buffer('vwxyz') ]) ]) + + t.equal(bl.length, 26) + t.equal(bl.slice().toString('ascii'), 'abcdefghijklmnopqrstuvwxyz') + t.end() +}) + +tape('consuming from multiple buffers', function (t) { + var bl = new BufferList() + + bl.append(new Buffer('abcd')) + bl.append(new Buffer('efg')) + bl.append(new Buffer('hi')) + bl.append(new Buffer('j')) + + t.equal(bl.length, 10) + + t.equal(bl.slice(0, 10).toString('ascii'), 'abcdefghij') + + bl.consume(3) + t.equal(bl.length, 7) + t.equal(bl.slice(0, 7).toString('ascii'), 'defghij') + + bl.consume(2) + t.equal(bl.length, 5) + t.equal(bl.slice(0, 5).toString('ascii'), 'fghij') + + bl.consume(1) + t.equal(bl.length, 4) + t.equal(bl.slice(0, 4).toString('ascii'), 'ghij') + + bl.consume(1) + t.equal(bl.length, 3) + t.equal(bl.slice(0, 3).toString('ascii'), 'hij') + + bl.consume(2) + t.equal(bl.length, 1) + t.equal(bl.slice(0, 1).toString('ascii'), 'j') + + t.end() +}) + +tape('complete consumption', function (t) { + var bl = new BufferList() + + bl.append(new Buffer('a')) + bl.append(new Buffer('b')) + + bl.consume(2) + + t.equal(bl.length, 0) + t.equal(bl._bufs.length, 0) + + t.end() +}) + +tape('test readUInt8 / readInt8', function (t) { + var buf1 = new Buffer(1) + , buf2 = new Buffer(3) + , buf3 = new Buffer(3) + , bl = new BufferList() + + buf2[1] = 0x3 + buf2[2] = 0x4 + buf3[0] = 0x23 + buf3[1] = 0x42 + + bl.append(buf1) + bl.append(buf2) + bl.append(buf3) + + t.equal(bl.readUInt8(2), 0x3) + t.equal(bl.readInt8(2), 0x3) + t.equal(bl.readUInt8(3), 0x4) + t.equal(bl.readInt8(3), 0x4) + t.equal(bl.readUInt8(4), 0x23) + t.equal(bl.readInt8(4), 0x23) + t.equal(bl.readUInt8(5), 0x42) + t.equal(bl.readInt8(5), 0x42) + t.end() +}) + +tape('test readUInt16LE / 
readUInt16BE / readInt16LE / readInt16BE', function (t) { + var buf1 = new Buffer(1) + , buf2 = new Buffer(3) + , buf3 = new Buffer(3) + , bl = new BufferList() + + buf2[1] = 0x3 + buf2[2] = 0x4 + buf3[0] = 0x23 + buf3[1] = 0x42 + + bl.append(buf1) + bl.append(buf2) + bl.append(buf3) + + t.equal(bl.readUInt16BE(2), 0x0304) + t.equal(bl.readUInt16LE(2), 0x0403) + t.equal(bl.readInt16BE(2), 0x0304) + t.equal(bl.readInt16LE(2), 0x0403) + t.equal(bl.readUInt16BE(3), 0x0423) + t.equal(bl.readUInt16LE(3), 0x2304) + t.equal(bl.readInt16BE(3), 0x0423) + t.equal(bl.readInt16LE(3), 0x2304) + t.equal(bl.readUInt16BE(4), 0x2342) + t.equal(bl.readUInt16LE(4), 0x4223) + t.equal(bl.readInt16BE(4), 0x2342) + t.equal(bl.readInt16LE(4), 0x4223) + t.end() +}) + +tape('test readUInt32LE / readUInt32BE / readInt32LE / readInt32BE', function (t) { + var buf1 = new Buffer(1) + , buf2 = new Buffer(3) + , buf3 = new Buffer(3) + , bl = new BufferList() + + buf2[1] = 0x3 + buf2[2] = 0x4 + buf3[0] = 0x23 + buf3[1] = 0x42 + + bl.append(buf1) + bl.append(buf2) + bl.append(buf3) + + t.equal(bl.readUInt32BE(2), 0x03042342) + t.equal(bl.readUInt32LE(2), 0x42230403) + t.equal(bl.readInt32BE(2), 0x03042342) + t.equal(bl.readInt32LE(2), 0x42230403) + t.end() +}) + +tape('test readFloatLE / readFloatBE', function (t) { + var buf1 = new Buffer(1) + , buf2 = new Buffer(3) + , buf3 = new Buffer(3) + , bl = new BufferList() + + buf2[1] = 0x00 + buf2[2] = 0x00 + buf3[0] = 0x80 + buf3[1] = 0x3f + + bl.append(buf1) + bl.append(buf2) + bl.append(buf3) + + t.equal(bl.readFloatLE(2), 0x01) + t.end() +}) + +tape('test readDoubleLE / readDoubleBE', function (t) { + var buf1 = new Buffer(1) + , buf2 = new Buffer(3) + , buf3 = new Buffer(10) + , bl = new BufferList() + + buf2[1] = 0x55 + buf2[2] = 0x55 + buf3[0] = 0x55 + buf3[1] = 0x55 + buf3[2] = 0x55 + buf3[3] = 0x55 + buf3[4] = 0xd5 + buf3[5] = 0x3f + + bl.append(buf1) + bl.append(buf2) + bl.append(buf3) + + t.equal(bl.readDoubleLE(2), 0.3333333333333333) + t.end() +}) + +tape('test toString', function (t) { + var bl = new BufferList() + + bl.append(new Buffer('abcd')) + bl.append(new Buffer('efg')) + bl.append(new Buffer('hi')) + bl.append(new Buffer('j')) + + t.equal(bl.toString('ascii', 0, 10), 'abcdefghij') + t.equal(bl.toString('ascii', 3, 10), 'defghij') + t.equal(bl.toString('ascii', 3, 6), 'def') + t.equal(bl.toString('ascii', 3, 8), 'defgh') + t.equal(bl.toString('ascii', 5, 10), 'fghij') + + t.end() +}) + +tape('test toString encoding', function (t) { + var bl = new BufferList() + , b = new Buffer('abcdefghij\xff\x00') + + bl.append(new Buffer('abcd')) + bl.append(new Buffer('efg')) + bl.append(new Buffer('hi')) + bl.append(new Buffer('j')) + bl.append(new Buffer('\xff\x00')) + + encodings.forEach(function (enc) { + t.equal(bl.toString(enc), b.toString(enc), enc) + }) + + t.end() +}) + +!process.browser && tape('test stream', function (t) { + var random = crypto.randomBytes(65534) + , rndhash = hash(random, 'md5') + , md5sum = crypto.createHash('md5') + , bl = new BufferList(function (err, buf) { + t.ok(Buffer.isBuffer(buf)) + t.ok(err === null) + t.equal(rndhash, hash(bl.slice(), 'md5')) + t.equal(rndhash, hash(buf, 'md5')) + + bl.pipe(fs.createWriteStream('/tmp/bl_test_rnd_out.dat')) + .on('close', function () { + var s = fs.createReadStream('/tmp/bl_test_rnd_out.dat') + s.on('data', md5sum.update.bind(md5sum)) + s.on('end', function() { + t.equal(rndhash, md5sum.digest('hex'), 'woohoo! 
correct hash!') + t.end() + }) + }) + + }) + + fs.writeFileSync('/tmp/bl_test_rnd.dat', random) + fs.createReadStream('/tmp/bl_test_rnd.dat').pipe(bl) +}) + +tape('instantiation with Buffer', function (t) { + var buf = crypto.randomBytes(1024) + , buf2 = crypto.randomBytes(1024) + , b = BufferList(buf) + + t.equal(buf.toString('hex'), b.slice().toString('hex'), 'same buffer') + b = BufferList([ buf, buf2 ]) + t.equal(b.slice().toString('hex'), Buffer.concat([ buf, buf2 ]).toString('hex'), 'same buffer') + t.end() +}) + +tape('test String appendage', function (t) { + var bl = new BufferList() + , b = new Buffer('abcdefghij\xff\x00') + + bl.append('abcd') + bl.append('efg') + bl.append('hi') + bl.append('j') + bl.append('\xff\x00') + + encodings.forEach(function (enc) { + t.equal(bl.toString(enc), b.toString(enc)) + }) + + t.end() +}) + +tape('test Number appendage', function (t) { + var bl = new BufferList() + , b = new Buffer('1234567890') + + bl.append(1234) + bl.append(567) + bl.append(89) + bl.append(0) + + encodings.forEach(function (enc) { + t.equal(bl.toString(enc), b.toString(enc)) + }) + + t.end() +}) + +tape('write nothing, should get empty buffer', function (t) { + t.plan(3) + BufferList(function (err, data) { + t.notOk(err, 'no error') + t.ok(Buffer.isBuffer(data), 'got a buffer') + t.equal(0, data.length, 'got a zero-length buffer') + t.end() + }).end() +}) + +tape('unicode string', function (t) { + t.plan(2) + var inp1 = '\u2600' + , inp2 = '\u2603' + , exp = inp1 + ' and ' + inp2 + , bl = BufferList() + bl.write(inp1) + bl.write(' and ') + bl.write(inp2) + t.equal(exp, bl.toString()) + t.equal(new Buffer(exp).toString('hex'), bl.toString('hex')) +}) + +tape('should emit finish', function (t) { + var source = BufferList() + , dest = BufferList() + + source.write('hello') + source.pipe(dest) + + dest.on('finish', function () { + t.equal(dest.toString('utf8'), 'hello') + t.end() + }) +}) + +tape('basic copy', function (t) { + var buf = crypto.randomBytes(1024) + , buf2 = new Buffer(1024) + , b = BufferList(buf) + + b.copy(buf2) + t.equal(b.slice().toString('hex'), buf2.toString('hex'), 'same buffer') + t.end() +}) + +tape('copy after many appends', function (t) { + var buf = crypto.randomBytes(512) + , buf2 = new Buffer(1024) + , b = BufferList(buf) + + b.append(buf) + b.copy(buf2) + t.equal(b.slice().toString('hex'), buf2.toString('hex'), 'same buffer') + t.end() +}) + +tape('copy at a precise position', function (t) { + var buf = crypto.randomBytes(1004) + , buf2 = new Buffer(1024) + , b = BufferList(buf) + + b.copy(buf2, 20) + t.equal(b.slice().toString('hex'), buf2.slice(20).toString('hex'), 'same buffer') + t.end() +}) + +tape('copy starting from a precise location', function (t) { + var buf = crypto.randomBytes(10) + , buf2 = new Buffer(5) + , b = BufferList(buf) + + b.copy(buf2, 0, 5) + t.equal(b.slice(5).toString('hex'), buf2.toString('hex'), 'same buffer') + t.end() +}) + +tape('copy in an interval', function (t) { + var rnd = crypto.randomBytes(10) + , b = BufferList(rnd) // put the random bytes there + , actual = new Buffer(3) + , expected = new Buffer(3) + + rnd.copy(expected, 0, 5, 8) + b.copy(actual, 0, 5, 8) + + t.equal(actual.toString('hex'), expected.toString('hex'), 'same buffer') + t.end() +}) + +tape('copy an interval between two buffers', function (t) { + var buf = crypto.randomBytes(10) + , buf2 = new Buffer(10) + , b = BufferList(buf) + + b.append(buf) + b.copy(buf2, 0, 5, 15) + + t.equal(b.slice(5, 15).toString('hex'), buf2.toString('hex'), 'same buffer') 
+ t.end() +}) + +tape('duplicate', function (t) { + t.plan(2) + + var bl = new BufferList('abcdefghij\xff\x00') + , dup = bl.duplicate() + + t.equal(bl.prototype, dup.prototype) + t.equal(bl.toString('hex'), dup.toString('hex')) +}) + +tape('destroy no pipe', function (t) { + t.plan(2) + + var bl = new BufferList('alsdkfja;lsdkfja;lsdk') + bl.destroy() + + t.equal(bl._bufs.length, 0) + t.equal(bl.length, 0) +}) + +!process.browser && tape('destroy with pipe before read end', function (t) { + t.plan(2) + + var bl = new BufferList() + fs.createReadStream(__dirname + '/test.js') + .pipe(bl) + + bl.destroy() + + t.equal(bl._bufs.length, 0) + t.equal(bl.length, 0) + +}) + +!process.browser && tape('destroy with pipe before read end with race', function (t) { + t.plan(2) + + var bl = new BufferList() + fs.createReadStream(__dirname + '/test.js') + .pipe(bl) + + setTimeout(function () { + bl.destroy() + setTimeout(function () { + t.equal(bl._bufs.length, 0) + t.equal(bl.length, 0) + }, 500) + }, 500) +}) + +!process.browser && tape('destroy with pipe after read end', function (t) { + t.plan(2) + + var bl = new BufferList() + fs.createReadStream(__dirname + '/test.js') + .on('end', onEnd) + .pipe(bl) + + function onEnd () { + bl.destroy() + + t.equal(bl._bufs.length, 0) + t.equal(bl.length, 0) + } +}) + +!process.browser && tape('destroy with pipe while writing to a destination', function (t) { + t.plan(4) + + var bl = new BufferList() + , ds = new BufferList() + + fs.createReadStream(__dirname + '/test.js') + .on('end', onEnd) + .pipe(bl) + + function onEnd () { + bl.pipe(ds) + + setTimeout(function () { + bl.destroy() + + t.equals(bl._bufs.length, 0) + t.equals(bl.length, 0) + + ds.destroy() + + t.equals(bl._bufs.length, 0) + t.equals(bl.length, 0) + + }, 100) + } +}) + +!process.browser && tape('handle error', function (t) { + t.plan(2) + fs.createReadStream('/does/not/exist').pipe(BufferList(function (err, data) { + t.ok(err instanceof Error, 'has error') + t.notOk(data, 'no data') + })) +}) |
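The tests above exercise the parts of the bl API that go beyond plain appending and slicing: the `readUInt*`/`readInt*` accessors that read across buffer boundaries, `consume()`, `duplicate()`, and the callback constructor that collects a piped stream into a single Buffer. A condensed sketch of those calls:

```js
// Condensed sketch of the API exercised by the tests above.
var BufferList = require('bl')

var bl = new BufferList()
bl.append(new Buffer([ 0x01, 0x02 ]))
bl.append(new Buffer([ 0x03, 0x04 ]))

console.log(bl.readUInt16BE(1))  // 0x0203, the read spans both buffers

bl.consume(2)                    // drop the first two bytes
console.log(bl.length)           // 2
console.log(bl.duplicate().toString('hex')) // '0304', no copying

// the callback form collects a piped stream into one Buffer
require('fs').createReadStream(__filename)
  .pipe(BufferList(function (err, data) {
    if (err) throw err
    console.log('collected ' + data.length + ' bytes')
  }))
```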