author | Florian Dold <florian.dold@gmail.com> | 2016-10-10 03:43:44 +0200
committer | Florian Dold <florian.dold@gmail.com> | 2016-10-10 03:43:44 +0200
commit | abd94a7f5a50f43c797a11b53549ae48fff667c3 (patch)
tree | ab8ed457f65cdd72e13e0571d2975729428f1551 /node_modules/gettext-parser
parent | a0247c6a3fd6a09a41a7e35a3441324c4dcb58be (diff)
add node_modules to address #4364
Diffstat (limited to 'node_modules/gettext-parser')
27 files changed, 2438 insertions, 0 deletions
diff --git a/node_modules/gettext-parser/.jshintrc b/node_modules/gettext-parser/.jshintrc new file mode 100644 index 000000000..5a681e854 --- /dev/null +++ b/node_modules/gettext-parser/.jshintrc @@ -0,0 +1,18 @@ +{ + "indent": 4, + "node": true, + "globalstrict": true, + "evil": true, + "unused": true, + "undef": true, + "newcap": true, + "esnext": true, + "curly": true, + "eqeqeq": true, + "expr": true, + + "predef": [ + "describe", + "it" + ] +}
\ No newline at end of file diff --git a/node_modules/gettext-parser/.npmignore b/node_modules/gettext-parser/.npmignore new file mode 100644 index 000000000..092c1b16f --- /dev/null +++ b/node_modules/gettext-parser/.npmignore @@ -0,0 +1,16 @@ +lib-cov +*.seed +*.log +*.csv +*.dat +*.out +*.pid +*.gz + +pids +logs +results + +npm-debug.log +node_modules +.DS_Store
\ No newline at end of file diff --git a/node_modules/gettext-parser/.travis.yml b/node_modules/gettext-parser/.travis.yml new file mode 100644 index 000000000..3232a957e --- /dev/null +++ b/node_modules/gettext-parser/.travis.yml @@ -0,0 +1,20 @@ +language: node_js +node_js: + - "0.10" + - "0.11" + +before_install: + - npm install -g grunt-cli + +notifications: + email: + recipients: + - andris@kreata.ee + on_success: change + on_failure: change + webhooks: + urls: + - https://webhooks.gitter.im/e/0ed18fd9b3e529b3c2cc + on_success: change # options: [always|never|change] default: always + on_failure: always # options: [always|never|change] default: always + on_start: false # default: false
\ No newline at end of file diff --git a/node_modules/gettext-parser/CHANGELOG.md b/node_modules/gettext-parser/CHANGELOG.md new file mode 100644 index 000000000..5c1bb2c16 --- /dev/null +++ b/node_modules/gettext-parser/CHANGELOG.md @@ -0,0 +1,22 @@ +# Changelog + +## v1.1.0 2015-01-21 + + * Added `po.createParseStream` method for parsing PO files from a Stream source + * Updated documentation + +## v1.0.0 2015-01-21 + + * Bumped version to 1.0.0 to be compatible with semver + * Changed tests from nodeunit to mocha + * Unified code style in files and added jshint task to check it + * Added Grunt support to check style and run tests on `npm test` + +## v0.2.0 2013-12-30 + + * Bumped version to 0.2.0 + * Removed node-iconv dependency + * Fixed a global variable leak (`line` was not defined in `pocompiler._addPOString`) + * Some code maintenance (applied jshint rules, added "use strict" statements) + * Updated e-mail address in .travis.yml + * Added CHANGELOG file diff --git a/node_modules/gettext-parser/Gruntfile.js b/node_modules/gettext-parser/Gruntfile.js new file mode 100644 index 000000000..4b4a65234 --- /dev/null +++ b/node_modules/gettext-parser/Gruntfile.js @@ -0,0 +1,30 @@ +'use strict'; + +module.exports = function(grunt) { + + // Project configuration. + grunt.initConfig({ + jshint: { + all: ['src/*.js', 'test/*.js', 'index.js'], + options: { + jshintrc: '.jshintrc' + } + }, + + mochaTest: { + all: { + options: { + reporter: 'spec' + }, + src: ['test/*-test.js'] + } + } + }); + + // Load the plugin(s) + grunt.loadNpmTasks('grunt-contrib-jshint'); + grunt.loadNpmTasks('grunt-mocha-test'); + + // Tasks + grunt.registerTask('default', ['jshint', 'mochaTest']); +};
\ No newline at end of file diff --git a/node_modules/gettext-parser/LICENSE b/node_modules/gettext-parser/LICENSE new file mode 100644 index 000000000..411b8d132 --- /dev/null +++ b/node_modules/gettext-parser/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2014-2015 Andris Reinman + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/node_modules/gettext-parser/README.md b/node_modules/gettext-parser/README.md new file mode 100644 index 000000000..7418f8388 --- /dev/null +++ b/node_modules/gettext-parser/README.md @@ -0,0 +1,205 @@ +gettext-parser +============== + +[![Build Status](https://secure.travis-ci.org/andris9/gettext-parser.png)](http://travis-ci.org/andris9/gettext-parser) +[![NPM version](https://badge.fury.io/js/gettext-parser.png)](http://badge.fury.io/js/gettext-parser) + +Parse and compile gettext *po* and *mo* files with node.js, nothing more, nothing less. + +This module is slightly based on my other gettext related module [node-gettext](https://github.com/andris9/node-gettext). The plan is to move all parsing and compiling logic from node-gettext to here and leave only translation related functions (domains, plural handling, lookups etc.). + +## Usage + +Include the library: + + var gettextParser = require("gettext-parser"); + + +### Parse PO files + +Parse a PO file with + + gettextParser.po.parse(input[, defaultCharset]) → Object + +Where + + * **input** is a *po* file as a Buffer or an unicode string. Charset is converted to unicode from other encodings only if the input is a Buffer, otherwise the charset information is discarded + * **defaultCharset** is the charset to use if charset is not defined or is the default `"CHARSET"` (applies only if *input* is a Buffer) + +Method returns gettext-parser specific translation object (see below) + +**Example** + +```javascript +var input = require('fs').readFileSync('en.po'); +var po = gettextParser.po.parse(input); +console.log(po.translations['']); // output translations for the default context +``` + +### Parse PO as a Stream + +PO files can also be parsed from a stream source. After all input is processed the parser emits a single 'data' event which contains the parsed translation object. 
+ + gettextParser.po.createParseStream([defaultCharset][, streamOptions]) → Transform Stream + +Where + + * **defaultCharset** is the charset to use if charset is not defined or is the default `"CHARSET"` + * **streamOptions** are the standard stream options + +**Example** + +```javascript +var input = require('fs').createReadStream('en.po'); +var po = gettextParser.po.createParseStream(); +input.pipe(po); +po.on('data', function(data){ + console.log(data.translations['']); // output translations for the default context +}); +``` + +### Compile PO from a translation object + +If you have a translation object you can convert this to a valid PO file with + + gettextParser.po.compile(data) → Buffer + +Where + + * **data** is a translation object either got from parsing a PO/MO file or composed by other means + +**Example** + +```javascript +var data = { + ... +}; +var output = gettextParser.po.compile(data); +require('fs').writeFileSync(output); +``` + +### Parse MO files + +Parse a MO file with + + gettextParser.mo.parse(input[, defaultCharset]) → Object + +Where + + * **input** is a *mo* file as a Buffer + * **defaultCharset** is the charset to use if charset is not defined or is the default `"CHARSET"` + +Method returns gettext-parser specific translation object (see below) + +**Example** + +```javascript +var input = require('fs').readFileSync('en.mo'); +var mo = gettextParser.mo.parse(input); +console.log(mo.translations['']); // output translations for the default context +``` + +### Compile MO from a translation object + +If you have a translation object you can convert this to a valid MO file with + + gettextParser.mo.compile(data) → Buffer + +Where + + * **data** is a translation object either got from parsing a PO/MO file or composed by other means + +**Example** + +```javascript +var data = { + ... +}; +var output = gettextParser.mo.compile(data); +require('fs').writeFileSync(output); +``` + +### Notes + +#### Overriding charset + +If you are compiling a previously parsed translation object, you can override the output charset with the `charset` property (applies both for compiling *mo* and *po* files). + +```javascript +var obj = gettextParser.po.parse(inputBuf); +obj.charset = "windows-1257"; +outputBuf = gettextParser.po.compile(obj); +``` + +Headers for the output are modified to match the updated charset. + +#### ICONV support + +By default *gettext-parser* uses pure JS [iconv-lite](https://github.com/ashtuchkin/iconv-lite) for encoding and decoding non UTF-8 charsets. If you need to support more complex encodings that are not supported by *iconv-lite*, you need to add [iconv](https://github.com/bnoordhuis/node-iconv) as an additional dependency for your project (*gettext-parser* will detect if it is available and tries to use it instead of *iconv-lite*). + +## Data structure of parsed mo/po files + +### Character set + +Parsed data is always in unicode but the original charset of the file can +be found from the `charset` property. This value is also used when compiling translations +to a *mo* or *po* file. + +### Headers + +Headers can be found from the `headers` object, all keys are lowercase and the value for a key is a string. This value will also be used when compiling. + +### Translations + +Translations can be found from the `translations` object which in turn holds context objects for `msgctx`. Default context can be found from `translations[""]`. + +Context objects include all the translations, where `msgid` value is the key. 
The value is an object with the following possible properties: + + * **msgctx** context for this translation, if not present the default context applies + * **msgid** string to be translated + * **msgid_plural** the plural form of the original string (might not be present) + * **msgstr** an array of translations + * **comments** an object with the following properties: `translator`, `reference`, `extracted`, `flag`, `previous`. + +Example + +```json +{ + "charset": "iso-8859-1", + + "headers": { + "content-type": "text/plain; charset=iso-8859-1", + "plural-forms": "nplurals=2; plural=(n!=1);" + }, + + "translations": { + "": { + "": { + "msgid": "", + "msgstr": ["Content-Type: text/plain; charset=iso-8859-1\n..."] + } + } + }, + + "another context": { + "%s example": { + "msgctx": "another context", + "msgid": "%s example", + "msgid_plural": "%s examples", + "msgstr": ["% näide", "%s näidet"], + "comments": { + "translator": "This is regular comment", + "reference": "/path/to/file:123" + } + } + } +} +``` + +Notice that the structure has both a `headers` object and a `""` translation with the header string. When compiling the structure to a *mo* or a *po* file, the `headers` object is used to define the header. Header string in the `""` translation is just for reference (includes the original unmodified data) but will not be used when compiling. So if you need to add or alter header values, use only the `headers` object. + +If you need to convert *gettext-parser* formatted translation object to something else, eg. for *jed*, check out [po2json](https://github.com/mikeedwards/po2json). + +## License + +**MIT** diff --git a/node_modules/gettext-parser/index.js b/node_modules/gettext-parser/index.js new file mode 100644 index 000000000..d02fbaeb7 --- /dev/null +++ b/node_modules/gettext-parser/index.js @@ -0,0 +1,16 @@ +'use strict'; + +var poParser = require('./lib/poparser'); + +module.exports = { + po: { + parse: poParser.parse, + createParseStream: poParser.stream, + compile: require('./lib/pocompiler') + }, + + mo: { + parse: require('./lib/moparser'), + compile: require('./lib/mocompiler') + } +};
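The README and `index.js` above define the module's public surface: `po.parse`, `po.createParseStream`, `po.compile`, `mo.parse` and `mo.compile`. As a quick orientation for this vendored dependency, here is a minimal round-trip sketch; it assumes the package resolves from `node_modules` as added in this commit, and `./en.po` / `./en.mo` are hypothetical paths.

```javascript
// Round-trip sketch: parse a PO file and emit an equivalent binary MO file.
// './en.po' and './en.mo' are hypothetical paths used for illustration.
var fs = require('fs');
var gettextParser = require('gettext-parser');

var table = gettextParser.po.parse(fs.readFileSync('./en.po'));

// The parsed object carries charset, headers and translations keyed by context.
console.log(table.charset);
console.log(Object.keys(table.translations));

// Compile the same table to a binary MO buffer and write it out.
fs.writeFileSync('./en.mo', gettextParser.mo.compile(table));
```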
\ No newline at end of file diff --git a/node_modules/gettext-parser/lib/mocompiler.js b/node_modules/gettext-parser/lib/mocompiler.js new file mode 100644 index 000000000..0e848c7fd --- /dev/null +++ b/node_modules/gettext-parser/lib/mocompiler.js @@ -0,0 +1,237 @@ +'use strict'; + +var encoding = require('encoding'); +var sharedFuncs = require('./shared'); + +/** + * Exposes general compiler function. Takes a translation + * object as a parameter and returns binary MO object + * + * @param {Object} table Translation object + * @return {Buffer} Compiled binary MO object + */ +module.exports = function(table) { + var compiler = new Compiler(table); + return compiler.compile(); +}; + +/** + * Creates a MO compiler object. + * + * @constructor + * @param {Object} table Translation table as defined in the README + */ +function Compiler(table) { + this._table = table || {}; + this._table.headers = this._table.headers || {}; + this._table.translations = this._table.translations || {}; + + this._translations = []; + + this._writeFunc = 'writeUInt32LE'; + + this._handleCharset(); +} + +/** + * Magic bytes for the generated binary data + */ +Compiler.prototype.MAGIC = 0x950412de; + +/** + * Handles header values, replaces or adds (if needed) a charset property + */ +Compiler.prototype._handleCharset = function() { + var parts = (this._table.headers['content-type'] || 'text/plain').split(';'), + contentType = parts.shift(), + charset = sharedFuncs.formatCharset(this._table.charset), + params = []; + + params = parts.map(function(part) { + var parts = part.split('='), + key = parts.shift().trim(), + value = parts.join('='); + + if (key.toLowerCase() === 'charset') { + if (!charset) { + charset = sharedFuncs.formatCharset(value.trim() || 'utf-8'); + } + return 'charset=' + charset; + } + + return part; + }); + + if (!charset) { + charset = this._table.charset || 'utf-8'; + params.push('charset=' + charset); + } + + this._table.charset = charset; + this._table.headers['content-type'] = contentType + '; ' + params.join('; '); + + this._charset = charset; +}; + +/** + * Generates an array of translation strings + * in the form of [{msgid:... 
, msgstr:...}] + * + * @return {Array} Translation strings array + */ +Compiler.prototype._generateList = function() { + var list = []; + + list.push({ + msgid: new Buffer(0), + msgstr: encoding.convert(sharedFuncs.generateHeader(this._table.headers), this._charset) + }); + + Object.keys(this._table.translations).forEach((function(msgctxt) { + if (typeof this._table.translations[msgctxt] !== 'object') { + return; + } + Object.keys(this._table.translations[msgctxt]).forEach((function(msgid) { + if (typeof this._table.translations[msgctxt][msgid] !== 'object') { + return; + } + if (msgctxt === '' && msgid === '') { + return; + } + + var msgid_plural = this._table.translations[msgctxt][msgid].msgid_plural, + key = msgid, + value; + + if (msgctxt) { + key = msgctxt + '\u0004' + key; + } + + if (msgid_plural) { + key += '\u0000' + msgid_plural; + } + + value = [].concat(this._table.translations[msgctxt][msgid].msgstr || []).join('\u0000'); + + list.push({ + msgid: encoding.convert(key, this._charset), + msgstr: encoding.convert(value, this._charset) + }); + }).bind(this)); + }).bind(this)); + + return list; +}; + +/** + * Calculate buffer size for the final binary object + * + * @param {Array} list An array of translation strings from _generateList + * @return {Object} Size data of {msgid, msgstr, total} + */ +Compiler.prototype._calculateSize = function(list) { + var msgidLength = 0, + msgstrLength = 0, + totalLength = 0; + + list.forEach(function(translation) { + msgidLength += translation.msgid.length + 1; // + extra 0x00 + msgstrLength += translation.msgstr.length + 1; // + extra 0x00 + }); + + totalLength = 4 + // magic number + 4 + // revision + 4 + // string count + 4 + // original string table offset + 4 + // translation string table offset + 4 + // hash table size + 4 + // hash table offset + (4 + 4) * list.length + // original string table + (4 + 4) * list.length + // translations string table + msgidLength + // originals + msgstrLength; // translations + + return { + msgid: msgidLength, + msgstr: msgstrLength, + total: totalLength + }; +}; + +/** + * Generates the binary MO object from the translation list + * + * @param {Array} list translation list + * @param {Object} size Byte size information + * @return {Buffer} Compiled MO object + */ +Compiler.prototype._build = function(list, size) { + var returnBuffer = new Buffer(size.total), + curPosition = 0, + i, len; + + // magic + returnBuffer[this._writeFunc](this.MAGIC, 0); + + // revision + returnBuffer[this._writeFunc](0, 4); + + // string count + returnBuffer[this._writeFunc](list.length, 8); + + // original string table offset + returnBuffer[this._writeFunc](28, 12); + + // translation string table offset + returnBuffer[this._writeFunc](28 + (4 + 4) * list.length, 16); + + // hash table size + returnBuffer[this._writeFunc](0, 20); + + // hash table offset + returnBuffer[this._writeFunc](28 + (4 + 4) * list.length, 24); + + // build originals table + curPosition = 28 + 2 * (4 + 4) * list.length; + for (i = 0, len = list.length; i < len; i++) { + list[i].msgid.copy(returnBuffer, curPosition); + returnBuffer[this._writeFunc](list[i].msgid.length, 28 + i * 8); + returnBuffer[this._writeFunc](curPosition, 28 + i * 8 + 4); + returnBuffer[curPosition + list[i].msgid.length] = 0x00; + curPosition += list[i].msgid.length + 1; + } + + // build translations table + for (i = 0, len = list.length; i < len; i++) { + list[i].msgstr.copy(returnBuffer, curPosition); + returnBuffer[this._writeFunc](list[i].msgstr.length, 28 + (4 + 4) * list.length 
+ i * 8); + returnBuffer[this._writeFunc](curPosition, 28 + (4 + 4) * list.length + i * 8 + 4); + returnBuffer[curPosition + list[i].msgstr.length] = 0x00; + curPosition += list[i].msgstr.length + 1; + } + + return returnBuffer; +}; + +/** + * Compiles translation object into a binary MO object + * + * @return {Buffer} Compiled MO object + */ +Compiler.prototype.compile = function() { + var list = this._generateList(), + size = this._calculateSize(list); + + // sort by msgid + list.sort(function(a, b) { + if (a.msgid > b.msgid) { + return 1; + } + if (a.msgid < b.msgid) { + return -1; + } + return 0; + }); + + return this._build(list, size); +};
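For reference, `_calculateSize` and `_build` above lay out the standard MO header: seven 32-bit fields (magic, revision, string count, originals-table offset, translations-table offset, hash-table size, hash-table offset) followed by the two offset tables and the string data. A small sketch, using a made-up translation table, that reads those header fields back from a compiled buffer:

```javascript
// Sketch: inspect the 28-byte MO header produced by gettextParser.mo.compile().
// The offsets mirror the ones written in Compiler.prototype._build above;
// the table contents are made up for illustration.
var gettextParser = require('gettext-parser');

var buf = gettextParser.mo.compile({
  charset: 'utf-8',
  headers: { 'content-type': 'text/plain; charset=utf-8' },
  translations: { '': { hello: { msgid: 'hello', msgstr: ['tere'] } } }
});

console.log(buf.readUInt32LE(0).toString(16)); // magic, 950412de (little-endian output)
console.log(buf.readUInt32LE(4));              // revision, 0
console.log(buf.readUInt32LE(8));              // string count: header entry + 1 translation
console.log(buf.readUInt32LE(12));             // originals table offset, 28
console.log(buf.readUInt32LE(16));             // translations table offset
console.log(buf.readUInt32LE(20));             // hash table size, 0
console.log(buf.readUInt32LE(24));             // hash table offset
```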
\ No newline at end of file diff --git a/node_modules/gettext-parser/lib/moparser.js b/node_modules/gettext-parser/lib/moparser.js new file mode 100644 index 000000000..8c204716b --- /dev/null +++ b/node_modules/gettext-parser/lib/moparser.js @@ -0,0 +1,202 @@ +'use strict'; + +var encoding = require('encoding'); +var sharedFuncs = require('./shared'); + +/** + * Parses a binary MO object into translation table + * + * @param {Buffer} buffer Binary MO object + * @param {String} [defaultCharset] Default charset to use + * @return {Object} Translation object + */ +module.exports = function(buffer, defaultCharset) { + var parser = new Parser(buffer, defaultCharset); + return parser.parse(); +}; + +/** + * Creates a MO parser object. + * + * @constructor + * @param {Buffer} fileContents Binary MO object + * @param {String} [defaultCharset] Default charset to use + */ +function Parser(fileContents, defaultCharset) { + + this._fileContents = fileContents; + + /** + * Method name for writing int32 values, default littleendian + */ + this._writeFunc = 'writeUInt32LE'; + + /** + * Method name for reading int32 values, default littleendian + */ + this._readFunc = 'readUInt32LE'; + + this._charset = defaultCharset || 'iso-8859-1'; + + this._table = { + charset: this._charset, + headers: undefined, + translations: {} + }; +} + +/** + * Magic constant to check the endianness of the input file + */ +Parser.prototype.MAGIC = 0x950412de; + +/** + * Checks if number values in the input file are in big- or littleendian format. + * + * @return {Boolean} Return true if magic was detected + */ +Parser.prototype._checkMagick = function() { + if (this._fileContents.readUInt32LE(0) === this.MAGIC) { + this._readFunc = 'readUInt32LE'; + this._writeFunc = 'writeUInt32LE'; + return true; + } else if (this._fileContents.readUInt32BE(0) === this.MAGIC) { + this._readFunc = 'readUInt32BE'; + this._writeFunc = 'writeUInt32BE'; + return true; + } else { + return false; + } +}; + +/** + * Read the original strings and translations from the input MO file. Use the + * first translation string in the file as the header. 
+ */ +Parser.prototype._loadTranslationTable = function() { + var offsetOriginals = this._offsetOriginals, + offsetTranslations = this._offsetTranslations, + position, length, + msgid, msgstr; + + for (var i = 0; i < this._total; i++) { + // msgid string + length = this._fileContents[this._readFunc](offsetOriginals); + offsetOriginals += 4; + position = this._fileContents[this._readFunc](offsetOriginals); + offsetOriginals += 4; + msgid = this._fileContents.slice(position, position + length); + + // matching msgstr + length = this._fileContents[this._readFunc](offsetTranslations); + offsetTranslations += 4; + position = this._fileContents[this._readFunc](offsetTranslations); + offsetTranslations += 4; + msgstr = this._fileContents.slice(position, position + length); + + if (!i && !msgid.toString()) { + this._handleCharset(msgstr); + } + + msgid = encoding.convert(msgid, 'utf-8', this._charset).toString('utf-8'); + msgstr = encoding.convert(msgstr, 'utf-8', this._charset).toString('utf-8'); + + this._addString(msgid, msgstr); + } + + // dump the file contents object + this._fileContents = null; +}; + +/** + * Detects charset for MO strings from the header + * + * @param {Buffer} headers Header value + */ +Parser.prototype._handleCharset = function(headers) { + + var headersStr = headers.toString(), + match; + + if ((match = headersStr.match(/[; ]charset\s*=\s*([\w\-]+)/i))) { + this._charset = this._table.charset = sharedFuncs.formatCharset(match[1], this._charset); + } + + headers = encoding.convert(headers, 'utf-8', this._charset).toString('utf-8'); + + this._table.headers = sharedFuncs.parseHeader(headers); +}; + +/** + * Adds a translation to the translation object + * + * @param {String} msgid Original string + * @params {String} msgstr Translation for the original string + */ +Parser.prototype._addString = function(msgid, msgstr) { + var translation = {}, + parts, msgctxt, msgid_plural; + + msgid = msgid.split('\u0004'); + if (msgid.length > 1) { + msgctxt = msgid.shift(); + translation.msgctxt = msgctxt; + } else { + msgctxt = ''; + } + msgid = msgid.join('\u0004'); + + parts = msgid.split('\u0000'); + msgid = parts.shift(); + + translation.msgid = msgid; + + if ((msgid_plural = parts.join('\u0000'))) { + translation.msgid_plural = msgid_plural; + } + + msgstr = msgstr.split('\u0000'); + translation.msgstr = [].concat(msgstr || []); + + if (!this._table.translations[msgctxt]) { + this._table.translations[msgctxt] = {}; + } + + this._table.translations[msgctxt][msgid] = translation; +}; + +/** + * Parses the MO object and returns translation table + * + * @return {Object} Translation table + */ +Parser.prototype.parse = function() { + if (!this._checkMagick()) { + return false; + } + + /** + * GetText revision nr, usually 0 + */ + this._revision = this._fileContents[this._readFunc](4); + + /** + * Total count of translated strings + */ + this._total = this._fileContents[this._readFunc](8); + + /** + * Offset position for original strings table + */ + this._offsetOriginals = this._fileContents[this._readFunc](12); + + /** + * Offset position for translation strings table + */ + this._offsetTranslations = this._fileContents[this._readFunc](16); + + // Load translations into this._translationTable + this._loadTranslationTable(); + + return this._table; +};
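`_checkMagick` above decides byte order by probing the magic value with both `readUInt32LE` and `readUInt32BE`; when neither matches, `parse()` returns `false` instead of a translation table, so callers should guard for that. A short sketch, with `./messages.mo` as a hypothetical input path:

```javascript
// Sketch: parse an MO file and handle the `false` result that
// Parser.prototype.parse() returns when the magic bytes do not match.
// './messages.mo' is a hypothetical path used for illustration.
var fs = require('fs');
var gettextParser = require('gettext-parser');

var table = gettextParser.mo.parse(fs.readFileSync('./messages.mo'), 'iso-8859-1');

if (!table) {
  throw new Error('Not a valid MO file (bad magic bytes)');
}

// Plural forms end up as arrays on msgstr, keyed by context and msgid.
console.log(table.headers['plural-forms']);
console.log(table.translations['']);
```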
\ No newline at end of file diff --git a/node_modules/gettext-parser/lib/pocompiler.js b/node_modules/gettext-parser/lib/pocompiler.js new file mode 100644 index 000000000..0f3c1a166 --- /dev/null +++ b/node_modules/gettext-parser/lib/pocompiler.js @@ -0,0 +1,225 @@ +'use strict'; + +var encoding = require('encoding'); +var sharedFuncs = require('./shared'); + +/** + * Exposes general compiler function. Takes a translation + * object as a parameter and returns PO object + * + * @param {Object} table Translation object + * @return {Buffer} Compiled PO object + */ +module.exports = function(table) { + var compiler = new Compiler(table); + return compiler.compile(); +}; + +/** + * Creates a PO compiler object. + * + * @constructor + * @param {Object} table Translation table to be compiled + */ +function Compiler(table) { + this._table = table || {}; + this._table.headers = this._table.headers || {}; + this._table.translations = this._table.translations || {}; + this._translations = []; + this._handleCharset(); +} + +/** + * Converts a comments object to a comment string. The comment object is + * in the form of {translator:'', reference: '', extracted: '', flag: '', previous:''} + * + * @param {Object} comments A comments object + * @return {String} A comment string for the PO file + */ +Compiler.prototype._drawComments = function(comments) { + var lines = []; + var types = [{ + key: 'translator', + prefix: '# ' + }, { + key: 'reference', + prefix: '#: ' + }, { + key: 'extracted', + prefix: '#. ' + }, { + key: 'flag', + prefix: '#, ' + }, { + key: 'previous', + prefix: '#| ' + }]; + + types.forEach(function(type) { + if (!comments[type.key]) { + return; + } + comments[type.key].split(/\r?\n|\r/).forEach(function(line) { + lines.push(type.prefix + line); + }); + }); + + return lines.join('\n'); +}; + +/** + * Builds a PO string for a single translation object + * + * @param {Object} block Translation object + * @param {Object} [override] Properties of this object will override `block` properties + * @return {String} Translation string for a single object + */ +Compiler.prototype._drawBlock = function(block, override) { + + override = override || {}; + + var response = [], + comments = override.comments || block.comments, + msgctxt = override.msgctxt || block.msgctxt, + msgid = override.msgid || block.msgid, + msgid_plural = override.msgid_plural || block.msgid_plural, + msgstr = [].concat(override.msgstr || block.msgstr); + + + // add comments + if (comments && (comments = this._drawComments(comments))) { + response.push(comments); + } + + if (msgctxt) { + response.push(this._addPOString('msgctxt', msgctxt)); + } + + response.push(this._addPOString('msgid', msgid || '')); + + if (msgid_plural) { + response.push(this._addPOString('msgid_plural', msgid_plural)); + } + + if (msgstr.length <= 1) { + response.push(this._addPOString('msgstr', msgstr[0] || '')); + } else { + msgstr.forEach((function(msgstr, i) { + response.push(this._addPOString('msgstr[' + i + ']', msgstr || '')); + }).bind(this)); + } + + return response.join('\n'); +}; + +/** + * Escapes and joins a key and a value for the PO string + * + * @param {String} key Key name + * @param {String} value Key value + * @return {String} Joined and escaped key-value pair + */ +Compiler.prototype._addPOString = function(key, value) { + var line; + + key = (key || '').toString(); + + // escape newlines and quotes + value = (value || '').toString(). + replace(/\\/g, '\\\\'). + replace(/"/g, '\\"'). + replace(/\t/g, '\\t'). 
+ replace(/\r/g, '\\r'). + replace(/\n/g, '\\n'); + + var lines = sharedFuncs.foldLine(value); + + if (lines.length < 2) { + return key + ' "' + (lines.shift() || '') + '"'; + } else { + return key + ' ""\n"' + lines.join('"\n"') + '"'; + } + + if (value.match(/\n/)) { + value = value.replace(/\n/g, '\\n\n').replace(/\n$/, ''); + line = ('\n' + value).split('\n').map(function(l) { + return '"' + l + '"'; + }).join('\n'); + } else { + line = '"' + value + '"'; + } + + return key + ' ' + line; +}; + +/** + * Handles header values, replaces or adds (if needed) a charset property + */ +Compiler.prototype._handleCharset = function() { + var parts = (this._table.headers['content-type'] || 'text/plain').split(';'); + var contentType = parts.shift(); + var charset = sharedFuncs.formatCharset(this._table.charset); + var params = []; + + params = parts.map(function(part) { + var parts = part.split('='), + key = parts.shift().trim(), + value = parts.join('='); + + if (key.toLowerCase() === 'charset') { + if (!charset) { + charset = sharedFuncs.formatCharset(value.trim() || 'utf-8'); + } + return 'charset=' + charset; + } + + return part; + }); + + if (!charset) { + charset = this._table.charset || 'utf-8'; + params.push('charset=' + charset); + } + + this._table.charset = charset; + this._table.headers['content-type'] = contentType + '; ' + params.join('; '); + + this._charset = charset; +}; + +/** + * Compiles translation object into a PO object + * + * @return {Buffer} Compiled PO object + */ +Compiler.prototype.compile = function() { + + var response = [], + headerBlock = this._table.translations[''] && this._table.translations[''][''] || {}; + + response.push(this._drawBlock(headerBlock, { + msgstr: sharedFuncs.generateHeader(this._table.headers) + })); + + Object.keys(this._table.translations).forEach((function(msgctxt) { + if (typeof this._table.translations[msgctxt] !== 'object') { + return; + } + Object.keys(this._table.translations[msgctxt]).forEach((function(msgid) { + if (typeof this._table.translations[msgctxt][msgid] !== 'object') { + return; + } + if (msgctxt === '' && msgid === '') { + return; + } + + response.push(this._drawBlock(this._table.translations[msgctxt][msgid])); + }).bind(this)); + }).bind(this)); + + if (this._charset === 'utf-8' || this._charset === 'ascii') { + return new Buffer(response.join('\n\n'), 'utf-8'); + } else { + return encoding.convert(response.join('\n\n'), this._charset); + } +};
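`_addPOString` above escapes backslashes, quotes, tabs and newlines and folds long values with `sharedFuncs.foldLine`, so a multi-line `msgstr` comes out as an empty quoted string followed by quoted continuation lines. A minimal sketch compiling an in-memory table to PO text to show that output shape (the table contents are made up):

```javascript
// Sketch: compile a small in-memory table to PO text to see how
// Compiler.prototype._addPOString escapes and folds multi-line values.
var gettextParser = require('gettext-parser');

var table = {
  charset: 'utf-8',
  headers: { 'content-type': 'text/plain; charset=utf-8' },
  translations: {
    '': {
      greeting: {
        msgid: 'greeting',
        msgstr: ['first line\nsecond line\n'],
        comments: { translator: 'Example multi-line value', reference: 'src/app.js:42' }
      }
    }
  }
};

// For utf-8 the compiler returns a plain UTF-8 Buffer, so toString() is safe here.
console.log(gettextParser.po.compile(table).toString());
```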
\ No newline at end of file diff --git a/node_modules/gettext-parser/lib/poparser.js b/node_modules/gettext-parser/lib/poparser.js new file mode 100644 index 000000000..e215bca08 --- /dev/null +++ b/node_modules/gettext-parser/lib/poparser.js @@ -0,0 +1,525 @@ +'use strict'; + +var encoding = require('encoding'); +var sharedFuncs = require('./shared'); +var Transform = require('stream').Transform; +var util = require('util'); + +/** + * Parses a PO object into translation table + * + * @param {Buffer|String} buffer PO object + * @param {String} [defaultCharset] Default charset to use + * @return {Object} Translation object + */ +module.exports.parse = function(buffer, defaultCharset) { + var parser = new Parser(buffer, defaultCharset); + return parser.parse(); +}; + +/** + * Parses a PO stream, emits translation table in object mode + * + * @param {String} [defaultCharset] Default charset to use + * @param {String} [options] Stream options + * @return {Stream} Transform stream + */ +module.exports.stream = function(defaultCharset, options) { + return new PoParserTransform(defaultCharset, options); +}; + +/** + * Creates a PO parser object. If PO object is a string, + * UTF-8 will be used as the charset + * + * @constructor + * @param {Buffer|String} fileContents PO object + * @param {String} [defaultCharset] Default charset to use + */ +function Parser(fileContents, defaultCharset) { + + this._charset = defaultCharset || 'iso-8859-1'; + + this._lex = []; + this._escaped = false; + this._node; + this._state = this.states.none; + + if (typeof fileContents === 'string') { + this._charset = 'utf-8'; + this._fileContents = fileContents; + } else { + this._handleCharset(fileContents); + } +} + +/** + * Parses the PO object and returns translation table + * + * @return {Object} Translation table + */ +Parser.prototype.parse = function() { + this._lexer(this._fileContents); + return this._finalize(this._lex); +}; + +/** + * Detects charset for PO strings from the header + * + * @param {Buffer} headers Header value + */ +Parser.prototype._handleCharset = function(buf) { + var str = (buf || '').toString(), + pos, headers = '', + match; + + if ((pos = str.search(/^\s*msgid/im)) >= 0) { + if ((pos = pos + str.substr(pos + 5).search(/^\s*(msgid|msgctxt)/im))) { + headers = str.substr(0, pos); + } + } + + if ((match = headers.match(/[; ]charset\s*=\s*([\w\-]+)(?:[\s;]|\\n)*"\s*$/mi))) { + this._charset = sharedFuncs.formatCharset(match[1], this._charset); + } + + if (this._charset === 'utf-8') { + this._fileContents = str; + } else { + this._fileContents = this._toString(buf); + } +}; + +Parser.prototype._toString = function(buf) { + return encoding.convert(buf, 'utf-8', this._charset).toString('utf-8'); +}; + +/** + * State constants for parsing FSM + */ +Parser.prototype.states = { + none: 0x01, + comments: 0x02, + key: 0x03, + string: 0x04 +}; + +/** + * Value types for lexer + */ +Parser.prototype.types = { + comments: 0x01, + key: 0x02, + string: 0x03 +}; + +/** + * String matches for lexer + */ +Parser.prototype.symbols = { + quotes: /["']/, + comments: /\#/, + whitespace: /\s/, + key: /[\w\-\[\]]/ +}; + +/** + * Token parser. 
Parsed state can be found from this._lex + * + * @param {String} chunk String + */ +Parser.prototype._lexer = function(chunk) { + var chr; + + for (var i = 0, len = chunk.length; i < len; i++) { + chr = chunk.charAt(i); + switch (this._state) { + case this.states.none: + if (chr.match(this.symbols.quotes)) { + this._node = { + type: this.types.string, + value: '', + quote: chr + }; + this._lex.push(this._node); + this._state = this.states.string; + } else if (chr.match(this.symbols.comments)) { + this._node = { + type: this.types.comments, + value: '' + }; + this._lex.push(this._node); + this._state = this.states.comments; + } else if (!chr.match(this.symbols.whitespace)) { + this._node = { + type: this.types.key, + value: chr + }; + this._lex.push(this._node); + this._state = this.states.key; + } + break; + case this.states.comments: + if (chr === '\n') { + this._state = this.states.none; + } else if (chr !== '\r') { + this._node.value += chr; + } + break; + case this.states.string: + if (this._escaped) { + switch (chr) { + case 't': + this._node.value += '\t'; + break; + case 'n': + this._node.value += '\n'; + break; + case 'r': + this._node.value += '\r'; + break; + default: + this._node.value += chr; + } + this._escaped = false; + } else { + if (chr === this._node.quote) { + this._state = this.states.none; + } else if (chr === '\\') { + this._escaped = true; + break; + } else { + this._node.value += chr; + } + this._escaped = false; + } + break; + case this.states.key: + if (!chr.match(this.symbols.key)) { + this._state = this.states.none; + i--; + } else { + this._node.value += chr; + } + break; + } + } +}; + +/** + * Join multi line strings + * + * @param {Object} tokens Parsed tokens + * @return {Object} Parsed tokens, with multi line strings joined into one + */ +Parser.prototype._joinStringValues = function(tokens) { + var lastNode, response = []; + + for (var i = 0, len = tokens.length; i < len; i++) { + if (lastNode && tokens[i].type === this.types.string && lastNode.type === this.types.string) { + lastNode.value += tokens[i].value; + } else if (lastNode && tokens[i].type === this.types.comments && lastNode.type === this.types.comments) { + lastNode.value += '\n' + tokens[i].value; + } else { + response.push(tokens[i]); + lastNode = tokens[i]; + } + } + + return response; +}; + +/** + * Parse comments into separate comment blocks + * + * @param {Object} tokens Parsed tokens + */ +Parser.prototype._parseComments = function(tokens) { + // parse comments + tokens.forEach((function(node) { + var comment, lines; + + if (node && node.type === this.types.comments) { + comment = { + translator: [], + extracted: [], + reference: [], + flag: [], + previous: [] + }; + lines = (node.value || '').split(/\n/); + lines.forEach(function(line) { + switch (line.charAt(0) || '') { + case ':': + comment.reference.push(line.substr(1).trim()); + break; + case '.': + comment.extracted.push(line.substr(1).replace(/^\s+/, '')); + break; + case ',': + comment.flag.push(line.substr(1).replace(/^\s+/, '')); + break; + case '|': + comment.previous.push(line.substr(1).replace(/^\s+/, '')); + break; + default: + comment.translator.push(line.replace(/^\s+/, '')); + } + }); + + node.value = {}; + + Object.keys(comment).forEach(function(key) { + if (comment[key] && comment[key].length) { + node.value[key] = comment[key].join('\n'); + } + }); + } + }).bind(this)); +}; + +/** + * Join gettext keys with values + * + * @param {Object} tokens Parsed tokens + * @return {Object} Tokens + */ 
+Parser.prototype._handleKeys = function(tokens) { + var response = [], + lastNode; + + for (var i = 0, len = tokens.length; i < len; i++) { + if (tokens[i].type === this.types.key) { + lastNode = { + key: tokens[i].value + }; + if (i && tokens[i - 1].type === this.types.comments) { + lastNode.comments = tokens[i - 1].value; + } + lastNode.value = ''; + response.push(lastNode); + } else if (tokens[i].type === this.types.string && lastNode) { + lastNode.value += tokens[i].value; + } + } + + return response; +}; + +/** + * Separate different values into individual translation objects + * + * @param {Object} tokens Parsed tokens + * @return {Object} Tokens + */ +Parser.prototype._handleValues = function(tokens) { + var response = [], + lastNode, curContext, curComments; + + for (var i = 0, len = tokens.length; i < len; i++) { + if (tokens[i].key.toLowerCase() === 'msgctxt') { + curContext = tokens[i].value; + curComments = tokens[i].comments; + } else if (tokens[i].key.toLowerCase() === 'msgid') { + lastNode = { + msgid: tokens[i].value + }; + + if (curContext) { + lastNode.msgctxt = curContext; + } + + if (curComments) { + lastNode.comments = curComments; + } + + if (tokens[i].comments && !lastNode.comments) { + lastNode.comments = tokens[i].comments; + } + + curContext = false; + curComments = false; + response.push(lastNode); + } else if (tokens[i].key.toLowerCase() === 'msgid_plural') { + if (lastNode) { + lastNode.msgid_plural = tokens[i].value; + } + + if (tokens[i].comments && !lastNode.comments) { + lastNode.comments = tokens[i].comments; + } + + curContext = false; + curComments = false; + } else if (tokens[i].key.substr(0, 6).toLowerCase() === 'msgstr') { + if (lastNode) { + lastNode.msgstr = (lastNode.msgstr || []).concat(tokens[i].value); + } + + if (tokens[i].comments && !lastNode.comments) { + lastNode.comments = tokens[i].comments; + } + + curContext = false; + curComments = false; + } + } + + return response; +}; + +/** + * Compose a translation table from tokens object + * + * @param {Object} tokens Parsed tokens + * @return {Object} Translation table + */ +Parser.prototype._normalize = function(tokens) { + var msgctxt, + table = { + charset: this._charset, + headers: undefined, + translations: {} + }; + + for (var i = 0, len = tokens.length; i < len; i++) { + msgctxt = tokens[i].msgctxt || ''; + + if (!table.translations[msgctxt]) { + table.translations[msgctxt] = {}; + } + + if (!table.headers && !msgctxt && !tokens[i].msgid) { + table.headers = sharedFuncs.parseHeader(tokens[i].msgstr[0]); + } + + table.translations[msgctxt][tokens[i].msgid] = tokens[i]; + } + + return table; +}; + +/** + * Converts parsed tokens to a translation table + * + * @param {Object} tokens Parsed tokens + * @returns {Object} Translation table + */ +Parser.prototype._finalize = function(tokens) { + var data = this._joinStringValues(tokens); + this._parseComments(data); + data = this._handleKeys(data); + data = this._handleValues(data); + + return this._normalize(data); +}; + +/** + * Creates a transform stream for parsing PO input + * + * @constructor + * @param {String} [defaultCharset] Default charset to use + * @param {String} [options] Stream options + */ +function PoParserTransform(defaultCharset, options) { + if (!options && defaultCharset && typeof defaultCharset === 'object') { + options = defaultCharset; + defaultCharset = undefined; + } + + this.defaultCharset = defaultCharset; + this._parser = false; + this._tokens = {}; + + this._cache = []; + this._cacheSize = 0; + + 
this.initialTreshold = options.initialTreshold || 2 * 1024; + + Transform.call(this, options); + this._writableState.objectMode = false; + this._readableState.objectMode = true; +} +util.inherits(PoParserTransform, Transform); + +/** + * Processes a chunk of the input stream + */ +PoParserTransform.prototype._transform = function(chunk, encoding, done) { + var i, len = 0; + + if (!chunk || !chunk.length) { + return done(); + } + + if (!this._parser) { + this._cache.push(chunk); + this._cacheSize += chunk.length; + + // wait until the first 1kb before parsing headers for charset + if (this._cacheSize < this.initialTreshold) { + return setImmediate(done); + } else if (this._cacheSize) { + chunk = Buffer.concat(this._cache, this._cacheSize); + this._cacheSize = 0; + this._cache = []; + } + + this._parser = new Parser(chunk, this.defaultCharset); + } else if (this._cacheSize) { + // this only happens if we had an uncompleted 8bit sequence from the last iteration + this._cache.push(chunk); + this._cacheSize += chunk.length; + chunk = Buffer.concat(this._cache, this._cacheSize); + this._cacheSize = 0; + this._cache = []; + } + + // cache 8bit bytes from the end of the chunk + // helps if the chunk ends in the middle of an utf-8 sequence + for (i = chunk.length - 1; i >= 0; i--) { + if (chunk[i] >= 0x80) { + len++; + continue; + } + break; + } + // it seems we found some 8bit bytes from the end of the string, so let's cache these + if (len) { + this._cache = [chunk.slice(chunk.length - len)]; + this._cacheSize = this._cache[0].length; + chunk = chunk.slice(0, chunk.length - len); + } + + // chunk might be empty if it only contined of 8bit bytes and these were all cached + if (chunk.length) { + this._parser._lexer(this._parser._toString(chunk)); + } + + setImmediate(done); +}; + +/** + * Once all input has been processed emit the parsed translation table as an object + */ +PoParserTransform.prototype._flush = function(done) { + var chunk; + + if (this._cacheSize) { + chunk = Buffer.concat(this._cache, this._cacheSize); + } + + if (!this._parser && chunk) { + this._parser = new Parser(chunk, this.defaultCharset); + } + + if (chunk) { + this._parser._lexer(this._parser._toString(chunk)); + } + + if (this._parser) { + this.push(this._parser._finalize(this._parser._lex)); + } + + setImmediate(done); +};
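`PoParserTransform` above buffers raw input until `initialTreshold` bytes have arrived so the charset can be read from the PO header, holds back trailing bytes ≥ 0x80 so a chunk boundary cannot split a UTF-8 sequence, and pushes a single parsed table object from `_flush`. A sketch of consuming that object-mode output, with `./en.po` as a hypothetical path and the stream options passed explicitly:

```javascript
// Sketch: stream-parse a PO file; the transform emits one parsed translation
// table object once the input ends (object mode on the readable side).
// './en.po' is a hypothetical path used for illustration.
var fs = require('fs');
var gettextParser = require('gettext-parser');

var parseStream = gettextParser.po.createParseStream(null, { initialTreshold: 2 * 1024 });

fs.createReadStream('./en.po').pipe(parseStream);

parseStream.on('data', function(table) {
  // One object for the whole file: charset, headers and translations.
  console.log(table.charset, Object.keys(table.translations).length);
});

parseStream.on('error', function(err) {
  console.error('parse failed:', err);
});
```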
\ No newline at end of file diff --git a/node_modules/gettext-parser/lib/shared.js b/node_modules/gettext-parser/lib/shared.js new file mode 100644 index 000000000..44bfb86f7 --- /dev/null +++ b/node_modules/gettext-parser/lib/shared.js @@ -0,0 +1,120 @@ +'use strict'; + +// Expose to the world +module.exports.parseHeader = parseHeader; +module.exports.generateHeader = generateHeader; +module.exports.formatCharset = formatCharset; +module.exports.foldLine = foldLine; + +/** + * Parses a header string into an object of key-value pairs + * + * @param {String} str Header string + * @return {Object} An object of key-value pairs + */ +function parseHeader(str) { + var lines = (str || '').split('\n'), + headers = {}; + + lines.forEach(function(line) { + var parts = line.trim().split(':'), + key = (parts.shift() || '').trim().toLowerCase(), + value = parts.join(':').trim(); + if (!key) { + return; + } + headers[key] = value; + }); + + return headers; +} + +/** + * Convert first letters after - to uppercase, other lowercase + * + * @param {String} str String to be updated + * @return {String} A string with uppercase words + */ +function upperCaseWords(str) { + return (str || '').toLowerCase().trim().replace(/^(MIME|POT?(?=\-)|[a-z])|\-[a-z]/gi, function(str) { + return str.toUpperCase(); + }); +} + +/** + * Joins a header object of key value pairs into a header string + * + * @param {Object} header Object of key value pairs + * @return {String} Header string + */ +function generateHeader(header) { + var lines = []; + + Object.keys(header || {}).forEach(function(key) { + if (key) { + lines.push(upperCaseWords(key) + ': ' + (header[key] || '').trim()); + } + }); + + return lines.join('\n') + (lines.length ? '\n' : ''); +} + +/** + * Normalizes charset name. Converts utf8 to utf-8, WIN1257 to windows-1257 etc. + * + * @param {String} charset Charset name + * @return {String} Normalized charset name + */ +function formatCharset(charset, defaultCharset) { + return (charset || 'iso-8859-1').toString().toLowerCase(). + replace(/^utf[\-_]?(\d+)$/, 'utf-$1'). + replace(/^win(?:dows)?[\-_]?(\d+)$/, 'windows-$1'). + replace(/^latin[\-_]?(\d+)$/, 'iso-8859-$1'). + replace(/^(us[\-_]?)?ascii$/, 'ascii'). + replace(/^charset$/, defaultCharset || 'iso-8859-1'). + trim(); +} + +/** + * Folds long lines according to PO format + * + * @param {String} str PO formatted string to be folded + * @param {Number} [maxLen=76] Maximum allowed length for folded lines + * @return {Array} An array of lines + */ +function foldLine(str, maxLen) { + + maxLen = maxLen || 76; + + var lines = [], + curLine = '', + pos = 0, + len = str.length, + match; + + while (pos < len) { + curLine = str.substr(pos, maxLen); + + // ensure that the line never ends with a partial escaping + // make longer lines if needed + while (curLine.substr(-1) === '\\' && pos + curLine.length < len) { + curLine += str.charAt(pos + curLine.length); + } + + // ensure that if possible, line breaks are done at reasonable places + if ((match = curLine.match(/\\n/))) { + curLine = curLine.substr(0, match.index + 2); + } else if (pos + curLine.length < len) { + if ((match = curLine.match(/(\s+)[^\s]*$/)) && match.index > 0) { + curLine = curLine.substr(0, match.index + match[1].length); + } else if ((match = curLine.match(/([\x21-\x40\x5b-\x60\x7b-\x7e]+)[^\x21-\x40\x5b-\x60\x7b-\x7e]*$/)) && match.index > 0) { + curLine = curLine.substr(0, match.index + match[1].length); + } + } + + lines.push(curLine); + pos += curLine.length; + } + + return lines; +}
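The helpers in `lib/shared.js` above are internal (they are not re-exported from `index.js`), but they are easy to exercise directly when debugging charset or folding behaviour; the deep `require` path below is an assumption about how the vendored file would be reached. `formatCharset` normalizes charset aliases, `parseHeader`/`generateHeader` round-trip the header block, and `foldLine` wraps long values at escape or whitespace boundaries:

```javascript
// Sketch: exercising the internal helpers from lib/shared.js directly.
// Required by path because index.js does not re-export them.
var shared = require('gettext-parser/lib/shared');

console.log(shared.formatCharset('UTF8'));      // 'utf-8'
console.log(shared.formatCharset('WIN1257'));   // 'windows-1257'
console.log(shared.formatCharset('latin_1'));   // 'iso-8859-1'

var headers = shared.parseHeader('Content-Type: text/plain; charset=utf-8\nLanguage: et\n');
console.log(headers['content-type']);           // 'text/plain; charset=utf-8'
console.log(shared.generateHeader(headers));    // keys re-capitalized, one per line

// Long values are folded into chunks of at most 76 characters.
console.log(shared.foldLine(new Array(5).join('lorem ipsum dolor sit amet ')));
```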
\ No newline at end of file diff --git a/node_modules/gettext-parser/package.json b/node_modules/gettext-parser/package.json new file mode 100644 index 000000000..935c5b3ba --- /dev/null +++ b/node_modules/gettext-parser/package.json @@ -0,0 +1,93 @@ +{ + "_args": [ + [ + { + "raw": "gettext-parser@1.1.0", + "scope": null, + "escapedName": "gettext-parser", + "name": "gettext-parser", + "rawSpec": "1.1.0", + "spec": "1.1.0", + "type": "version" + }, + "/home/dold/repos/taler/wallet-webex/node_modules/po2json" + ] + ], + "_from": "gettext-parser@1.1.0", + "_id": "gettext-parser@1.1.0", + "_inCache": true, + "_location": "/gettext-parser", + "_npmUser": { + "name": "andris", + "email": "andris@node.ee" + }, + "_npmVersion": "1.4.28", + "_phantomChildren": {}, + "_requested": { + "raw": "gettext-parser@1.1.0", + "scope": null, + "escapedName": "gettext-parser", + "name": "gettext-parser", + "rawSpec": "1.1.0", + "spec": "1.1.0", + "type": "version" + }, + "_requiredBy": [ + "/po2json" + ], + "_resolved": "https://registry.npmjs.org/gettext-parser/-/gettext-parser-1.1.0.tgz", + "_shasum": "2c5a6638d893934b9b55037d0ad82cb7004b2679", + "_shrinkwrap": null, + "_spec": "gettext-parser@1.1.0", + "_where": "/home/dold/repos/taler/wallet-webex/node_modules/po2json", + "author": { + "name": "Andris Reinman" + }, + "bugs": { + "url": "https://github.com/andris9/gettext-parser/issues" + }, + "dependencies": { + "encoding": "^0.1.11" + }, + "description": "Parse and compile gettext po and mo files to/from json, nothing more, nothing less", + "devDependencies": { + "chai": "^1.10.0", + "grunt": "^0.4.5", + "grunt-contrib-jshint": "^0.10.0", + "grunt-mocha-test": "^0.12.7", + "mocha": "^2.1.0" + }, + "directories": {}, + "dist": { + "shasum": "2c5a6638d893934b9b55037d0ad82cb7004b2679", + "tarball": "https://registry.npmjs.org/gettext-parser/-/gettext-parser-1.1.0.tgz" + }, + "gitHead": "aaa83f561c45efd6e55e5088ed18831fb4f22b53", + "homepage": "http://github.com/andris9/gettext-parser", + "keywords": [ + "i18n", + "l10n", + "gettext", + "mo", + "po" + ], + "license": "MIT", + "main": "./index", + "maintainers": [ + { + "name": "andris", + "email": "andris@node.ee" + } + ], + "name": "gettext-parser", + "optionalDependencies": {}, + "readme": "ERROR: No README data found!", + "repository": { + "type": "git", + "url": "git+ssh://git@github.com/andris9/gettext-parser.git" + }, + "scripts": { + "test": "grunt" + }, + "version": "1.1.0" +} diff --git a/node_modules/gettext-parser/test/fixtures/latin13-mo.json b/node_modules/gettext-parser/test/fixtures/latin13-mo.json new file mode 100644 index 000000000..c91b4f78e --- /dev/null +++ b/node_modules/gettext-parser/test/fixtures/latin13-mo.json @@ -0,0 +1,69 @@ +{ + "charset": "iso-8859-13", + "headers": { + "project-id-version": "gettext-parser", + "report-msgid-bugs-to": "andris@node.ee", + "pot-creation-date": "2012-05-18 14:28:00+03:00", + "po-revision-date": "2012-05-18 14:44+0300", + "last-translator": "Andris Reinman <andris@kreata.ee>", + "language-team": "gettext-parser <andris@node.ee>", + "mime-version": "1.0", + "content-type": "text/plain; charset=iso-8859-13", + "content-transfer-encoding": "8bit", + "language": "", + "plural-forms": "nplurals=2; plural=(n!=1);", + "x-poedit-language": "Estonian", + "x-poedit-country": "ESTONIA", + "x-poedit-sourcecharset": "iso-8859-13" + }, + "translations": { + "": { + "": { + "msgid": "", + "msgstr": [ + "Project-Id-Version: gettext-parser\nReport-Msgid-Bugs-To: andris@node.ee\nPOT-Creation-Date: 2012-05-18 
14:28:00+03:00\nPO-Revision-Date: 2012-05-18 14:44+0300\nLast-Translator: Andris Reinman <andris@kreata.ee>\nLanguage-Team: gettext-parser <andris@node.ee>\nMIME-Version: 1.0\nContent-Type: text/plain; charset=iso-8859-13\nContent-Transfer-Encoding: 8bit\nLanguage: \nPlural-Forms: nplurals=2; plural=(n!=1);\nX-Poedit-Language: Estonian\nX-Poedit-Country: ESTONIA\nX-Poedit-Sourcecharset: iso-8859-13\n" + ] + }, + "o1": { + "msgid": "o1", + "msgstr": [ + "t1" + ] + }, + "o2-1": { + "msgid": "o2-1", + "msgid_plural": "o2-2", + "msgstr": [ + "t2-1", + "t2-2" + ] + }, + "o3-õäöü": { + "msgid": "o3-õäöü", + "msgstr": [ + "t3-žš" + ] + } + }, + "c1": { + "co1": { + "msgctxt": "c1", + "msgid": "co1", + "msgstr": [ + "ct1" + ] + } + }, + "c2": { + "co2-1": { + "msgctxt": "c2", + "msgid": "co2-1", + "msgid_plural": "co2-2", + "msgstr": [ + "ct2-1", + "ct2-2" + ] + } + } + } +}
\ No newline at end of file diff --git a/node_modules/gettext-parser/test/fixtures/latin13-po.json b/node_modules/gettext-parser/test/fixtures/latin13-po.json new file mode 100644 index 000000000..c4b394d0c --- /dev/null +++ b/node_modules/gettext-parser/test/fixtures/latin13-po.json @@ -0,0 +1,100 @@ +{ + "charset": "iso-8859-13", + "headers": { + "project-id-version": "gettext-parser", + "report-msgid-bugs-to": "andris@node.ee", + "pot-creation-date": "2012-05-18 14:28:00+03:00", + "po-revision-date": "2012-05-18 14:44+0300", + "last-translator": "Andris Reinman <andris@kreata.ee>", + "language-team": "gettext-parser <andris@node.ee>", + "mime-version": "1.0", + "content-type": "text/plain; charset=iso-8859-13", + "content-transfer-encoding": "8bit", + "language": "", + "plural-forms": "nplurals=2; plural=(n!=1);", + "x-poedit-language": "Estonian", + "x-poedit-country": "ESTONIA", + "x-poedit-sourcecharset": "iso-8859-13" + }, + "translations": { + "": { + "": { + "msgid": "", + "comments": { + "translator": "gettext-parser test file.\nCopyright (C) 2012 Andris Reinman\nThis file is distributed under the same license as the gettext-parser package.\nANDRIS REINMAN <andris@node.ee>, 2012.\n" + }, + "msgstr": [ + "Project-Id-Version: gettext-parser\nReport-Msgid-Bugs-To: andris@node.ee\nPOT-Creation-Date: 2012-05-18 14:28:00+03:00\nPO-Revision-Date: 2012-05-18 14:44+0300\nLast-Translator: Andris Reinman <andris@kreata.ee>\nLanguage-Team: gettext-parser <andris@node.ee>\nMIME-Version: 1.0\nContent-Type: text/plain; charset=iso-8859-13\nContent-Transfer-Encoding: 8bit\nLanguage: \nPlural-Forms: nplurals=2; plural=(n!=1);\nX-Poedit-Language: Estonian\nX-Poedit-Country: ESTONIA\nX-Poedit-Sourcecharset: iso-8859-13\n" + ] + }, + "o1": { + "msgid": "o1", + "comments": { + "translator": "Normal string" + }, + "msgstr": [ + "t1" + ] + }, + "o2-1": { + "msgid": "o2-1", + "comments": { + "translator": "Plural string" + }, + "msgid_plural": "o2-2", + "msgstr": [ + "t2-1", + "t2-2" + ] + }, + "o3-õäöü": { + "msgid": "o3-õäöü", + "comments": { + "translator": "Normal string with special chars" + }, + "msgstr": [ + "t3-žš" + ] + }, + "test": { + "msgid": "test", + "comments": { + "translator": "Normal comment line 1\nNormal comment line 2", + "extracted": "Editors note line 1\nEditors note line 2", + "reference": "/absolute/path:13\n/absolute/path:14", + "flag": "line 1\nline 2", + "previous": "line 3\nline 4" + }, + "msgstr": [ + "test" + ] + } + }, + "c1": { + "co1": { + "msgid": "co1", + "msgctxt": "c1", + "comments": { + "translator": "Normal string in a context" + }, + "msgstr": [ + "ct1" + ] + } + }, + "c2": { + "co2-1": { + "msgid": "co2-1", + "msgctxt": "c2", + "comments": { + "translator": "Plural string in a context" + }, + "msgid_plural": "co2-2", + "msgstr": [ + "ct2-1", + "ct2-2" + ] + } + } + } +}
\ No newline at end of file diff --git a/node_modules/gettext-parser/test/fixtures/latin13.mo b/node_modules/gettext-parser/test/fixtures/latin13.mo Binary files differnew file mode 100644 index 000000000..bbfd0eb09 --- /dev/null +++ b/node_modules/gettext-parser/test/fixtures/latin13.mo diff --git a/node_modules/gettext-parser/test/fixtures/latin13.po b/node_modules/gettext-parser/test/fixtures/latin13.po new file mode 100644 index 000000000..d42a339bf --- /dev/null +++ b/node_modules/gettext-parser/test/fixtures/latin13.po @@ -0,0 +1,60 @@ +# gettext-parser test file. +# Copyright (C) 2012 Andris Reinman +# This file is distributed under the same license as the gettext-parser package. +# ANDRIS REINMAN <andris@node.ee>, 2012. +# +msgid "" +msgstr "" +"Project-Id-Version: gettext-parser\n" +"Report-Msgid-Bugs-To: andris@node.ee\n" +"POT-Creation-Date: 2012-05-18 14:28:00+03:00\n" +"PO-Revision-Date: 2012-05-18 14:44+0300\n" +"Last-Translator: Andris Reinman <andris@kreata.ee>\n" +"Language-Team: gettext-parser <andris@node.ee>\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=iso-8859-13\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: \n" +"Plural-Forms: nplurals=2; plural=(n!=1);\n" +"X-Poedit-Language: Estonian\n" +"X-Poedit-Country: ESTONIA\n" +"X-Poedit-Sourcecharset: iso-8859-13\n" + +# Normal string +msgid "o1" +msgstr "t1" + +# Plural string +msgid "o2-1" +msgid_plural "o2-2" +msgstr[0] "t2-1" +msgstr[1] "t2-2" + +# Normal string with special chars +msgid "o3-õäöü" +msgstr "t3-þð" + +# Normal comment line 1 +# Normal comment line 2 +#: /absolute/path:13 +#: /absolute/path:14 +#. Editors note line 1 +#. Editors note line 2 +#, line 1 +#, line 2 +#| line 3 +#| line 4 +msgid "test" +msgstr "test" + +# Normal string in a context +msgctxt "c1" +msgid "co1" +msgstr "ct1" + +# Plural string in a context +msgctxt "c2" +msgid "co2-1" +msgid_plural "co2-2" +msgstr[0] "ct2-1" +msgstr[1] "ct2-2"
\ No newline at end of file diff --git a/node_modules/gettext-parser/test/fixtures/utf8-mo.json b/node_modules/gettext-parser/test/fixtures/utf8-mo.json new file mode 100644 index 000000000..29b2c0852 --- /dev/null +++ b/node_modules/gettext-parser/test/fixtures/utf8-mo.json @@ -0,0 +1,69 @@ +{ + "charset": "utf-8", + "headers": { + "project-id-version": "gettext-parser", + "report-msgid-bugs-to": "andris@node.ee", + "pot-creation-date": "2012-05-18 14:28:00+03:00", + "po-revision-date": "2012-05-18 14:37+0300", + "last-translator": "Andris Reinman <andris@kreata.ee>", + "language-team": "gettext-parser <andris@node.ee>", + "mime-version": "1.0", + "content-type": "text/plain; charset=utf-8", + "content-transfer-encoding": "8bit", + "language": "", + "plural-forms": "nplurals=2; plural=(n!=1);", + "x-poedit-language": "Estonian", + "x-poedit-country": "ESTONIA", + "x-poedit-sourcecharset": "utf-8" + }, + "translations": { + "": { + "": { + "msgid": "", + "msgstr": [ + "Project-Id-Version: gettext-parser\nReport-Msgid-Bugs-To: andris@node.ee\nPOT-Creation-Date: 2012-05-18 14:28:00+03:00\nPO-Revision-Date: 2012-05-18 14:37+0300\nLast-Translator: Andris Reinman <andris@kreata.ee>\nLanguage-Team: gettext-parser <andris@node.ee>\nMIME-Version: 1.0\nContent-Type: text/plain; charset=utf-8\nContent-Transfer-Encoding: 8bit\nLanguage: \nPlural-Forms: nplurals=2; plural=(n!=1);\nX-Poedit-Language: Estonian\nX-Poedit-Country: ESTONIA\nX-Poedit-Sourcecharset: utf-8\n" + ] + }, + "o1": { + "msgid": "o1", + "msgstr": [ + "t1" + ] + }, + "o2-1": { + "msgid": "o2-1", + "msgid_plural": "o2-2", + "msgstr": [ + "t2-1", + "t2-2" + ] + }, + "o3-õäöü": { + "msgid": "o3-õäöü", + "msgstr": [ + "t3-žš" + ] + } + }, + "c1": { + "co1": { + "msgctxt": "c1", + "msgid": "co1", + "msgstr": [ + "ct1" + ] + } + }, + "c2": { + "co2-1": { + "msgctxt": "c2", + "msgid": "co2-1", + "msgid_plural": "co2-2", + "msgstr": [ + "ct2-1", + "ct2-2" + ] + } + } + } +}
\ No newline at end of file diff --git a/node_modules/gettext-parser/test/fixtures/utf8-po.json b/node_modules/gettext-parser/test/fixtures/utf8-po.json new file mode 100644 index 000000000..e5921b7e8 --- /dev/null +++ b/node_modules/gettext-parser/test/fixtures/utf8-po.json @@ -0,0 +1,127 @@ +{ + "charset": "utf-8", + "headers": { + "project-id-version": "gettext-parser", + "report-msgid-bugs-to": "andris@node.ee", + "pot-creation-date": "2012-05-18 14:28:00+03:00", + "po-revision-date": "2012-05-18 14:37+0300", + "last-translator": "Andris Reinman <andris@kreata.ee>", + "language-team": "gettext-parser <andris@node.ee>", + "mime-version": "1.0", + "content-type": "text/plain; charset=utf-8", + "content-transfer-encoding": "8bit", + "language": "", + "plural-forms": "nplurals=2; plural=(n!=1);", + "x-poedit-language": "Estonian", + "x-poedit-country": "ESTONIA", + "x-poedit-sourcecharset": "utf-8" + }, + "translations": { + "": { + "": { + "msgid": "", + "comments": { + "translator": "gettext-parser test file.\nCopyright (C) 2012 Andris Reinman\nThis file is distributed under the same license as the gettext-parser package.\nANDRIS REINMAN <andris@node.ee>, 2012.\n" + }, + "msgstr": [ + "Project-Id-Version: gettext-parser\nReport-Msgid-Bugs-To: andris@node.ee\nPOT-Creation-Date: 2012-05-18 14:28:00+03:00\nPO-Revision-Date: 2012-05-18 14:37+0300\nLast-Translator: Andris Reinman <andris@kreata.ee>\nLanguage-Team: gettext-parser <andris@node.ee>\nMIME-Version: 1.0\nContent-Type: text/plain; charset=utf-8\nContent-Transfer-Encoding: 8bit\nLanguage: \nPlural-Forms: nplurals=2; plural=(n!=1);\nX-Poedit-Language: Estonian\nX-Poedit-Country: ESTONIA\nX-Poedit-Sourcecharset: utf-8\n" + ] + }, + "o1": { + "msgid": "o1", + "comments": { + "translator": "Normal string" + }, + "msgstr": [ + "t1" + ] + }, + "o2-1": { + "msgid": "o2-1", + "comments": { + "translator": "Plural string" + }, + "msgid_plural": "o2-2\no2-3\no2-4", + "msgstr": [ + "t2-1", + "t2-2" + ] + }, + "o3-õäöü": { + "msgid": "o3-õäöü", + "comments": { + "translator": "Normal string with special chars" + }, + "msgstr": [ + "t3-žš" + ] + }, + "test": { + "msgid": "test", + "comments": { + "translator": "Normal comment line 1\nNormal comment line 2", + "extracted": "Editors note line 1\nEditors note line 2", + "reference": "/absolute/path:13\n/absolute/path:14", + "flag": "line 1\nline 2", + "previous": "line 3\nline 4" + }, + "msgstr": [ + "test" + ] + }, + "\"\\'\t": { + "msgid": "\"\\'\t", + "comments": { + "translator": "String with escapes" + }, + "msgstr": [ + "\"\\'\t" + ] + } + }, + "c1": { + "co1": { + "msgid": "co1", + "msgctxt": "c1", + "comments": { + "translator": "Normal string in a context" + }, + "msgstr": [ + "ct1" + ] + } + }, + "c2": { + "co2-1": { + "msgid": "co2-1", + "msgctxt": "c2", + "comments": { + "translator": "Plural string in a context" + }, + "msgid_plural": "co2-2", + "msgstr": [ + "ct2-1", + "ct2-2" + ] + } + }, + "Button label": { + "Log in": { + "msgid": "Log in", + "msgctxt": "Button label", + "msgstr": [ + "" + ] + } + }, + "Dialog title": { + "Log in": { + "msgid": "Log in", + "msgctxt": "Dialog title", + "msgstr": [ + "" + ] + } + } + } +}
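The two JSON fixtures above document the object shape gettext-parser produces: top-level charset, headers and translations, with translations keyed first by msgctxt (the empty string is the default context) and then by msgid. A minimal consumption sketch of that shape; the fixture path assumes the snippet sits next to the test fixtures, and everything else is illustrative:

'use strict';

var fs = require('fs');
var gettextParser = require('gettext-parser');

// Parse a PO file into the { charset, headers, translations } shape
// shown in utf8-po.json above.
var parsed = gettextParser.po.parse(fs.readFileSync(__dirname + '/fixtures/utf8.po'));

// msgstr is always an array, one entry per plural form.
console.log(parsed.translations['']['o1'].msgstr[0]);   // 't1'
console.log(parsed.translations['c2']['co2-1'].msgstr); // [ 'ct2-1', 'ct2-2' ]
console.log(parsed.headers['plural-forms']);            // 'nplurals=2; plural=(n!=1);'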
\ No newline at end of file diff --git a/node_modules/gettext-parser/test/fixtures/utf8.mo b/node_modules/gettext-parser/test/fixtures/utf8.mo Binary files differnew file mode 100644 index 000000000..efafe43e6 --- /dev/null +++ b/node_modules/gettext-parser/test/fixtures/utf8.mo diff --git a/node_modules/gettext-parser/test/fixtures/utf8.po b/node_modules/gettext-parser/test/fixtures/utf8.po new file mode 100644 index 000000000..3d879463a --- /dev/null +++ b/node_modules/gettext-parser/test/fixtures/utf8.po @@ -0,0 +1,75 @@ +# gettext-parser test file. +# Copyright (C) 2012 Andris Reinman +# This file is distributed under the same license as the gettext-parser package. +# ANDRIS REINMAN <andris@node.ee>, 2012. +# +msgid "" +msgstr "" +"Project-Id-Version: gettext-parser\n" +"Report-Msgid-Bugs-To: andris@node.ee\n" +"POT-Creation-Date: 2012-05-18 14:28:00+03:00\n" +"PO-Revision-Date: 2012-05-18 14:37+0300\n" +"Last-Translator: Andris Reinman <andris@kreata.ee>\n" +"Language-Team: gettext-parser <andris@node.ee>\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: \n" +"Plural-Forms: nplurals=2; plural=(n!=1);\n" +"X-Poedit-Language: Estonian\n" +"X-Poedit-Country: ESTONIA\n" +"X-Poedit-Sourcecharset: utf-8\n" + +# Normal string +msgid "o1" +msgstr "t1" + +# Plural string +msgid "o2-1" +msgid_plural "" +"o2-2\n" +"o2-3\n" +"o2-4" +msgstr[0] "t2-1" +msgstr[1] "t2-2" + +# Normal string with special chars +msgid "o3-õäöü" +msgstr "t3-žš" + +# Normal comment line 1 +# Normal comment line 2 +#: /absolute/path:13 +#: /absolute/path:14 +#. Editors note line 1 +#. Editors note line 2 +#, line 1 +#, line 2 +#| line 3 +#| line 4 +msgid "test" +msgstr "test" + +# String with escapes +msgid "\"\\'\t" +msgstr "\"\\'\t" + +# Normal string in a context +msgctxt "c1" +msgid "co1" +msgstr "ct1" + +# Plural string in a context +msgctxt "c2" +msgid "co2-1" +msgid_plural "co2-2" +msgstr[0] "ct2-1" +msgstr[1] "ct2-2" + +msgctxt "Button label" +msgid "Log in" +msgstr "" + +msgctxt "Dialog title" +msgid "Log in" +msgstr ""
\ No newline at end of file diff --git a/node_modules/gettext-parser/test/folder-test.js b/node_modules/gettext-parser/test/folder-test.js new file mode 100644 index 000000000..7f1bbecae --- /dev/null +++ b/node_modules/gettext-parser/test/folder-test.js @@ -0,0 +1,40 @@ +'use strict'; + +var chai = require('chai'); +var sharedFuncs = require('../lib/shared'); + +var expect = chai.expect; +chai.config.includeStack = true; + +describe('Folding tests', function() { + + it('Short line, no folding', function() { + var line = 'abc def ghi'; + var folded = sharedFuncs.foldLine(line); + + expect(line).to.equal(folded.join('')); + expect(folded.length).to.equal(1); + }); + + it('Short line, force fold with newline', function() { + var line = 'abc \\ndef \\nghi'; + var folded = sharedFuncs.foldLine(line); + + expect(line).to.equal(folded.join('')); + expect(folded).to.deep.equal(['abc \\n', 'def \\n', 'ghi']); + }); + + it('Long line', function() { + var expected = ['Lorem ipsum dolor sit amet, consectetur adipiscing elit. Vestibulum pretium ', + 'a nunc ac fringilla. Nulla laoreet tincidunt tincidunt. Proin tristique ', + 'vestibulum mauris non aliquam. Vivamus volutpat odio nisl, sed placerat ', + 'turpis sodales a. Vestibulum quis lectus ac elit sagittis sodales ac a ', + 'felis. Nulla iaculis, nisl ut mattis fringilla, tortor quam tincidunt ', + 'lorem, quis feugiat purus felis ut velit. Donec euismod eros ut leo ', + 'lobortis tristique.' + ]; + var folded = sharedFuncs.foldLine(expected.join('')); + expect(folded).to.deep.equal(expected); + }); + +});
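folder-test.js exercises the internal line-folding helper from lib/shared. A small sketch of the same observable contract outside the test harness: the returned chunks always concatenate back to the input, and long input is split into PO-friendly chunks. The package-relative module path mirrors the test's require('../lib/shared'), and the exact wrap width is left to the library's default rather than assumed here:

'use strict';

var sharedFuncs = require('gettext-parser/lib/shared');

// Fold a long msgstr-style string the same way the PO compiler would.
var long = new Array(20).join('lorem ipsum dolor sit amet ');
var folded = sharedFuncs.foldLine(long);

console.log(folded.join('') === long); // true: folding never loses content
console.log(folded.length > 1);        // true: long input ends up in several chunks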
\ No newline at end of file diff --git a/node_modules/gettext-parser/test/mo-compiler-test.js b/node_modules/gettext-parser/test/mo-compiler-test.js new file mode 100644 index 000000000..77b820f81 --- /dev/null +++ b/node_modules/gettext-parser/test/mo-compiler-test.js @@ -0,0 +1,30 @@ +'use strict'; + +var chai = require('chai'); +var gettextParser = require('..'); +var fs = require('fs'); + +var expect = chai.expect; +chai.config.includeStack = true; + +describe('MO Compiler', function() { + + describe('UTF-8', function() { + it('should compile', function() { + var json = JSON.parse(fs.readFileSync(__dirname + '/fixtures/utf8-mo.json', 'utf-8')); + var mo = fs.readFileSync(__dirname + '/fixtures/utf8.mo'); + + var compiled = gettextParser.mo.compile(json); + expect(compiled).to.deep.equal(mo); + }); + }); + + describe('Latin-13', function() { + it('should compile', function() { + var json = JSON.parse(fs.readFileSync(__dirname + '/fixtures/latin13-mo.json', 'utf-8')); + var mo = fs.readFileSync(__dirname + '/fixtures/latin13.mo'); + var compiled = gettextParser.mo.compile(json); + expect(compiled).to.deep.equal(mo); + }); + }); +});
\ No newline at end of file diff --git a/node_modules/gettext-parser/test/mo-parser-test.js b/node_modules/gettext-parser/test/mo-parser-test.js new file mode 100644 index 000000000..a8d9895c4 --- /dev/null +++ b/node_modules/gettext-parser/test/mo-parser-test.js @@ -0,0 +1,29 @@ +'use strict'; + +var chai = require('chai'); +var gettextParser = require('..'); +var fs = require('fs'); + +var expect = chai.expect; +chai.config.includeStack = true; + +describe('MO Parser', function() { + + describe('UTF-8', function() { + it('should parse', function() { + var mo = fs.readFileSync(__dirname + '/fixtures/utf8.mo'); + var json = JSON.parse(fs.readFileSync(__dirname + '/fixtures/utf8-mo.json', 'utf-8')); + var parsed = gettextParser.mo.parse(mo); + expect(parsed).to.deep.equal(json); + }); + }); + + describe('Latin-13', function() { + it('should parse', function() { + var mo = fs.readFileSync(__dirname + '/fixtures/latin13.mo'); + var json = JSON.parse(fs.readFileSync(__dirname + '/fixtures/latin13-mo.json', 'utf-8')); + var parsed = gettextParser.mo.parse(mo); + expect(parsed).to.deep.equal(json); + }); + }); +});
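Taken together, the two MO test files above form a compile/parse pair over the same fixtures. A hedged round-trip sketch using the same two calls; the file paths below are invented for illustration:

'use strict';

var fs = require('fs');
var gettextParser = require('gettext-parser');

// Binary MO in, plain translation table out...
var table = gettextParser.mo.parse(fs.readFileSync('./locales/et.mo'));

// ...and the same table compiles straight back into an MO buffer.
fs.writeFileSync('./locales/et.roundtrip.mo', gettextParser.mo.compile(table));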
\ No newline at end of file diff --git a/node_modules/gettext-parser/test/po-compiler-test.js b/node_modules/gettext-parser/test/po-compiler-test.js new file mode 100644 index 000000000..d93ae825a --- /dev/null +++ b/node_modules/gettext-parser/test/po-compiler-test.js @@ -0,0 +1,30 @@ +'use strict'; + +var chai = require('chai'); +var gettextParser = require('..'); +var fs = require('fs'); + +var expect = chai.expect; +chai.config.includeStack = true; + +describe('PO Compiler', function() { + + describe('UTF-8', function() { + it('should compile', function() { + var json = JSON.parse(fs.readFileSync(__dirname + '/fixtures/utf8-po.json', 'utf-8')); + var po = fs.readFileSync(__dirname + '/fixtures/utf8.po'); + + var compiled = gettextParser.po.compile(json); + expect(compiled).to.deep.equal(po); + }); + }); + + describe('Latin-13', function() { + it('should compile', function() { + var json = JSON.parse(fs.readFileSync(__dirname + '/fixtures/latin13-po.json', 'utf-8')); + var po = fs.readFileSync(__dirname + '/fixtures/latin13.po'); + var compiled = gettextParser.po.compile(json); + expect(compiled).to.deep.equal(po); + }); + }); +});
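po.compile accepts the same object shape the parser emits, so a PO file can also be produced from a hand-built table. A minimal sketch under stated assumptions: the headers, msgids and output path are invented, and omitting the empty-msgid header entry (letting the compiler render the header block from headers alone) is an assumption rather than something the tests above demonstrate:

'use strict';

var fs = require('fs');
var gettextParser = require('gettext-parser');

var data = {
    charset: 'utf-8',
    headers: {
        'content-type': 'text/plain; charset=utf-8',
        'plural-forms': 'nplurals=2; plural=(n!=1);'
    },
    translations: {
        '': {
            // Hypothetical entry for illustration only.
            'Hello': { msgid: 'Hello', msgstr: ['Tere'] }
        }
    }
};

// po.compile returns a Buffer containing the rendered PO file.
fs.writeFileSync('./et.po', gettextParser.po.compile(data));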
\ No newline at end of file diff --git a/node_modules/gettext-parser/test/po-parser-test.js b/node_modules/gettext-parser/test/po-parser-test.js new file mode 100644 index 000000000..8e908fec9 --- /dev/null +++ b/node_modules/gettext-parser/test/po-parser-test.js @@ -0,0 +1,61 @@
+'use strict';
+
+var chai = require('chai');
+var gettextParser = require('..');
+var fs = require('fs');
+
+var expect = chai.expect;
+chai.config.includeStack = true;
+
+describe('PO Parser', function() {
+
+ describe('UTF-8', function() {
+ it('should parse', function() {
+ var po = fs.readFileSync(__dirname + '/fixtures/utf8.po');
+ var json = JSON.parse(fs.readFileSync(__dirname + '/fixtures/utf8-po.json', 'utf-8'));
+ var parsed = gettextParser.po.parse(po);
+ expect(parsed).to.deep.equal(json);
+ });
+ });
+
+ describe('UTF-8 as a string', function() {
+ it('should parse', function() {
+ var po = fs.readFileSync(__dirname + '/fixtures/utf8.po', 'utf-8');
+ var json = JSON.parse(fs.readFileSync(__dirname + '/fixtures/utf8-po.json', 'utf-8'));
+ var parsed = gettextParser.po.parse(po);
+ expect(parsed).to.deep.equal(json);
+ });
+ });
+
+ describe('Stream input', function() {
+ it('should parse', function(done) {
+ var po = fs.createReadStream(__dirname + '/fixtures/utf8.po', {
+ highWaterMark: 1 // ensure that any utf-8 sequences will be broken when streaming
+ });
+ var json = JSON.parse(fs.readFileSync(__dirname + '/fixtures/utf8-po.json', 'utf-8'));
+
+ var parsed;
+ var stream = po.pipe(gettextParser.po.createParseStream({
+ initialTreshold: 800 // how many bytes to cache for parsing the header
+ }));
+ stream.on('data', function(data) {
+ parsed = data;
+ });
+ stream.on('end', function() {
+ expect(parsed).to.deep.equal(json);
+ done();
+ });
+
+ });
+ });
+
+ describe('Latin-13', function() {
+ it('should parse', function() {
+ var po = fs.readFileSync(__dirname + '/fixtures/latin13.po');
+ var json = JSON.parse(fs.readFileSync(__dirname + '/fixtures/latin13-po.json', 'utf-8'));
+ var parsed = gettextParser.po.parse(po);
+ expect(parsed).to.deep.equal(json);
+ });
+ });
+
+});
\ No newline at end of file
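The stream test above is the only coverage of the streaming entry point: po.createParseStream returns a transform stream that buffers the PO source and emits the fully parsed object as a single data event. A usage sketch outside mocha; the input path is invented, and calling createParseStream with no options (falling back to the library defaults instead of passing initialTreshold) is an assumption:

'use strict';

var fs = require('fs');
var gettextParser = require('gettext-parser');

fs.createReadStream('./messages.po')
    .pipe(gettextParser.po.createParseStream())
    .on('data', function(parsed) {
        // Emitted once, with the same { charset, headers, translations } object
        // that po.parse would return for the whole file.
        console.log(Object.keys(parsed.translations));
    });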