var fs = require('fs');
var peg = require('pegjs');

// Read a PEG.js grammar, generate parser source, and wrap it as a CommonJS
// module that takes plywood and chronoshift as arguments.
function mkParser(pegjsFilename, outputFilename) {
  var grammar = fs.readFileSync(pegjsFilename, 'utf8');
  try {
    var parserSrc = peg.generate(grammar, {
      format: 'bare',
      output: 'source',
      optimize: 'speed' // or 'size'
    });
  } catch (e) {
    console.error(e);
    process.exit(1);
  }
  // Replace the IIFE wrapper emitted by PEG.js with an exported factory function.
  parserSrc = 'module.exports =\n' + parserSrc
    .replace("\n(function() {\n", "\nfunction(plywood, chronoshift) {\n")
    .replace("\n})()", "\n}");
  fs.writeFileSync(outputFilename, parserSrc, 'utf8');
}
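// Example usage (a minimal sketch; the file names below are hypothetical,
// not taken from the snippet above):
mkParser('expression.pegjs', 'expressionParser.js');
// The generated module is later instantiated with its dependencies, e.g.:
// var parseExpression = require('./expressionParser')(plywood, chronoshift);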
path.exists(parserFile, function(file) {
  if (file) {
    if (debug) console.log('Found a parser');
    parser = require(parserFile);
  } else {
    // Re-generate a parser
    try {
      if (debug) console.log('Re-building a parser ...');
      grammar = grammar + macro;
      start = new Date();
      parser = PEG.buildParser(grammar, { cache: true, trackLineAndColumn: true });
      end = new Date();
      if (debug) console.log('Done.\nTime: %ds.\n', (end.getTime() - start.getTime()) / 1000);
    } catch (e) {
      // console.log("Line " + e.line + ", column " + e.column + ": " + e.message + "\n");
      printErrorMessage(e);
      process.exit(1);
    }
  }
  // Make a parse tree
  try {
    if (debug) console.log('Parsing the JavaScript code ...');
    start = new Date();
    tree = parser.parse(jsCode, 'start');
    end = new Date();
    if (debug) console.log('Done.\nTime: %ds.\n%s', (end.getTime() - start.getTime()) / 1000, JSON.stringify(tree, null, 2));
  } catch (e) {
    printErrorMessage(e);
    process.exit(1);
  }
});
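// printErrorMessage is defined elsewhere in this script; a minimal sketch,
// assuming it simply mirrors the commented-out format above, might be:
function printErrorMessage(e) {
  if (e.line !== undefined && e.column !== undefined) {
    console.error('Line ' + e.line + ', column ' + e.column + ': ' + e.message);
  } else {
    console.error(e.message);
  }
}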
" case " + op.MATCH_STRING + ":", // MATCH_STRING s, a, f, ...
indent10(generateCondition(
  "input.substr(peg$currPos, (peg$consts[bc[ip + 1]] as string).length) === peg$consts[bc[ip + 1]]",
  1
)),
"",
" case " + op.MATCH_STRING_IC + ":", // MATCH_STRING_IC s, a, f, ...
indent10(generateCondition(
  "input.substr(peg$currPos, (peg$consts[bc[ip + 1]] as string).length).toLowerCase() === peg$consts[bc[ip + 1]]",
  1
)),
"",
" case " + op.MATCH_REGEXP + ":", // MATCH_REGEXP r, a, f, ...
indent10(generateCondition(
  "(peg$consts[bc[ip + 1]] as RegExp).test(input.charAt(peg$currPos))",
  1
)),
"",
" case " + op.ACCEPT_N + ":", // ACCEPT_N n
" stack.push(input.substr(peg$currPos, bc[ip + 1]));",
" peg$currPos += bc[ip + 1];",
" ip += 2;",
" break;",
"",
" case " + op.ACCEPT_STRING + ":", // ACCEPT_STRING s
" stack.push(peg$consts[bc[ip + 1]]);",
" peg$currPos += (peg$consts[bc[ip + 1]] as string).length;",
" ip += 2;",
" break;",
"",
" case " + op.FAIL + ":", // FAIL e
" stack.push(peg$FAILED);",
" if (peg$silentFails === 0) {",
" peg$fail(peg$consts[bc[ip + 1]] as ILiteralExpectation);",
" }",
" ip += 2;",
" break;",
"",
" case " + op.LOAD_SAVED_POS + ":", // LOAD_SAVED_POS p
" peg$savedPos = stack[stack.length - 1 - bc[ip + 1]];",
" ip += 2;",
" break;",

case op.MATCH_STRING_IC: // MATCH_STRING_IC s, a, f, ...
  compileCondition(
    "input.substr(peg$currPos, " +
      eval(ast.consts[bc[ip + 1]]).length +
      ").toLowerCase() === " +
      c(bc[ip + 1]),
    1
  );
  break;

case op.MATCH_REGEXP: // MATCH_REGEXP r, a, f, ...
  compileCondition(
    c(bc[ip + 1]) + ".test(input.charAt(peg$currPos))",
    1
  );
  break;

case op.ACCEPT_N: // ACCEPT_N n
  parts.push(stack.push(
    bc[ip + 1] > 1
      ? "input.substr(peg$currPos, " + bc[ip + 1] + ")"
      : "input.charAt(peg$currPos)"
  ));
  parts.push(
    bc[ip + 1] > 1
      ? "peg$currPos += " + bc[ip + 1] + ";"
      : "peg$currPos++;"
  );
  ip += 2;
  break;

case op.ACCEPT_STRING: // ACCEPT_STRING s
  parts.push(stack.push(c(bc[ip + 1])));
  parts.push(
    eval(ast.consts[bc[ip + 1]]).length > 1
      ? "peg$currPos += " + eval(ast.consts[bc[ip + 1]]).length + ";"
      : "peg$currPos++;"
  );
  ip += 2;
  break;
const fs = require('fs');
const path = require('path');
const peg = require('pegjs');
const pegUtil = require('pegjs-util'); // assumed: the PEGUtil helpers from the pegjs-util package

// The content of the grammar file
const pegContent = fs.readFileSync(__dirname + path.sep + pegFileName, 'utf8');

// Only these rules may be used as parser start rules
const allowedStartRules = [
  'CompilationUnit',
  'TypeDeclaration',
  'ClassBodyDeclaration',
  'BlockStatement',
  'Expression',
  'Type',
];

// The generated peg parser
// Use cache to improve performance
const parser = peg.generate(pegContent, {
  cache: true,
  allowedStartRules,
});
// Parse the source code into AST nodes
const parse = (src, options) => {
  const result = pegUtil.parse(parser, src, options);
  if (result.error) {
    throw new Error("ERROR: Parsing Failure:\n" +
      pegUtil.errorMessage(result.error, true).replace(/^/mg, "ERROR: "));
  } else {
    return result.ast;
  }
};
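// Example usage (a sketch; the Java fragment and start rule are illustrative,
// and this assumes pegjs-util passes options such as startRule through to the
// generated parser):
const ast = parse('int x = 1;', { startRule: 'BlockStatement' });
console.log(JSON.stringify(ast, null, 2));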
import fs from 'fs';
import path from 'path';

let grammar;
try {
  // Use the prebuilt './link' parser module when it is available
  grammar = require('./link'); // eslint-disable-line
} catch (ex) {
  // Fall back to compiling the grammar on the fly when running from ES2015 source
  const peg = require('pegjs'); // eslint-disable-line
  grammar = peg.generate(
    fs.readFileSync(path.join(__dirname, 'link.pegjs'), 'utf8')
  );
}

module.exports = {
  grammar,
};
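// Example usage (a sketch; './grammar' and linkSource are placeholders, since
// the module's file name and the link syntax defined in link.pegjs are not
// shown here):
const { grammar } = require('./grammar');
const linkAst = grammar.parse(linkSource); // any string matching the rules in link.pegjs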
// RESULT peg.js master/HEAD
// parse0 x 26.06 ops/sec ±5.00% (49 runs sampled)
// parse1 x 123 ops/sec ±1.32% (81 runs sampled)
// parse2 x 121 ops/sec ±1.08% (80 runs sampled)
// Fastest is parse1
var fs = require("fs");
var content = fs.readFileSync("./vvakame.re", {encoding: "utf8"});
var PEG = require("pegjs");
var reviewParser = require("../resources/grammar").PEG;
var baseGrammar = fs.readFileSync("../resources/grammar.pegjs", {encoding: "utf8"});
var base1 = PEG.buildParser(baseGrammar);
fs.writeFileSync("./base1.js", base1.parse.toString() + "\n module.exports = peg$parse;");
var base1parser = require("./base1");
var base2 = PEG.buildParser(baseGrammar, {cache: true});
fs.writeFileSync("./base2.js", base2.parse.toString() + "\n module.exports = peg$parse;");
var base2parser = require("./base2");
var benchmark = require('benchmark');
var suite = new benchmark.Suite();
suite
  .add("parse0", function() {
    reviewParser.parse(content);
  })
  .add("parse1", function() {
    base1parser(content);
  })
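  // A sketch of the rest of the chain, assuming the standard benchmark.js
  // reporting idiom behind the RESULT lines above:
  .add("parse2", function() {
    base2parser(content);
  })
  .on("cycle", function(event) {
    console.log(String(event.target));
  })
  .on("complete", function() {
    console.log("Fastest is " + this.filter("fastest").map("name"));
  })
  .run();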
PegTokenizer.prototype.initTokenizer = function() {
  // Construct a singleton static tokenizer.
  var pegSrcPath = path.join(__dirname, 'pegTokenizer.pegjs');
  this.src = fs.readFileSync(pegSrcPath, 'utf8');

  // FIXME: Don't report infinite loops, i.e. repeated subexpressions which
  // can match the empty string, since our grammar gives several false
  // positives (or perhaps true positives).
  delete PEG.compiler.passes.check.reportInfiniteLoops;

  function cacheRuleHook(opts) {
    var maxVisitCount = 20;
    return {
      start: [
        [
          'var checkCache = visitCounts[', opts.startPos,
          '] > ', maxVisitCount, ';',
        ].join(''),
        'var cached, bucket, key;',
        'if (checkCache) {',
        [
          ' key = (', opts.variantIndex, '+',
          opts.variantCount, '*', opts.ruleIndex,
          ').toString() + stops.key;',
        ].join(''),
literal(node) {
  if (node.value.length > 0) {
    let stringIndex = addConst("\""
      + js.stringEscape(
          node.ignoreCase ? node.value.toLowerCase() : node.value
        )
      + "\""
    );
    let expectedIndex = addConst(
      "peg$literalExpectation("
        + "\"" + js.stringEscape(node.value) + "\", "
        + node.ignoreCase
        + ")"
    );

    // For case-sensitive strings the value must match the beginning of the
    // remaining input exactly. As a result, we can use |ACCEPT_STRING| and
    // save one |substr| call that would be needed if we used |ACCEPT_N|.
    return buildCondition(
      node.ignoreCase
        ? [op.MATCH_STRING_IC, stringIndex]
        : [op.MATCH_STRING, stringIndex],
      node.ignoreCase
        ? [op.ACCEPT_N, node.value.length]
        : [op.ACCEPT_STRING, stringIndex],
      [op.FAIL, expectedIndex]
    );
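// For illustration: in a grammar, a plain literal takes the MATCH_STRING /
// ACCEPT_STRING path above, while an "i"-suffixed literal takes the
// MATCH_STRING_IC / ACCEPT_N path (a sketch; the rule names are illustrative).
const peg = require('pegjs');
const demo = peg.generate([
  'start   = keyword / loose',
  'keyword = "select"',   // case-sensitive literal
  'loose   = "select"i'   // case-insensitive literal
].join('\n'));
demo.parse('SELECT'); // fails the case-sensitive alternative, matches the "i" one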