Skip to content

Commit

Permalink
DEV-5579 Use benchmark.js to test performance of fontoxpath with qt3t…
Browse files Browse the repository at this point in the history
…ests
  • Loading branch information
Mehmet Coskun committed Apr 15, 2020
1 parent c6e90d1 commit 46e6cf3
Show file tree
Hide file tree
Showing 8 changed files with 747 additions and 313 deletions.
1 change: 1 addition & 0 deletions .gitignore
Expand Up @@ -2,6 +2,7 @@
/node_modules
/test/assets/QT3TS
/test/assets/XQUTS
/test/assets/runnablePerformanceTestNames.csv
/test/built
/built
/demo/built
Expand Down
12 changes: 12 additions & 0 deletions CONTRIBUTING.md
Expand Up @@ -153,6 +153,7 @@ FontoXPath contains different test sets:
|The QT3 XQueryX tests|`npm run qt3testsxqueryx`|
|The XQUTS tests|`npm run xqutstests`|
|The XQUTS XQueryX tests|`npm run xqutstestsxqueryx`|
|The QT3 performance tests|`npm run performance`|

They all run in Node. By running the tests with the `--inspect` flag,
they can be debugged by the browser: `npm run test -- --inspect
Expand Down Expand Up @@ -184,6 +185,17 @@ If you are adding a new feature, don't forget to edit the file
`test/runnableTestSets.csv`. This file disables tests for features we
have not yet implemented.

To check the performance of fontoxpath we pick a random subset of the
qt3tests, as running all of them would take too long (hours). This random
subset is not checked in but can be generated using
`npm run performance -- --regenerate [<number-of-tests>]`. This will create
and populate `test/runnablePerformanceTestNames.csv`. You can manually
edit this file to run specific tests. By storing these in a file you can
run the same set even when switching between different commits. With the
generated file present you can run the tests using `npm run performance`.
This will run a benchmark for each qt3 test using
[benchmarkjs](https://benchmarkjs.com/).

### Building

When the new function seems to work, you can make a build of
Expand Down
22 changes: 22 additions & 0 deletions package-lock.json

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

3 changes: 3 additions & 0 deletions package.json
Expand Up @@ -18,6 +18,7 @@
"coverage": "nyc report --reporter=text-lcov | coveralls",
"integrationtests": "ts-mocha --require test/testhook.js \"test/specs/parsing/**/*.ts\" -p test/tsconfig.json",
"prepare": "npm run build",
"performance": "ts-node -P test/tsconfig.json -r tsconfig-paths/register test/qt3testsBenchmark.ts",
"qt3tests": "ts-mocha --paths -p test/tsconfig.json test/qt3tests.ts",
"qt3testsxqueryx": "ts-mocha --paths -p test/tsconfig.json test/qt3testsXQueryX.ts",
"start": "concurrently -k -p \"[{name}]\" -n \"TypeScript,Node\" \"tsc -p ./demo/tsconfig.json -w\" \"ts-node ./demo/server.ts --dist\"",
Expand Down Expand Up @@ -61,11 +62,13 @@
"devDependencies": {
"@microsoft/api-extractor": "^7.7.10",
"@tscc/tscc": "^0.4.8",
"@types/benchmark": "^1.0.31",
"@types/chai": "^4.2.11",
"@types/mocha": "^7.0.2",
"@types/node": "^13.5.0",
"@types/node-static": "^0.7.3",
"@types/sinon": "^7.5.2",
"benchmark": "^2.1.4",
"chai": "^4.2.0",
"concurrently": "^5.1.0",
"coveralls": "^3.0.11",
Expand Down
81 changes: 81 additions & 0 deletions test/helpers/getPerformanceTests.ts
@@ -0,0 +1,81 @@
import { evaluateXPathToNodes, evaluateXPathToString, evaluateXPathToFirstNode } from 'fontoxpath';

import testFs from './testFs';
import {
ALL_TESTS_QUERY,
getAllTestSets,
getFile,
unrunnableTestCasesByName,
} from './qt3TestsTools';

const PERFORMANCE_TESTS_FILE = 'runnablePerformanceTestNames.csv';
const DEFAULT_NUMBER_OF_TEST = 50;

const createNewRunnableTests = (numberOfTest) => {
const allTestSets = getAllTestSets();
const allTestNames = [];
const newRunnableTests = new Set();

for (const testSetFileName of allTestSets) {
const testSet = getFile(testSetFileName);

// Find all the tests we can run
const testCases = evaluateXPathToNodes(ALL_TESTS_QUERY, testSet);

for (const testCase of testCases) {
try {
const testName = evaluateXPathToString('./@name', testCase);

if (unrunnableTestCasesByName[testName]) {
continue;
}

const assertionNode = evaluateXPathToFirstNode('./result/*', testCase);
const assertionType = assertionNode && (assertionNode as any).localName;

switch (assertionType) {
case 'assert':
case 'assert-true':
case 'assert-eq':
case 'assert-deep-eq':
case 'assert-empty':
case 'assert-false':
case 'assert-count':
case 'assert-type':
case 'assert-xml':
case 'assert-string-value':
case 'all-of':
case 'any-of':
allTestNames.push(testName);
}
} catch (e) {
// tslint:disable-next-line: no-console
console.error(e);
continue;
}
}
}

while (newRunnableTests.size < numberOfTest) {
const randomTestName = allTestNames[Math.floor(Math.random() * allTestNames.length)];
newRunnableTests.add(randomTestName);
}

testFs.writeFileSync(PERFORMANCE_TESTS_FILE, [...newRunnableTests].join('\n'));
};

export default function getPerformanceTests() {
const regenerateIndex = process.argv.indexOf('--regenerate');
if (regenerateIndex !== -1) {
const nextArg = process.argv[regenerateIndex + 1];
const numberOfTest = nextArg === undefined ? DEFAULT_NUMBER_OF_TEST : parseInt(nextArg, 10);

if (isNaN(numberOfTest) || numberOfTest < 1) {
throw new Error('The parameter for the number of tests must be a positive number.');
}

createNewRunnableTests(numberOfTest);
}

return testFs.readFileSync(PERFORMANCE_TESTS_FILE).split(/\r?\n/);
}

0 comments on commit 46e6cf3

Please sign in to comment.