test_runner: add 'test:summary' event
This commit adds a new 'test:summary' event to the test runner's
reporting interface. This new event serves two purposes:

- In the future, the test runner internals will no longer need to
  change the process exit code. This may be important to run() users.
  Unfortunately, this is a breaking change, so it needs to happen in
  a major version.
- The reporting interface now has a single event that can identify
  passing or failing test runs.

Refs: https://github.com/nodejs/node/issues/53867
Refs: https://github.com/nodejs/node/issues/54812
PR-URL: https://github.com/nodejs/node/pull/54851
Reviewed-By: Chemi Atlow <chemi@atlow.co.il>
Reviewed-By: Moshe Atlow <moshe@atlow.co.il>
Reviewed-By: Matteo Collina <matteo.collina@gmail.com>
This commit is contained in:
parent 5a966714c1
commit 059e08bb21
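The second point is easiest to see from the `run()` side. Below is a minimal sketch of the new consumption pattern the commit message describes, assuming the documented `run()` API; `./example.test.js` is a placeholder, not part of the patch:

```js
'use strict';
const { run } = require('node:test');

// A single event now answers "did the run pass?". `success` is false for
// any error condition: failing or cancelled tests, unmet coverage
// thresholds, and so on.
run({ files: ['./example.test.js'] }).on('test:summary', (data) => {
  if (!data.success) {
    process.exitCode = 1;
  }
});
```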
@@ -3042,6 +3042,31 @@ This event is only emitted if `--test` flag is passed.
 This event is not guaranteed to be emitted in the same order as the tests are
 defined.
 
+### Event: `'test:summary'`
+
+* `data` {Object}
+  * `counts` {Object} An object containing the counts of various test results.
+    * `cancelled` {number} The total number of cancelled tests.
+    * `failed` {number} The total number of failed tests.
+    * `passed` {number} The total number of passed tests.
+    * `skipped` {number} The total number of skipped tests.
+    * `suites` {number} The total number of suites run.
+    * `tests` {number} The total number of tests run, excluding suites.
+    * `todo` {number} The total number of TODO tests.
+    * `topLevel` {number} The total number of top level tests and suites.
+  * `duration_ms` {number} The duration of the test run in milliseconds.
+  * `file` {string|undefined} The path of the test file that generated the
+    summary. If the summary corresponds to multiple files, this value is
+    `undefined`.
+  * `success` {boolean} Indicates whether the test run is considered
+    successful. If any error condition occurs, such as a failing test or an
+    unmet coverage threshold, this value will be set to `false`.
+
+Emitted when a test run completes. This event contains metrics pertaining to
+the completed test run, and is useful for determining if a test run passed or
+failed. If process-level test isolation is used, a `'test:summary'` event is
+generated for each test file in addition to a final cumulative summary.
+
 ### Event: `'test:watch:drained'`
 
 Emitted when no more tests are queued for execution in watch mode.
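For illustration, a short sketch of inspecting the payload described above; the file paths and logging are placeholders, and the `isolation` option is assumed from the documented `run()` API:

```js
'use strict';
const { run } = require('node:test');

run({ files: ['./a.test.js', './b.test.js'], isolation: 'process' })
  .on('test:summary', ({ counts, duration_ms, file, success }) => {
    // With process-level isolation, each file gets its own summary;
    // the final cumulative summary arrives with `file` undefined.
    const scope = file ?? 'cumulative';
    console.log(`${scope}: ${counts.passed} passed, ${counts.failed} failed, ` +
      `${counts.skipped} skipped in ${duration_ms}ms (success=${success})`);
  });
```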
@@ -31,8 +31,8 @@ if (isUsingInspector() && options.isolation === 'process') {
 options.globPatterns = ArrayPrototypeSlice(process.argv, 1);
 
 debug('test runner configuration:', options);
-run(options).on('test:fail', (data) => {
-  if (data.todo === undefined || data.todo === false) {
+run(options).on('test:summary', (data) => {
+  if (!data.success) {
     process.exitCode = kGenericUserError;
   }
 });
@@ -52,7 +52,7 @@ function createTestTree(rootTestOptions, globalOptions) {
     resetCounters() {
       harness.counters = {
         __proto__: null,
-        all: 0,
+        tests: 0,
         failed: 0,
         passed: 0,
         cancelled: 0,
@@ -62,6 +62,7 @@ function createTestTree(rootTestOptions, globalOptions) {
         suites: 0,
       };
     },
+    success: true,
     counters: null,
     shouldColorizeTestFiles: shouldColorizeTestFiles(globalOptions.destinations),
     teardown: null,
@@ -130,6 +131,7 @@ function createProcessEventHandler(eventName, rootTest) {
     }
 
     rootTest.diagnostic(msg);
+    rootTest.harness.success = false;
    process.exitCode = kGenericUserError;
     return;
   }
@@ -152,6 +154,7 @@ function configureCoverage(rootTest, globalOptions) {
     const msg = `Warning: Code coverage could not be enabled. ${err}`;
 
     rootTest.diagnostic(msg);
+    rootTest.harness.success = false;
     process.exitCode = kGenericUserError;
   }
 }
@@ -167,6 +170,7 @@ function collectCoverage(rootTest, coverage) {
     summary = coverage.summary();
   } catch (err) {
     rootTest.diagnostic(`Warning: Could not report code coverage. ${err}`);
+    rootTest.harness.success = false;
     process.exitCode = kGenericUserError;
   }
 
@@ -174,6 +178,7 @@ function collectCoverage(rootTest, coverage) {
     coverage.cleanup();
   } catch (err) {
     rootTest.diagnostic(`Warning: Could not clean up code coverage. ${err}`);
+    rootTest.harness.success = false;
     process.exitCode = kGenericUserError;
   }
 
@@ -248,14 +253,16 @@ function lazyBootstrapRoot() {
   if (!globalRoot) {
     // This is where the test runner is bootstrapped when node:test is used
     // without the --test flag or the run() API.
+    const entryFile = process.argv?.[1];
     const rootTestOptions = {
       __proto__: null,
-      entryFile: process.argv?.[1],
+      entryFile,
+      loc: entryFile ? [1, 1, entryFile] : undefined,
     };
     const globalOptions = parseCommandLine();
     createTestTree(rootTestOptions, globalOptions);
-    globalRoot.reporter.on('test:fail', (data) => {
-      if (data.todo === undefined || data.todo === false) {
+    globalRoot.reporter.on('test:summary', (data) => {
+      if (!data.success) {
         process.exitCode = kGenericUserError;
       }
     });
@@ -1043,14 +1043,15 @@ class Test extends AsyncResource {
       reporter.diagnostic(nesting, loc, diagnostics[i]);
     }
 
-    reporter.diagnostic(nesting, loc, `tests ${harness.counters.all}`);
+    const duration = this.duration();
+    reporter.diagnostic(nesting, loc, `tests ${harness.counters.tests}`);
     reporter.diagnostic(nesting, loc, `suites ${harness.counters.suites}`);
     reporter.diagnostic(nesting, loc, `pass ${harness.counters.passed}`);
     reporter.diagnostic(nesting, loc, `fail ${harness.counters.failed}`);
     reporter.diagnostic(nesting, loc, `cancelled ${harness.counters.cancelled}`);
     reporter.diagnostic(nesting, loc, `skipped ${harness.counters.skipped}`);
     reporter.diagnostic(nesting, loc, `todo ${harness.counters.todo}`);
-    reporter.diagnostic(nesting, loc, `duration_ms ${this.duration()}`);
+    reporter.diagnostic(nesting, loc, `duration_ms ${duration}`);
 
     if (coverage) {
       const coverages = [
@@ -1067,6 +1068,7 @@ class Test extends AsyncResource {
       for (let i = 0; i < coverages.length; i++) {
         const { threshold, actual, name } = coverages[i];
         if (actual < threshold) {
+          harness.success = false;
           process.exitCode = kGenericUserError;
           reporter.diagnostic(nesting, loc, `Error: ${NumberPrototypeToFixed(actual, 2)}% ${name} coverage does not meet threshold of ${threshold}%.`);
         }
@@ -1075,6 +1077,10 @@ class Test extends AsyncResource {
       reporter.coverage(nesting, loc, coverage);
     }
 
+    reporter.summary(
+      nesting, loc?.file, harness.success, harness.counters, duration,
+    );
+
     if (harness.watching) {
       this.reported = false;
       harness.resetCounters();
@@ -132,6 +132,16 @@ class TestsStream extends Readable {
     });
   }
 
+  summary(nesting, file, success, counts, duration_ms) {
+    this[kEmitMessage]('test:summary', {
+      __proto__: null,
+      success,
+      counts,
+      duration_ms,
+      file,
+    });
+  }
+
   end() {
     this.#tryPush(null);
   }
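For context, custom reporters receive these messages as `{ type, data }` objects on the stream. A hedged sketch of a reporter that formats the payload built by `summary()` above; the output format is illustrative only:

```js
'use strict';
const { Transform } = require('node:stream');

// Custom reporters are object-mode streams; 'test:summary' events carry the
// { success, counts, duration_ms, file } object assembled above.
module.exports = new Transform({
  writableObjectMode: true,
  transform({ type, data }, encoding, callback) {
    if (type === 'test:summary') {
      const { counts, duration_ms, file, success } = data;
      callback(null, `${file ?? 'all files'}: ${counts.passed}/${counts.tests} ` +
        `passed in ${duration_ms}ms (success=${success})\n`);
      return;
    }
    callback(null);
  },
});
```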
@@ -357,12 +357,14 @@ function countCompletedTest(test, harness = test.root.harness) {
     harness.counters.todo++;
   } else if (test.cancelled) {
     harness.counters.cancelled++;
+    harness.success = false;
   } else if (!test.passed) {
     harness.counters.failed++;
+    harness.success = false;
   } else {
     harness.counters.passed++;
   }
-  harness.counters.all++;
+  harness.counters.tests++;
 }
 
@@ -113,7 +113,7 @@ describe('node:test reporters', { concurrency: true }, () => {
       testFile]);
     assert.strictEqual(child.stderr.toString(), '');
     const stdout = child.stdout.toString();
-    assert.match(stdout, /{"test:enqueue":5,"test:dequeue":5,"test:complete":5,"test:start":4,"test:pass":2,"test:fail":2,"test:plan":2,"test:diagnostic":\d+}$/);
+    assert.match(stdout, /{"test:enqueue":5,"test:dequeue":5,"test:complete":5,"test:start":4,"test:pass":2,"test:fail":2,"test:plan":2,"test:summary":2,"test:diagnostic":\d+}$/);
     assert.strictEqual(stdout.slice(0, filename.length + 2), `${filename} {`);
   });
@@ -125,7 +125,7 @@ describe('node:test reporters', { concurrency: true }, () => {
     assert.strictEqual(child.stderr.toString(), '');
     assert.match(
       child.stdout.toString(),
-      /^package: reporter-cjs{"test:enqueue":5,"test:dequeue":5,"test:complete":5,"test:start":4,"test:pass":2,"test:fail":2,"test:plan":2,"test:diagnostic":\d+}$/,
+      /^package: reporter-cjs{"test:enqueue":5,"test:dequeue":5,"test:complete":5,"test:start":4,"test:pass":2,"test:fail":2,"test:plan":2,"test:summary":2,"test:diagnostic":\d+}$/,
     );
   });
 
@@ -136,7 +136,7 @@ describe('node:test reporters', { concurrency: true }, () => {
     assert.strictEqual(child.stderr.toString(), '');
     assert.match(
       child.stdout.toString(),
-      /^package: reporter-esm{"test:enqueue":5,"test:dequeue":5,"test:complete":5,"test:start":4,"test:pass":2,"test:fail":2,"test:plan":2,"test:diagnostic":\d+}$/,
+      /^package: reporter-esm{"test:enqueue":5,"test:dequeue":5,"test:complete":5,"test:start":4,"test:pass":2,"test:fail":2,"test:plan":2,"test:summary":2,"test:diagnostic":\d+}$/,
     );
   });
 
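The fixture reporters these tests exercise are not shown in this diff; presumably they tally event types into the JSON object the regexes match, which is why the expected output gains `"test:summary":2` (one per-file summary plus the final cumulative one). A hypothetical sketch of such a tally reporter, with all names illustrative:

```js
'use strict';
const { Transform } = require('node:stream');

// Counts how many times each event type is seen, then emits the tally as
// JSON when the run ends, matching the shape the regexes above assert on.
const counters = { __proto__: null };

module.exports = new Transform({
  writableObjectMode: true,
  transform(event, encoding, callback) {
    counters[event.type] = (counters[event.type] ?? 0) + 1;
    callback(null);
  },
  flush(callback) {
    callback(null, JSON.stringify(counters));
  },
});
```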