diff --git a/doc/api/test.md b/doc/api/test.md index 771563f013f87d..26c19b8aab5925 100644 --- a/doc/api/test.md +++ b/doc/api/test.md @@ -3042,6 +3042,31 @@ This event is only emitted if `--test` flag is passed. This event is not guaranteed to be emitted in the same order as the tests are defined. +### Event: `'test:summary'` + +* `data` {Object} + * `counts` {Object} An object containing the counts of various test results. + * `cancelled` {number} The total number of cancelled tests. + * `failed` {number} The total number of failed tests. + * `passed` {number} The total number of passed tests. + * `skipped` {number} The total number of skipped tests. + * `suites` {number} The total number of suites run. + * `tests` {number} The total number of tests run, excluding suites. + * `todo` {number} The total number of TODO tests. + * `topLevel` {number} The total number of top level tests and suites. + * `duration_ms` {number} The duration of the test run in milliseconds. + * `file` {string|undefined} The path of the test file that generated the + summary. If the summary corresponds to multiple files, this value is + `undefined`. + * `success` {boolean} Indicates whether or not the test run is considered + successful. If any error condition occurs, such as a failing test or + unmet coverage threshold, this value will be set to `false`. + +Emitted when a test run completes. This event contains metrics pertaining to +the completed test run, and is useful for determining if a test run passed or +failed. If process-level test isolation is used, a `'test:summary'` event is +generated for each test file in addition to a final cumulative summary. + ### Event: `'test:watch:drained'` Emitted when no more tests are queued for execution in watch mode. 
diff --git a/lib/internal/main/test_runner.js b/lib/internal/main/test_runner.js index b1f69b07771ac6..87c7dca048b72d 100644 --- a/lib/internal/main/test_runner.js +++ b/lib/internal/main/test_runner.js @@ -31,8 +31,8 @@ if (isUsingInspector() && options.isolation === 'process') { options.globPatterns = ArrayPrototypeSlice(process.argv, 1); debug('test runner configuration:', options); -run(options).on('test:fail', (data) => { - if (data.todo === undefined || data.todo === false) { +run(options).on('test:summary', (data) => { + if (!data.success) { process.exitCode = kGenericUserError; } }); diff --git a/lib/internal/test_runner/harness.js b/lib/internal/test_runner/harness.js index 721d99826ade13..718e15cf024745 100644 --- a/lib/internal/test_runner/harness.js +++ b/lib/internal/test_runner/harness.js @@ -52,7 +52,7 @@ function createTestTree(rootTestOptions, globalOptions) { resetCounters() { harness.counters = { __proto__: null, - all: 0, + tests: 0, failed: 0, passed: 0, cancelled: 0, @@ -62,6 +62,7 @@ function createTestTree(rootTestOptions, globalOptions) { suites: 0, }; }, + success: true, counters: null, shouldColorizeTestFiles: shouldColorizeTestFiles(globalOptions.destinations), teardown: null, @@ -130,6 +131,7 @@ function createProcessEventHandler(eventName, rootTest) { } rootTest.diagnostic(msg); + rootTest.harness.success = false; process.exitCode = kGenericUserError; return; } @@ -152,6 +154,7 @@ function configureCoverage(rootTest, globalOptions) { const msg = `Warning: Code coverage could not be enabled. ${err}`; rootTest.diagnostic(msg); + rootTest.harness.success = false; process.exitCode = kGenericUserError; } } @@ -167,6 +170,7 @@ function collectCoverage(rootTest, coverage) { summary = coverage.summary(); } catch (err) { rootTest.diagnostic(`Warning: Could not report code coverage. 
${err}`); + rootTest.harness.success = false; process.exitCode = kGenericUserError; } @@ -174,6 +178,7 @@ function collectCoverage(rootTest, coverage) { coverage.cleanup(); } catch (err) { rootTest.diagnostic(`Warning: Could not clean up code coverage. ${err}`); + rootTest.harness.success = false; process.exitCode = kGenericUserError; } @@ -248,14 +253,16 @@ function lazyBootstrapRoot() { if (!globalRoot) { // This is where the test runner is bootstrapped when node:test is used // without the --test flag or the run() API. + const entryFile = process.argv?.[1]; const rootTestOptions = { __proto__: null, - entryFile: process.argv?.[1], + entryFile, + loc: entryFile ? [1, 1, entryFile] : undefined, }; const globalOptions = parseCommandLine(); createTestTree(rootTestOptions, globalOptions); - globalRoot.reporter.on('test:fail', (data) => { - if (data.todo === undefined || data.todo === false) { + globalRoot.reporter.on('test:summary', (data) => { + if (!data.success) { process.exitCode = kGenericUserError; } }); diff --git a/lib/internal/test_runner/test.js b/lib/internal/test_runner/test.js index 029887bf18799b..4331079f74e95a 100644 --- a/lib/internal/test_runner/test.js +++ b/lib/internal/test_runner/test.js @@ -1043,14 +1043,15 @@ class Test extends AsyncResource { reporter.diagnostic(nesting, loc, diagnostics[i]); } - reporter.diagnostic(nesting, loc, `tests ${harness.counters.all}`); + const duration = this.duration(); + reporter.diagnostic(nesting, loc, `tests ${harness.counters.tests}`); reporter.diagnostic(nesting, loc, `suites ${harness.counters.suites}`); reporter.diagnostic(nesting, loc, `pass ${harness.counters.passed}`); reporter.diagnostic(nesting, loc, `fail ${harness.counters.failed}`); reporter.diagnostic(nesting, loc, `cancelled ${harness.counters.cancelled}`); reporter.diagnostic(nesting, loc, `skipped ${harness.counters.skipped}`); reporter.diagnostic(nesting, loc, `todo ${harness.counters.todo}`); - reporter.diagnostic(nesting, loc, `duration_ms 
${this.duration()}`); + reporter.diagnostic(nesting, loc, `duration_ms ${duration}`); if (coverage) { const coverages = [ @@ -1067,6 +1068,7 @@ class Test extends AsyncResource { for (let i = 0; i < coverages.length; i++) { const { threshold, actual, name } = coverages[i]; if (actual < threshold) { + harness.success = false; process.exitCode = kGenericUserError; reporter.diagnostic(nesting, loc, `Error: ${NumberPrototypeToFixed(actual, 2)}% ${name} coverage does not meet threshold of ${threshold}%.`); } @@ -1075,6 +1077,10 @@ class Test extends AsyncResource { reporter.coverage(nesting, loc, coverage); } + reporter.summary( + nesting, loc?.file, harness.success, harness.counters, duration, + ); + if (harness.watching) { this.reported = false; harness.resetCounters(); diff --git a/lib/internal/test_runner/tests_stream.js b/lib/internal/test_runner/tests_stream.js index 08d4397ae64a3c..ecbc407e01f318 100644 --- a/lib/internal/test_runner/tests_stream.js +++ b/lib/internal/test_runner/tests_stream.js @@ -132,6 +132,16 @@ class TestsStream extends Readable { }); } + summary(nesting, file, success, counts, duration_ms) { + this[kEmitMessage]('test:summary', { + __proto__: null, + success, + counts, + duration_ms, + file, + }); + } + end() { this.#tryPush(null); } diff --git a/lib/internal/test_runner/utils.js b/lib/internal/test_runner/utils.js index 9ad924bcd58793..67ce601c1dfe5f 100644 --- a/lib/internal/test_runner/utils.js +++ b/lib/internal/test_runner/utils.js @@ -357,12 +357,14 @@ function countCompletedTest(test, harness = test.root.harness) { harness.counters.todo++; } else if (test.cancelled) { harness.counters.cancelled++; + harness.success = false; } else if (!test.passed) { harness.counters.failed++; + harness.success = false; } else { harness.counters.passed++; } - harness.counters.all++; + harness.counters.tests++; } diff --git a/test/parallel/test-runner-reporters.js b/test/parallel/test-runner-reporters.js index f3adae7ab6dd10..b557cef1b9bef8 100644 --- 
a/test/parallel/test-runner-reporters.js +++ b/test/parallel/test-runner-reporters.js @@ -113,7 +113,7 @@ describe('node:test reporters', { concurrency: true }, () => { testFile]); assert.strictEqual(child.stderr.toString(), ''); const stdout = child.stdout.toString(); - assert.match(stdout, /{"test:enqueue":5,"test:dequeue":5,"test:complete":5,"test:start":4,"test:pass":2,"test:fail":2,"test:plan":2,"test:diagnostic":\d+}$/); + assert.match(stdout, /{"test:enqueue":5,"test:dequeue":5,"test:complete":5,"test:start":4,"test:pass":2,"test:fail":2,"test:plan":2,"test:summary":2,"test:diagnostic":\d+}$/); assert.strictEqual(stdout.slice(0, filename.length + 2), `${filename} {`); }); }); @@ -125,7 +125,7 @@ describe('node:test reporters', { concurrency: true }, () => { testFile]); assert.strictEqual(child.stderr.toString(), ''); assert.match( child.stdout.toString(), - /^package: reporter-cjs{"test:enqueue":5,"test:dequeue":5,"test:complete":5,"test:start":4,"test:pass":2,"test:fail":2,"test:plan":2,"test:diagnostic":\d+}$/, + /^package: reporter-cjs{"test:enqueue":5,"test:dequeue":5,"test:complete":5,"test:start":4,"test:pass":2,"test:fail":2,"test:plan":2,"test:summary":2,"test:diagnostic":\d+}$/, ); }); @@ -136,7 +136,7 @@ describe('node:test reporters', { concurrency: true }, () => { testFile]); assert.strictEqual(child.stderr.toString(), ''); assert.match( child.stdout.toString(), - /^package: reporter-esm{"test:enqueue":5,"test:dequeue":5,"test:complete":5,"test:start":4,"test:pass":2,"test:fail":2,"test:plan":2,"test:diagnostic":\d+}$/, + /^package: reporter-esm{"test:enqueue":5,"test:dequeue":5,"test:complete":5,"test:start":4,"test:pass":2,"test:fail":2,"test:plan":2,"test:summary":2,"test:diagnostic":\d+}$/, ); });