feat(test): use structured data for JavaScript errors in tests (#14287)

This commit rewrites the test runner to send structured error data from
JavaScript to Rust instead of passing pre-formatted strings. This makes it
possible to customize how errors are displayed in the test report (which will
be addressed in follow-up commits).
Bartek Iwańczuk 2022-04-16 19:51:12 +02:00 committed by GitHub
parent 32aaefd9ee
commit 5f2d9a4a22
11 changed files with 98 additions and 23 deletions
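In short: the JavaScript test runner now reports a destructured error object (via core.destructureError) instead of a pre-formatted string, the Rust side deserializes it into deno_core::error::JsError, and each reporter decides how to render it when printing. The new shape of the result types, condensed from the Rust diff below (derives omitted):

use deno_core::error::JsError;

pub enum TestResult {
  Ok,
  Ignored,
  Failed(Box<JsError>),          // was Failed(String)
}

pub enum TestStepResult {
  Ok,
  Ignored,
  Failed(Option<Box<JsError>>),  // was Failed(Option<String>)
  Pending(Option<Box<JsError>>), // was Pending(Option<String>)
}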


@@ -8,6 +8,7 @@ use crate::checksum;
use crate::create_main_worker;
use crate::emit;
use crate::flags;
use crate::fmt_errors::PrettyJsError;
use crate::located_script_name;
use crate::lsp::client::Client;
use crate::lsp::client::TestingNotification;
@@ -797,10 +798,14 @@ impl test::TestReporter for LspTestReporter {
test: desc.into(),
})
}
test::TestResult::Failed(message) => {
test::TestResult::Failed(js_error) => {
let err_string = PrettyJsError::create(*js_error.clone())
.to_string()
.trim_start_matches("Uncaught ")
.to_string();
self.progress(lsp_custom::TestRunProgressMessage::Failed {
test: desc.into(),
messages: as_test_messages(message, false),
messages: as_test_messages(err_string, false),
duration: Some(elapsed as u32),
})
}
@@ -839,9 +844,13 @@ impl test::TestReporter for LspTestReporter {
test: desc.into(),
})
}
test::TestStepResult::Failed(message) => {
let messages = if let Some(message) = message {
as_test_messages(message, false)
test::TestStepResult::Failed(js_error) => {
let messages = if let Some(js_error) = js_error {
let err_string = PrettyJsError::create(*js_error.clone())
.to_string()
.trim_start_matches("Uncaught ")
.to_string();
as_test_messages(err_string, false)
} else {
vec![]
};
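Both failure arms above repeat the same conversion from the boxed JsError into the plain string that the LSP test messages expect. Purely as an illustration (this helper is not part of the commit), the shared logic amounts to:

use deno_core::error::JsError;
use crate::fmt_errors::PrettyJsError;

// Pretty-print the structured error, then drop the "Uncaught " prefix since
// the error was caught by the test runner rather than escaping to the top level.
fn js_error_to_message(js_error: &JsError) -> String {
  PrettyJsError::create(js_error.clone())
    .to_string()
    .trim_start_matches("Uncaught ")
    .to_string()
}

With such a helper, both arms reduce to as_test_messages(js_error_to_message(&js_error), false).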


@@ -5,7 +5,7 @@ Failed assertion ... FAILED ([WILDCARD])
failures:
compat/test_runner/cjs.js > Failed assertion
AssertionError [ERR_ASSERTION]: Values are not strictly equal:
AssertionError: Values are not strictly equal:
[Diff] Actual / Expected
@@ -14,6 +14,8 @@ AssertionError [ERR_ASSERTION]: Values are not strictly equal:
- 10
+ 20
Error.captureStackTrace(this, stackStartFn || stackStartFunction);
^
[WILDCARD]
failures:


@@ -5,7 +5,7 @@ Failed assertion ... FAILED ([WILDCARD])
failures:
compat/test_runner/esm.mjs > Failed assertion
AssertionError [ERR_ASSERTION]: Values are not strictly equal:
AssertionError: Values are not strictly equal:
[Diff] Actual / Expected
@@ -14,6 +14,8 @@ AssertionError [ERR_ASSERTION]: Values are not strictly equal:
- 10
+ 20
Error.captureStackTrace(this, stackStartFn || stackStartFunction);
^
[WILDCARD]
failures:


@@ -8,18 +8,24 @@ failures:
test/exit_sanitizer.ts > exit(0)
AssertionError: Test case attempted to exit with exit code: 0
Deno.exit(0);
^
at [WILDCARD]
at [WILDCARD]/test/exit_sanitizer.ts:2:8
at [WILDCARD]
test/exit_sanitizer.ts > exit(1)
AssertionError: Test case attempted to exit with exit code: 1
Deno.exit(1);
^
at [WILDCARD]
at [WILDCARD]/test/exit_sanitizer.ts:6:8
at [WILDCARD]
test/exit_sanitizer.ts > exit(2)
AssertionError: Test case attempted to exit with exit code: 2
Deno.exit(2);
^
at [WILDCARD]
at [WILDCARD]/test/exit_sanitizer.ts:10:8
at [WILDCARD]


@@ -15,51 +15,71 @@ failures:
test/fail.ts > test 0
Error
throw new Error();
^
at [WILDCARD]/test/fail.ts:2:9
at [WILDCARD]
test/fail.ts > test 1
Error
throw new Error();
^
at [WILDCARD]/test/fail.ts:5:9
at [WILDCARD]
test/fail.ts > test 2
Error
throw new Error();
^
at [WILDCARD]/test/fail.ts:8:9
at [WILDCARD]
test/fail.ts > test 3
Error
throw new Error();
^
at [WILDCARD]/test/fail.ts:11:9
at [WILDCARD]
test/fail.ts > test 4
Error
throw new Error();
^
at [WILDCARD]/test/fail.ts:14:9
at [WILDCARD]
test/fail.ts > test 5
Error
throw new Error();
^
at [WILDCARD]/test/fail.ts:17:9
at [WILDCARD]
test/fail.ts > test 6
Error
throw new Error();
^
at [WILDCARD]/test/fail.ts:20:9
at [WILDCARD]
test/fail.ts > test 7
Error
throw new Error();
^
at [WILDCARD]/test/fail.ts:23:9
at [WILDCARD]
test/fail.ts > test 8
Error
throw new Error();
^
at [WILDCARD]/test/fail.ts:26:9
at [WILDCARD]
test/fail.ts > test 9
Error
throw new Error();
^
at [WILDCARD]/test/fail.ts:29:9
at [WILDCARD]


@@ -6,6 +6,8 @@ failures:
test/fail_fast.ts > test 1
Error
throw new Error();
^
at [WILDCARD]/test/fail_fast.ts:2:9
at [WILDCARD]


@@ -7,6 +7,8 @@ failures:
test/finally_timeout.ts > error
Error: fail
throw new Error("fail");
^
at [WILDCARD]/test/finally_timeout.ts:4:11
at [WILDCARD]


@@ -4,6 +4,8 @@ nested failure ...
step 1 ...
inner 1 ... FAILED ([WILDCARD])
Error: Failed.
throw new Error("Failed.");
^
at [WILDCARD]/failing_steps.ts:[WILDCARD]
[WILDCARD]
inner 2 ... ok ([WILDCARD])
@@ -12,15 +14,21 @@ FAILED ([WILDCARD])
multiple test step failures ...
step 1 ... FAILED ([WILDCARD])
Error: Fail.
throw new Error("Fail.");
^
[WILDCARD]
step 2 ... FAILED ([WILDCARD])
Error: Fail.
await t.step("step 2", () => Promise.reject(new Error("Fail.")));
^
at [WILDCARD]/failing_steps.ts:[WILDCARD]
[WILDCARD]
FAILED ([WILDCARD])
failing step in failing test ...
step 1 ... FAILED ([WILDCARD])
Error: Fail.
throw new Error("Fail.");
^
at [WILDCARD]/failing_steps.ts:[WILDCARD]
at [WILDCARD]
FAILED ([WILDCARD])
@@ -39,6 +47,8 @@ Error: 2 test steps failed.
test/steps/failing_steps.ts > failing step in failing test
Error: Fail test.
throw new Error("Fail test.");
^
at [WILDCARD]/failing_steps.ts:[WILDCARD]
at [WILDCARD]


@@ -14,6 +14,8 @@ inner missing await ...
at testStepSanitizer [WILDCARD]
FAILED ([WILDCARD])
Error: There were still test steps running after the current scope finished execution. Ensure all steps are awaited (ex. `await t.step(...)`).
await t.step("step", (t) => {
^
at postValidation [WILDCARD]
at testStepSanitizer [WILDCARD]
at async fn ([WILDCARD]/invalid_usage.ts:[WILDCARD])
@@ -24,6 +26,8 @@ parallel steps with sanitizers ...
step 2 ... FAILED ([WILDCARD])
Error: Cannot start test step while another test step with sanitizers is running.
* parallel steps with sanitizers > step 1
await t.step("step 2", () => {});
^
at preValidation ([WILDCARD])
at testStepSanitizer ([WILDCARD])
at [WILDCARD]/invalid_usage.ts:[WILDCARD]
@@ -34,6 +38,8 @@ parallel steps when first has sanitizer ...
step 2 ... FAILED ([WILDCARD])
Error: Cannot start test step while another test step with sanitizers is running.
* parallel steps when first has sanitizer > step 1
await t.step({
^
at preValidation ([WILDCARD])
at testStepSanitizer ([WILDCARD])
at [WILDCARD]/invalid_usage.ts:[WILDCARD]
@@ -44,6 +50,8 @@ parallel steps when second has sanitizer ...
step 2 ... FAILED ([WILDCARD])
Error: Cannot start test step with sanitizers while another test step is running.
* parallel steps when second has sanitizer > step 1
await t.step({
^
at preValidation ([WILDCARD])
at testStepSanitizer ([WILDCARD])
at [WILDCARD]/invalid_usage.ts:[WILDCARD]
@@ -57,6 +65,8 @@ parallel steps where only inner tests have sanitizers ...
step inner ... FAILED ([WILDCARD])
Error: Cannot start test step with sanitizers while another test step is running.
* parallel steps where only inner tests have sanitizers > step 1
await t.step({
^
at preValidation ([WILDCARD])
at testStepSanitizer ([WILDCARD])
at [WILDCARD]/invalid_usage.ts:[WILDCARD]
@@ -67,6 +77,8 @@ failures:
test/steps/invalid_usage.ts > capturing
Error: Cannot run test step after parent scope has finished execution. Ensure any `.step(...)` calls are executed before their parent scope completes execution.
await capturedContext.step("next step", () => {});
^
at TestContext.step ([WILDCARD])
at [WILDCARD]/invalid_usage.ts:[WILDCARD]
at [WILDCARD]


@@ -13,6 +13,7 @@ use crate::file_watcher::ResolutionResult;
use crate::flags::Flags;
use crate::flags::TestFlags;
use crate::flags::TypeCheckMode;
use crate::fmt_errors::PrettyJsError;
use crate::fs_util::collect_specifiers;
use crate::fs_util::is_supported_test_ext;
use crate::fs_util::is_supported_test_path;
@@ -31,6 +32,7 @@ use deno_ast::swc::common::comments::CommentKind;
use deno_ast::MediaType;
use deno_core::error::generic_error;
use deno_core::error::AnyError;
use deno_core::error::JsError;
use deno_core::futures::future;
use deno_core::futures::stream;
use deno_core::futures::FutureExt;
@@ -92,7 +94,7 @@ pub enum TestOutput {
pub enum TestResult {
Ok,
Ignored,
Failed(String),
Failed(Box<JsError>),
}
#[derive(Debug, Clone, PartialEq, Deserialize)]
@@ -108,15 +110,15 @@ pub struct TestStepDescription {
pub enum TestStepResult {
Ok,
Ignored,
Failed(Option<String>),
Pending(Option<String>),
Failed(Option<Box<JsError>>),
Pending(Option<Box<JsError>>),
}
impl TestStepResult {
fn error(&self) -> Option<&str> {
fn error(&self) -> Option<&JsError> {
match self {
TestStepResult::Failed(Some(text)) => Some(text.as_str()),
TestStepResult::Pending(Some(text)) => Some(text.as_str()),
TestStepResult::Failed(Some(error)) => Some(error),
TestStepResult::Pending(Some(error)) => Some(error),
_ => None,
}
}
@@ -154,7 +156,7 @@ pub struct TestSummary {
pub ignored_steps: usize,
pub filtered_out: usize,
pub measured: usize,
pub failures: Vec<(TestDescription, String)>,
pub failures: Vec<(TestDescription, Box<JsError>)>,
}
#[derive(Debug, Clone, Deserialize)]
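For context (the op plumbing itself is not part of this diff): the payloads coming from the JavaScript runner are plain JSON, and serde maps them onto these enums. Assuming the usual externally tagged representation with camelCase variant names, which the lowercase "ok"/"failed"/"pending" keys in the JavaScript changes further down suggest, the mapping looks roughly like this (sketch only; parse_result is illustrative):

// "ok"                                  -> TestResult::Ok
// "ignored"                             -> TestResult::Ignored
// { "failed": <destructured JsError> }  -> TestResult::Failed(Box<JsError>)
fn parse_result(payload: deno_core::serde_json::Value)
  -> Result<TestResult, deno_core::serde_json::Error> {
  deno_core::serde_json::from_value(payload)
}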
@@ -294,8 +296,12 @@ impl PrettyTestReporter {
colors::gray(format!("({})", display::human_elapsed(elapsed.into())))
);
if let Some(error_text) = result.error() {
for line in error_text.lines() {
if let Some(js_error) = result.error() {
let err_string = PrettyJsError::create(js_error.clone())
.to_string()
.trim_start_matches("Uncaught ")
.to_string();
for line in err_string.lines() {
println!("{}{}", " ".repeat(description.level + 1), line);
}
}
@@ -445,7 +451,7 @@ impl TestReporter for PrettyTestReporter {
fn report_summary(&mut self, summary: &TestSummary, elapsed: &Duration) {
if !summary.failures.is_empty() {
println!("\nfailures:\n");
for (description, error) in &summary.failures {
for (description, js_error) in &summary.failures {
println!(
"{} {} {}",
colors::gray(
@@ -454,7 +460,11 @@ impl TestReporter for PrettyTestReporter {
colors::gray(">"),
description.name
);
println!("{}", error);
let err_string = PrettyJsError::create(*js_error.clone())
.to_string()
.trim_start_matches("Uncaught ")
.to_string();
println!("{}", err_string);
println!();
}


@@ -804,7 +804,7 @@
await test.fn(step);
const failCount = step.failedChildStepsCount();
return failCount === 0 ? "ok" : {
"failed": formatError(
"failed": core.destructureError(
new Error(
`${failCount} test step${failCount === 1 ? "" : "s"} failed.`,
),
@@ -812,7 +812,7 @@
};
} catch (error) {
return {
"failed": formatError(error),
"failed": core.destructureError(error),
};
} finally {
step.finalized = true;
@@ -1211,11 +1211,11 @@
return "ignored";
case "pending":
return {
"pending": this.error && formatError(this.error),
"pending": this.error && core.destructureError(this.error),
};
case "failed":
return {
"failed": this.error && formatError(this.error),
"failed": this.error && core.destructureError(this.error),
};
default:
throw new Error(`Unhandled status: ${this.status}`);
@@ -1335,7 +1335,7 @@
subStep.status = "ok";
}
} catch (error) {
subStep.error = formatError(error);
subStep.error = error;
subStep.status = "failed";
}