Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions crates/karva/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -317,6 +317,7 @@ pub(crate) fn test(args: TestCommand) -> Result<ExitStatus> {
let num_workers = args.num_workers;
let dry_run = args.dry_run;
let watch = args.watch;
let last_failed = args.last_failed;

if watch && dry_run {
anyhow::bail!("`--watch` and `--dry-run` cannot be used together");
Expand Down Expand Up @@ -346,6 +347,7 @@ pub(crate) fn test(args: TestCommand) -> Result<ExitStatus> {
num_workers,
no_cache,
create_ctrlc_handler: true,
last_failed,
};

if watch {
Expand Down
133 changes: 133 additions & 0 deletions crates/karva/tests/it/last_failed.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,133 @@
use insta_cmd::assert_cmd_snapshot;

use crate::common::TestContext;

// `--last-failed` after a mixed pass/fail run should re-run only the test
// that failed previously: `test_pass` does not appear in the output at all.
#[test]
fn last_failed_reruns_only_failures() {
    let context = TestContext::with_files([(
        "test_a.py",
        "
def test_pass(): pass
def test_fail(): assert False
",
    )]);

    // First run seeds the last-failed cache with `test_fail`.
    // Output is discarded; only the cached failure list matters here.
    context.command_no_parallel().output().unwrap();

    // Second run with `--last-failed` executes (and reports) only the
    // previously failing test, so the stats show exactly 1 failed, 0 passed.
    assert_cmd_snapshot!(context.command_no_parallel().arg("--last-failed"), @r"
    success: false
    exit_code: 1
    ----- stdout -----
    test test_a::test_fail ... FAILED

    diagnostics:

    error[test-failure]: Test `test_fail` failed
     --> test_a.py:3:5
      |
    2 | def test_pass(): pass
    3 | def test_fail(): assert False
      |     ^^^^^^^^^
      |
    info: Test failed here
     --> test_a.py:3:1
      |
    2 | def test_pass(): pass
    3 | def test_fail(): assert False
      | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
      |

    test result: FAILED. 0 passed; 1 failed; 0 skipped; finished in [TIME]

    ----- stderr -----
    ");
}

// `--lf` is declared as a clap alias for `--last-failed`; this test mirrors
// `last_failed_reruns_only_failures` using the short spelling and expects
// identical behavior and identical output.
#[test]
fn last_failed_lf_alias() {
    let context = TestContext::with_files([(
        "test_a.py",
        "
def test_pass(): pass
def test_fail(): assert False
",
    )]);

    // Seed the last-failed cache: this run records `test_fail` as failed.
    context.command_no_parallel().output().unwrap();

    // The alias must select the same single previously-failing test.
    assert_cmd_snapshot!(context.command_no_parallel().arg("--lf"), @r"
    success: false
    exit_code: 1
    ----- stdout -----
    test test_a::test_fail ... FAILED

    diagnostics:

    error[test-failure]: Test `test_fail` failed
     --> test_a.py:3:5
      |
    2 | def test_pass(): pass
    3 | def test_fail(): assert False
      |     ^^^^^^^^^
      |
    info: Test failed here
     --> test_a.py:3:1
      |
    2 | def test_pass(): pass
    3 | def test_fail(): assert False
      | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
      |

    test result: FAILED. 0 passed; 1 failed; 0 skipped; finished in [TIME]

    ----- stderr -----
    ");
}

// When the previous run recorded no failures (all tests passed), the
// last-failed list is empty and `--last-failed` falls back to running the
// full suite rather than running nothing.
#[test]
fn last_failed_with_no_previous_failures_runs_all() {
    let context = TestContext::with_files([(
        "test_a.py",
        "
def test_one(): pass
def test_two(): pass
",
    )]);

    // First run: everything passes, so the cached failure list is empty.
    context.command_no_parallel().output().unwrap();

    // `--last-failed` with an empty failure list still runs both tests.
    assert_cmd_snapshot!(context.command_no_parallel().arg("--last-failed"), @r"
    success: true
    exit_code: 0
    ----- stdout -----
    test test_a::test_one ... ok
    test test_a::test_two ... ok

    test result: ok. 2 passed; 0 failed; 0 skipped; finished in [TIME]

    ----- stderr -----
    ");
}

// With no prior run at all (no last-failed file in the cache),
// `--last-failed` must behave like a normal run and execute every test
// instead of failing or running nothing.
#[test]
fn last_failed_without_previous_run_runs_all() {
    let context = TestContext::with_files([(
        "test_a.py",
        "
def test_one(): pass
def test_two(): pass
",
    )]);

    // Note: unlike the sibling tests, there is deliberately no seeding run
    // here — the cache has never seen this project.
    assert_cmd_snapshot!(context.command_no_parallel().arg("--last-failed"), @r"
    success: true
    exit_code: 0
    ----- stdout -----
    test test_a::test_one ... ok
    test test_a::test_two ... ok

    test result: ok. 2 passed; 0 failed; 0 skipped; finished in [TIME]

    ----- stderr -----
    ");
}
1 change: 1 addition & 0 deletions crates/karva/tests/it/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -6,5 +6,6 @@ mod cache;
mod configuration;
mod discovery;
mod extensions;
mod last_failed;
mod name_filter;
mod watch;
1 change: 1 addition & 0 deletions crates/karva_benchmark/src/walltime.rs
Original file line number Diff line number Diff line change
Expand Up @@ -53,6 +53,7 @@ fn test_project(project: &Project) {
num_workers,
no_cache: false,
create_ctrlc_handler: false,
last_failed: false,
};

let args = SubTestCommand {
Expand Down
51 changes: 49 additions & 2 deletions crates/karva_cache/src/cache.rs
Original file line number Diff line number Diff line change
Expand Up @@ -8,15 +8,16 @@ use karva_diagnostic::{TestResultStats, TestRunResult};
use ruff_db::diagnostic::{DisplayDiagnosticConfig, DisplayDiagnostics, FileResolver};

use crate::{
DIAGNOSTICS_FILE, DISCOVER_DIAGNOSTICS_FILE, DURATIONS_FILE, FAIL_FAST_SIGNAL_FILE, RunHash,
STATS_FILE, worker_folder,
DIAGNOSTICS_FILE, DISCOVER_DIAGNOSTICS_FILE, DURATIONS_FILE, FAIL_FAST_SIGNAL_FILE,
FAILED_TESTS_FILE, LAST_FAILED_FILE, RunHash, STATS_FILE, worker_folder,
};

/// Aggregated test results collected from all worker processes.
pub struct AggregatedResults {
pub stats: TestResultStats,
pub diagnostics: String,
pub discovery_diagnostics: String,
pub failed_tests: Vec<String>,
}

/// Reads and writes test results in the cache directory for a specific run.
Expand Down Expand Up @@ -49,6 +50,7 @@ impl Cache {
let mut test_stats = TestResultStats::default();
let mut all_diagnostics = String::new();
let mut all_discovery_diagnostics = String::new();
let mut all_failed_tests = Vec::new();

if self.run_dir.exists() {
let mut worker_dirs: Vec<Utf8PathBuf> = fs::read_dir(&self.run_dir)?
Expand All @@ -74,6 +76,7 @@ impl Cache {
&mut test_stats,
&mut all_diagnostics,
&mut all_discovery_diagnostics,
&mut all_failed_tests,
)?;
}
}
Expand All @@ -82,6 +85,7 @@ impl Cache {
stats: test_stats,
diagnostics: all_diagnostics,
discovery_diagnostics: all_discovery_diagnostics,
failed_tests: all_failed_tests,
})
}

Expand Down Expand Up @@ -116,6 +120,17 @@ impl Cache {
let json = serde_json::to_string_pretty(result.durations())?;
fs::write(&durations_path, json)?;

if !result.failed_tests().is_empty() {
let failed_tests: Vec<String> = result
.failed_tests()
.iter()
.map(ToString::to_string)
.collect();
let failed_path = worker_dir.join(FAILED_TESTS_FILE);
let json = serde_json::to_string_pretty(&failed_tests)?;
fs::write(failed_path, json)?;
}

Ok(())
}
}
Expand All @@ -126,6 +141,7 @@ fn read_worker_results(
aggregated_stats: &mut TestResultStats,
all_diagnostics: &mut String,
all_discovery_diagnostics: &mut String,
all_failed_tests: &mut Vec<String>,
) -> Result<()> {
let stats_path = worker_dir.join(STATS_FILE);

Expand All @@ -147,9 +163,40 @@ fn read_worker_results(
all_discovery_diagnostics.push_str(&content);
}

let failed_tests_path = worker_dir.join(FAILED_TESTS_FILE);
if failed_tests_path.exists() {
let content = fs::read_to_string(&failed_tests_path)?;
let failed_tests: Vec<String> = serde_json::from_str(&content)?;
all_failed_tests.extend(failed_tests);
}

Ok(())
}

/// Persists the names of the tests that failed in this run at the cache
/// directory root, replacing whatever last-failed list an earlier run left
/// behind (including replacing it with an empty list after a clean run).
///
/// # Errors
///
/// Propagates filesystem and JSON-serialization failures.
pub fn write_last_failed(cache_dir: &Utf8Path, failed_tests: &[String]) -> Result<()> {
    // The cache root may not exist yet (e.g. on the very first run).
    fs::create_dir_all(cache_dir)?;
    let serialized = serde_json::to_string_pretty(failed_tests)?;
    fs::write(cache_dir.join(LAST_FAILED_FILE), serialized)?;
    Ok(())
}

/// Reads the list of previously failed tests from the cache directory root.
///
/// Returns an empty list if no last-failed file exists (e.g. no previous
/// run has been recorded).
///
/// # Errors
///
/// Propagates filesystem failures other than `NotFound`, and JSON parse
/// failures for a corrupt file.
pub fn read_last_failed(cache_dir: &Utf8Path) -> Result<Vec<String>> {
    let path = cache_dir.join(LAST_FAILED_FILE);
    // Read directly and treat `NotFound` as "no previous failures" rather
    // than probing with `exists()` first: the probe costs an extra syscall
    // and is racy (the file could vanish between the check and the read).
    let content = match fs::read_to_string(&path) {
        Ok(content) => content,
        Err(err) if err.kind() == std::io::ErrorKind::NotFound => return Ok(Vec::new()),
        Err(err) => return Err(err.into()),
    };
    let failed_tests: Vec<String> = serde_json::from_str(&content)?;
    Ok(failed_tests)
}

/// Reads durations from the most recent test run.
///
/// Finds the most recent `run-{timestamp}` directory, then aggregates
Expand Down
5 changes: 4 additions & 1 deletion crates/karva_cache/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,8 @@ pub(crate) mod cache;
pub(crate) mod hash;

pub use cache::{
AggregatedResults, Cache, PruneResult, clean_cache, prune_cache, read_recent_durations,
AggregatedResults, Cache, PruneResult, clean_cache, prune_cache, read_last_failed,
read_recent_durations, write_last_failed,
};
pub use hash::RunHash;

Expand All @@ -11,7 +12,9 @@ pub(crate) const STATS_FILE: &str = "stats.json";
pub(crate) const DIAGNOSTICS_FILE: &str = "diagnostics.txt";
pub(crate) const DISCOVER_DIAGNOSTICS_FILE: &str = "discover_diagnostics.txt";
pub(crate) const DURATIONS_FILE: &str = "durations.json";
pub(crate) const FAILED_TESTS_FILE: &str = "failed_tests.json";
const FAIL_FAST_SIGNAL_FILE: &str = "fail-fast";
const LAST_FAILED_FILE: &str = "last-failed.json";

pub(crate) fn worker_folder(worker_id: usize) -> String {
format!("worker-{worker_id}")
Expand Down
4 changes: 4 additions & 0 deletions crates/karva_cli/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -272,6 +272,10 @@ pub struct TestCommand {
/// Re-run tests when Python source files change.
#[clap(long)]
pub watch: bool,

/// Re-run only the tests that failed in the previous run.
#[clap(long, alias = "lf")]
pub last_failed: bool,
}

impl TestCommand {
Expand Down
12 changes: 12 additions & 0 deletions crates/karva_diagnostic/src/result.rs
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,9 @@ pub struct TestRunResult {
stats: TestResultStats,

durations: HashMap<QualifiedFunctionName, std::time::Duration>,

/// Names of tests that failed during this run.
failed_tests: Vec<QualifiedFunctionName>,
}

impl TestRunResult {
Expand Down Expand Up @@ -72,6 +75,11 @@ impl TestRunResult {
) {
self.stats.add(result.clone().into());

if matches!(result, IndividualTestResultKind::Failed) {
self.failed_tests
.push(test_case_name.function_name().clone());
}

if let Some(reporter) = reporter {
reporter.report_test_case_result(test_case_name, result);
}
Expand All @@ -91,6 +99,10 @@ impl TestRunResult {
pub const fn durations(&self) -> &HashMap<QualifiedFunctionName, std::time::Duration> {
&self.durations
}

pub fn failed_tests(&self) -> &[QualifiedFunctionName] {
&self.failed_tests
}
}

#[derive(Debug, Clone)]
Expand Down
28 changes: 26 additions & 2 deletions crates/karva_runner/src/orchestration.rs
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
use std::collections::HashSet;
use std::process::{Child, Command, Stdio};
use std::time::{Duration, Instant};

Expand All @@ -6,7 +7,10 @@ use camino::Utf8PathBuf;
use crossbeam_channel::{Receiver, TryRecvError};

use crate::shutdown::shutdown_receiver;
use karva_cache::{AggregatedResults, CACHE_DIR, Cache, RunHash, read_recent_durations};
use karva_cache::{
AggregatedResults, CACHE_DIR, Cache, RunHash, read_last_failed, read_recent_durations,
write_last_failed,
};
use karva_cli::SubTestCommand;
use karva_collector::{CollectedPackage, CollectionSettings};
use karva_logging::time::format_duration;
Expand Down Expand Up @@ -133,6 +137,8 @@ pub struct ParallelTestConfig {
/// Ctrl+C and gracefully stop workers. Set to `false` in contexts where
/// the handler should not be installed (e.g., benchmarks).
pub create_ctrlc_handler: bool,
/// When `true`, only tests that failed in the previous run will be executed.
pub last_failed: bool,
}

/// Spawn worker processes for each partition
Expand Down Expand Up @@ -262,7 +268,21 @@ pub fn run_parallel_tests(
);
}

let partitions = partition_collected_tests(&collected, num_workers, &previous_durations);
let last_failed_set: HashSet<String> = if config.last_failed {
read_last_failed(&cache_dir)
.unwrap_or_default()
.into_iter()
.collect()
} else {
HashSet::new()
};

let partitions = partition_collected_tests(
&collected,
num_workers,
&previous_durations,
&last_failed_set,
);

let run_hash = RunHash::current_time();

Expand All @@ -289,6 +309,10 @@ pub fn run_parallel_tests(

let result = cache.aggregate_results()?;

if !config.no_cache {
let _ = write_last_failed(&cache_dir, &result.failed_tests);
}

Ok(result)
}

Expand Down
Loading