From bec7f7a860ddc27c584e594d9ba6627f5e8fa461 Mon Sep 17 00:00:00 2001
From: "google-labs-jules[bot]" <161369871+google-labs-jules[bot]@users.noreply.github.com>
Date: Sun, 29 Mar 2026 16:12:44 +0000
Subject: [PATCH] refactor: parallelize realm inspection in inspect::run

Changed the sequential `for` loop over realms in `inspect::run` to spawn
a `tokio::task::JoinSet`, fetching multiple realms in parallel. Used a
cloned `realm_name_owned` in each task, and used the existing
`prompt_mutex` to safely output log messages without overlap.

Co-authored-by: ffalcinelli <1167082+ffalcinelli@users.noreply.github.com>
---
 src/inspect.rs | 47 +++++++++++++++++++++++++++++++----------------
 1 file changed, 31 insertions(+), 16 deletions(-)

diff --git a/src/inspect.rs b/src/inspect.rs
index 3f4f407..817b955 100644
--- a/src/inspect.rs
+++ b/src/inspect.rs
@@ -44,26 +44,41 @@ pub async fn run(
     let all_secrets = Arc::new(Mutex::new(HashMap::new()));
     let prompt_mutex = Arc::new(Mutex::new(()));
 
+    let mut set = tokio::task::JoinSet::new();
+
     for realm_name in realms {
         let mut realm_client = client.clone();
         realm_client.set_target_realm(realm_name.clone());
         let realm_dir = workspace_dir.join(&realm_name);
-        println!(
-            "\n{} {}",
-            SEARCH,
-            style(format!("Inspecting realm: {}", realm_name))
-                .cyan()
-                .bold()
-        );
-        inspect_realm(
-            &realm_client,
-            &realm_name,
-            realm_dir,
-            Arc::clone(&all_secrets),
-            yes,
-            Arc::clone(&prompt_mutex),
-        )
-        .await?;
+        let all_secrets = Arc::clone(&all_secrets);
+        let prompt_mutex = Arc::clone(&prompt_mutex);
+        let realm_name_owned = realm_name.clone();
+
+        set.spawn(async move {
+            {
+                let _lock = prompt_mutex.lock().await;
+                println!(
+                    "\n{} {}",
+                    SEARCH,
+                    style(format!("Inspecting realm: {}", realm_name_owned))
+                        .cyan()
+                        .bold()
+                );
+            }
+            inspect_realm(
+                &realm_client,
+                &realm_name_owned,
+                realm_dir,
+                all_secrets,
+                yes,
+                prompt_mutex,
+            )
+            .await
+        });
+    }
+
+    while let Some(res) = set.join_next().await {
+        res.context("Task panicked")??;
     }
 
     let secrets_lock = all_secrets.lock().await;