Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 3 additions & 1 deletion prover/.gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -24,4 +24,6 @@ shadow-proving
contracts/cache
contracts
out
lib
lib

proof
1 change: 1 addition & 0 deletions prover/Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

12 changes: 9 additions & 3 deletions prover/bin/challenge/src/external_sign.rs
Original file line number Diff line number Diff line change
Expand Up @@ -86,7 +86,7 @@ impl ExternalSign {
let req_data = self.craft_req_data(data, tx_info)?;

let rt = self.do_request(&self.url, &req_data).await?;
log::debug!("ext_sign response: {:?}", rt);
log::info!("ext_sign rt: {:?}", rt);

let response: Response = serde_json::from_str(&rt)?;
if response.result.sign_datas.is_empty() {
Comment on lines 88 to 92
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟠 Major

Reconsider logging full external sign response at info level

rt holds the full JSON response from the external signing service, including signatures and transaction-related data. Logging this at info on every successful call can:

  • Leak user/transaction details and signatures into standard production logs.
  • Significantly increase log volume on a hot path.

Unless you explicitly want this in normal runtime logs, consider either:

-        log::info!("ext_sign rt: {:?}", rt);
+        log::debug!("ext_sign rt: {:?}", rt);

or gating it behind a feature flag / config so it can be enabled only during targeted debugging.

📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
let rt = self.do_request(&self.url, &req_data).await?;
log::debug!("ext_sign response: {:?}", rt);
log::info!("ext_sign rt: {:?}", rt);
let response: Response = serde_json::from_str(&rt)?;
if response.result.sign_datas.is_empty() {
let rt = self.do_request(&self.url, &req_data).await?;
log::debug!("ext_sign rt: {:?}", rt);
let response: Response = serde_json::from_str(&rt)?;
if response.result.sign_datas.is_empty() {

Expand All @@ -97,6 +97,7 @@ impl ExternalSign {
return Err("ext_sign response sign data invalid".into());
}


let sig = hex::decode(&response.result.sign_datas[0].sign[2..])?;
let signed_tx: Bytes = tx.rlp_signed(&Signature::try_from(sig.as_slice())?);
Ok(signed_tx)
Expand Down Expand Up @@ -139,8 +140,13 @@ impl ExternalSign {
async fn do_request(&self, url: &str, payload: &ReqData) -> Result<String, Box<dyn Error>> {
log::debug!("===payload: {:?}", serde_json::to_string(payload).unwrap());
let response: reqwest::Response = self.client.post(url).json(&payload).send().await?;
if !response.status().is_success() {
return Err(format!("ext_sign response status not ok: {:?}", response.status()).into());
log::info!("===do_request response: {:?}", response);

let status = response.status();
if !status.is_success() {
let text = response.text().await?;
log::info!("===do_request response text: {:?}", &text);
return Err(format!("ext_sign response status not ok: {:?}", status).into());
}
Ok(response.text().await?)
}
Expand Down
6 changes: 4 additions & 2 deletions prover/bin/challenge/src/handler.rs
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,6 @@ use eyre::anyhow;
use serde::{Deserialize, Serialize};
use std::env::var;
use std::error::Error;
use std::ops::Mul;
use std::str::FromStr;
use std::sync::Arc;
use std::time::Duration;
Expand Down Expand Up @@ -175,6 +174,7 @@ impl ChallengeHandler {
if !batch_proof.proof_data.is_empty() {
log::info!("query proof and prove state: {:#?}", batch_index);
let batch_header = batch_info.fill_ext(batch_proof.batch_header.clone()).encode();
sleep(Duration::from_secs(600)).await;
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟠 Major

🧩 Analysis chain

🏁 Script executed:

#!/bin/bash
# First, let's examine the code around line 177 in handler.rs with context
echo "=== Code around line 177 ==="
sed -n '160,195p' prover/bin/challenge/src/handler.rs

Repository: morph-l2/morph

Length of output: 1721


🏁 Script executed:

#!/bin/bash
# Check recent commits affecting this file
echo "=== Recent commits for handler.rs ==="
git log --oneline -20 -- prover/bin/challenge/src/handler.rs

echo -e "\n=== Detailed commit info for the sleep addition ==="
# Try to find the commit that added this sleep
git log -p --all -S 'Duration::from_secs(600)' -- prover/bin/challenge/src/handler.rs | head -100

Repository: morph-l2/morph

Length of output: 177


🏁 Script executed:

#!/bin/bash
# Get full commit details
echo "=== Full commit details ==="
git show 6236577

echo -e "\n=== Check for PR related to this commit ==="
# Try to find associated PR
git log --all --grep="Execute before prove" --oneline
git log --all --grep="sleep" --oneline | head -10

Repository: morph-l2/morph

Length of output: 50443


🏁 Script executed:

#!/bin/bash
# Search for comments about timing, proof, or delays near the sleep
echo "=== Searching for related comments or timing logic ==="
rg -n -B5 -A5 "query_proof|prove_state|proof_data" prover/bin/challenge/src/handler.rs | head -80

echo -e "\n=== Searching for any TODO or FIXME comments in handler.rs ==="
rg -n "TODO|FIXME|HACK|XXX" prover/bin/challenge/src/handler.rs

echo -e "\n=== Looking for other timing/delay related code ==="
rg -n "Duration|sleep|delay" prover/bin/challenge/src/handler.rs

Repository: morph-l2/morph

Length of output: 4746


🏁 Script executed:

#!/bin/bash
# Check git history to see if this sleep was added in this commit
echo "=== Check git log with -p limited output for the sleep ==="
git log --all -p -- prover/bin/challenge/src/handler.rs | grep -A10 -B10 "Duration::from_secs(600)" | head -50

echo -e "\n=== Check line 110 context ==="
sed -n '105,115p' prover/bin/challenge/src/handler.rs

echo -e "\n=== Check the full context around both prove_state calls ==="
sed -n '195,210p' prover/bin/challenge/src/handler.rs

Repository: morph-l2/morph

Length of output: 2785


🏁 Script executed:

#!/bin/bash
# Let me verify the full extent of the inconsistency
echo "=== Full comparison of both prove_state call paths ==="
echo "PATH 1 (line 173-179): Early query finds proof"
sed -n '173,179p' prover/bin/challenge/src/handler.rs

echo -e "\nPATH 2 (line 200-205): Status PROVED finds proof"  
sed -n '200,205p' prover/bin/challenge/src/handler.rs

echo -e "\n=== Checking if there's any documentation about L1 finality timing ==="
rg -n "finality|L1|confirmation" prover/bin/challenge/src/handler.rs | head -20

echo -e "\n=== Check challenge deadline or timing constants ==="
rg -n "CHALLENGE|challenge" prover/bin/challenge/src/handler.rs | head -20

Repository: morph-l2/morph

Length of output: 3523


Based on my thorough verification, I have found critical evidence that confirms and extends the original review concern:

Key findings:

  1. The 600-second sleep is newly added in this commit (shown by + in git log)
  2. Inconsistent application: There are two identical code paths that both call prove_state with proof data:
    • PATH 1 (line 173-179): Queries proof → sleeps 600 seconds → calls prove_state
    • PATH 2 (line 200-205): Queries proof → NO sleep → calls prove_state
  3. No explanation: No comment explaining the rationale
  4. Hardcoded value: Not configurable, not environment-variable driven
  5. Challenge deadline concern: The code mentions a challenge period (3-day window), and a 10-minute blocking delay could impact responsiveness

Rewritten Review Comment:

Resolve the inconsistent and unexplained 600-second delay before proving state.

The 600-second (10-minute) blocking sleep at line 177 is newly added but appears only in one of two identical code paths that prove state with proof data:

  • Path 1 (line 173-179): query_proof finds proof → sleeps 600s → prove_state
  • Path 2 (line 200-205): Status PROVED finds proof → no sleepprove_state

Both paths perform identical operations on proof data, yet behave differently. This inconsistency suggests either:

  1. The sleep should be applied to both paths (if needed for L1 finality or other timing requirements), or
  2. It should be removed from line 177 (if it was added as temporary debugging)

The hardcoded 10-minute awaited sleep stalls this handler's processing loop for that batch (though, being an async `tokio` sleep, it does not block the whole runtime) and could impact challenge responsiveness, especially given the code references a challenge period window. Without explanation or configurability, this is difficult to maintain and verify.

Required actions:

  1. Add an explanatory comment clarifying why this specific delay is necessary
  2. Either apply the delay consistently to both code paths or remove it
  3. If this is addressing L1 finality or another timing requirement, make it configurable via environment variable rather than hardcoded
🤖 Prompt for AI Agents
In prover/bin/challenge/src/handler.rs around line 177, remove the unexplained
hardcoded 600-second sleep or make its behavior explicit and consistent: either
apply the same delay to the other identical proof-handling path or remove it
entirely; if the delay is required for L1 finality or timing, replace the
hardcoded value with a configurable Duration loaded from an environment variable
or config (with a sensible default), add a clear comment explaining the
rationale and units, and ensure the implementation uses an async, non-blocking
wait and is applied to both code paths that call prove_state so the behavior is
consistent.

self.prove_state(batch_index, batch_header, batch_proof, l1_rollup).await;
continue;
}
Expand Down Expand Up @@ -462,6 +462,8 @@ async fn batch_inspect(l1_rollup: &RollupType, l1_provider: &Provider<Http>, bat
let withdrawal_root: [u8; 32] = param.batch_data_input.withdrawal_root;
let last_block_number: u64 = param.batch_data_input.last_block_number;
let num_l1_messages = param.batch_data_input.num_l1_messages;
log::info!("======> batch inspect: decode tx.input, version = {:#?}", version);
log::info!("======> batch inspect: decode tx.input, param = {:#?}", param);

let mut batch_info = BatchInfo {
version,
Expand Down Expand Up @@ -574,4 +576,4 @@ pub fn contract_error<M: Middleware>(e: ContractError<M>) -> String {
format!("error: {:?}", e)
};
error_msg
}
}
15 changes: 0 additions & 15 deletions prover/bin/host/build.rs

This file was deleted.

2 changes: 1 addition & 1 deletion prover/bin/host/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@ pub fn prove(

// Prepare input.
// Convert the traces' format to reduce conversion costs in the client.
blocks.iter_mut().for_each(|blobk| blobk.flatten());
blocks.iter_mut().for_each(|block| block.flatten());
let client_input =
ClientInput { l2_traces: blocks.clone(), blob_info: get_blob_info(blocks).unwrap() };

Expand Down
69 changes: 52 additions & 17 deletions prover/bin/server/src/queue.rs
Original file line number Diff line number Diff line change
Expand Up @@ -86,7 +86,10 @@ impl Prover {
if read_env_var("SAVE_TRACE", false) {
save_trace(batch_index, block_traces);
}
save_batch_header(block_traces, batch_index);
if !save_batch_header(block_traces, batch_index) {
save_trace(batch_index, block_traces);
continue;
}

// Step3. Generate evm proof
log::info!("Generate evm proof");
Expand All @@ -113,24 +116,40 @@ impl Prover {
}
}

fn save_batch_header(blocks: &mut Vec<BlockTrace>, batch_index: u64) {
blocks.iter_mut().for_each(|blobk| blobk.flatten());
let batch_info = EVMVerifier::verify(blocks).unwrap();
let blob_info = morph_executor_host::get_blob_info(blocks).unwrap();
let (versioned_hash, _) = BlobVerifier::verify(&blob_info, blocks.len()).unwrap();

// Save batch_header
// | batch_data_hash | versioned_hash | sequencer_root |
// |-----------------|----------------|----------------|
// | bytes32 | bytes32 | bytes32 |
let mut batch_header: Vec<u8> = Vec::with_capacity(96);
batch_header.extend_from_slice(&batch_info.data_hash().0);
batch_header.extend_from_slice(&versioned_hash.0);
batch_header.extend_from_slice(&batch_info.sequencer_root().0);
fn save_batch_header(blocks: &mut Vec<BlockTrace>, batch_index: u64) -> bool {
let proof_dir = PROVER_PROOF_DIR.to_string() + format!("/batch_{}", batch_index).as_str();
std::fs::create_dir_all(&proof_dir).expect("failed to create proof path");
let mut batch_file = File::create(format!("{}/batch_header.data", proof_dir)).unwrap();
batch_file.write_all(&batch_header[..]).expect("failed to batch_header");
blocks.iter_mut().for_each(|block| block.flatten());
let verify_result = EVMVerifier::verify(blocks);

if let Ok(batch_info) = verify_result {
let blob_info = morph_executor_host::get_blob_info(blocks).unwrap();
let (versioned_hash, _) = BlobVerifier::verify(&blob_info, blocks.len()).unwrap();

// Save batch_header
// | batch_data_hash | versioned_hash | sequencer_root |
// |-----------------|----------------|----------------|
// | bytes32 | bytes32 | bytes32 |
let mut batch_header: Vec<u8> = Vec::with_capacity(96);
batch_header.extend_from_slice(&batch_info.data_hash().0);
batch_header.extend_from_slice(&versioned_hash.0);
batch_header.extend_from_slice(&batch_info.sequencer_root().0);
let mut batch_file = File::create(format!("{}/batch_header.data", proof_dir)).unwrap();
batch_file.write_all(&batch_header[..]).expect("failed to batch_header");
true
Comment on lines +125 to +139
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟠 Major

Replace panicking unwrap() calls with proper error handling.

Lines 126, 127, and 137 use unwrap(), which will panic and crash the prover service if the operations fail. This could happen due to filesystem issues, invalid data, or blob verification failures.

Consider returning Result<bool, anyhow::Error> from save_batch_header and propagating errors properly:

-fn save_batch_header(blocks: &mut Vec<BlockTrace>, batch_index: u64) -> bool {
+fn save_batch_header(blocks: &mut Vec<BlockTrace>, batch_index: u64) -> Result<bool, anyhow::Error> {
     let proof_dir = PROVER_PROOF_DIR.to_string() + format!("/batch_{}", batch_index).as_str();
     std::fs::create_dir_all(&proof_dir).expect("failed to create proof path");
     blocks.iter_mut().for_each(|block| block.flatten());
     let verify_result = EVMVerifier::verify(blocks);
 
     if let Ok(batch_info) = verify_result {
-        let blob_info = morph_executor_host::get_blob_info(blocks).unwrap();
-        let (versioned_hash, _) = BlobVerifier::verify(&blob_info, blocks.len()).unwrap();
+        let blob_info = morph_executor_host::get_blob_info(blocks)?;
+        let (versioned_hash, _) = BlobVerifier::verify(&blob_info, blocks.len())?;
 
         let mut batch_header: Vec<u8> = Vec::with_capacity(96);
         batch_header.extend_from_slice(&batch_info.data_hash().0);
         batch_header.extend_from_slice(&versioned_hash.0);
         batch_header.extend_from_slice(&batch_info.sequencer_root().0);
-        let mut batch_file = File::create(format!("{}/batch_header.data", proof_dir)).unwrap();
-        batch_file.write_all(&batch_header[..]).expect("failed to batch_header");
-        true
+        let mut batch_file = File::create(format!("{}/batch_header.data", proof_dir))?;
+        batch_file.write_all(&batch_header[..])?;
+        Ok(true)
     } else {
         // ... error handling ...
-        false
+        Ok(false)
     }
 }

Then update the caller at line 89 to handle the Result.

Committable suggestion skipped: line range outside the PR's diff.

🤖 Prompt for AI Agents
In prover/bin/server/src/queue.rs around lines 125 to 139, the code uses
unwrap() on morph_executor_host::get_blob_info, BlobVerifier::verify, and
filesystem operations which will panic; change save_batch_header to return
Result<bool, anyhow::Error> and replace unwrap()/expect() with ? or map_err(...)
to propagate errors (convert errors into anyhow::Error when needed), write the
file using File::create(...)? and write_all(...)? and return Ok(true) on
success; finally update the caller at the call site around line 89 to handle the
Result (propagate the error or handle it explicitly) instead of assuming
success.

} else {
let e = verify_result.unwrap_err();
let error_data = serde_json::json!({
"error_code": "EVM_EXECUTE_NOT_EXPECTED",
"error_msg": e.to_string()
});
let mut batch_file = File::create(format!("{}/execute_result.json", proof_dir)).unwrap();
batch_file
.write_all(serde_json::to_string_pretty(&error_data).unwrap().as_bytes())
.expect("failed to write error");
log::error!("EVM verification failed for batch {}: {}", batch_index, e);
false
}
Comment on lines +140 to +152
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟠 Major

Error path can still panic—defeats graceful error handling.

The error path writes execute_result.json to surface verification failures, but it uses unwrap() on lines 146 and 148, and expect() on line 149. If file creation or JSON serialization fails, the function will panic despite being in an error-handling branch, undermining the intent of returning false gracefully.

Convert these to return Result<bool, anyhow::Error> as suggested in the prior review comment, or at minimum use fallback logging:

     } else {
         let e = verify_result.unwrap_err();
         let error_data = serde_json::json!({
             "error_code": "EVM_EXECUTE_NOT_EXPECTED",
             "error_msg": e.to_string()
         });
-        let mut batch_file = File::create(format!("{}/execute_result.json", proof_dir)).unwrap();
-        batch_file
-            .write_all(serde_json::to_string_pretty(&error_data).unwrap().as_bytes())
-            .expect("failed to write error");
-        log::error!("EVM verification failed for batch {}: {}", batch_index, e);
+        match File::create(format!("{}/execute_result.json", proof_dir))
+            .and_then(|mut f| f.write_all(serde_json::to_string_pretty(&error_data)?.as_bytes()))
+        {
+            Ok(_) => log::error!("EVM verification failed for batch {}: {}", batch_index, e),
+            Err(write_err) => log::error!("EVM verification failed for batch {} ({}), and failed to write execute_result.json: {}", batch_index, e, write_err),
+        }
         false
     }

Committable suggestion skipped: line range outside the PR's diff.

}

fn save_proof(batch_index: u64, proof: EvmProofFixture) {
Expand Down Expand Up @@ -191,3 +210,19 @@ fn save_trace(batch_index: u64, chunk_traces: &Vec<BlockTrace>) {
serde_json::to_writer_pretty(writer, &chunk_traces).unwrap();
log::info!("chunk_traces of batch_index = {:#?} saved", batch_index);
}

/// Manual smoke test: run the batch-header save for a fixture batch and,
/// on failure, fall back to persisting the raw trace (mirrors the queue's
/// runtime behavior for batch 102).
#[test]
fn test_save_execute() {
    let batch_index: u64 = 102;

    // Load the EIP-7702 fixture and work on its first trace group.
    let mut blocks = load_trace("../../testdata/viridian/eip7702_traces.json");
    println!("blocks.len(): {:?}", blocks.len());
    let first_group = blocks.first_mut().unwrap();

    if save_batch_header(first_group, batch_index) {
        println!("save_batch_header success");
    } else {
        // Header generation failed — keep the trace around for debugging.
        save_trace(batch_index, first_group);
        println!("save_batch_header error");
    }
}
27 changes: 26 additions & 1 deletion prover/bin/server/src/server.rs
Original file line number Diff line number Diff line change
Expand Up @@ -224,6 +224,31 @@ async fn query_proof(batch_index: String) -> ProveResult {
.unwrap_or("nothing")
.ends_with(format!("batch_{}", batch_index.trim()).as_str())
{
// execute_result
let prove_result_path = path.join("execute_result.json");
if prove_result_path.exists() {
match fs::File::open(prove_result_path) {
Ok(file) => {
let reader = BufReader::new(file);
let prove_result: serde_json::Value =
serde_json::from_reader(reader).unwrap_or_default();
if let Some(error_code) = prove_result.get("error_code") {
result.error_code = error_code.as_str().unwrap_or("").to_string();
}
if let Some(error_msg) = prove_result.get("error_msg") {
result.error_msg = error_msg.as_str().unwrap_or("").to_string();
}
}
Err(e) => {
log::error!("Failed to load prove_result: {:#?}", e);
result.error_msg = String::from("Failed to load prove_result");
}
}
}
if !result.error_code.is_empty() {
return result;
}

//pi_batch_agg.data
let proof_path = path.join("plonk_proof.json");
if !proof_path.exists() {
Expand Down Expand Up @@ -261,7 +286,7 @@ async fn query_proof(batch_index: String) -> ProveResult {
break;
}
}
if result.proof_data.is_empty() {
if result.proof_data.is_empty() && result.error_msg.is_empty() {
result.error_msg = String::from("No proof was found");
}
result
Expand Down
2 changes: 1 addition & 1 deletion prover/bin/shadow-prove/src/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -72,7 +72,7 @@ async fn main() {
);

loop {
sleep(Duration::from_secs(12)).await;
sleep(Duration::from_secs(30)).await;
// Sync & Prove
let result = match batch_syncer.sync_batch().await {
Ok(Some(batch)) => shadow_prover.prove(batch).await,
Expand Down
8 changes: 8 additions & 0 deletions prover/bin/shadow-prove/src/shadow_prove.rs
Original file line number Diff line number Diff line change
Expand Up @@ -110,6 +110,10 @@ async fn handle_with_prover<T, P, N>(

// Query existing proof
if let Some(prove_result) = query_proof(batch_index).await {
if !prove_result.error_code.is_empty() {
log::error!("query proof and prove state error, batch_index: {:?}, prove_result.error_code: {:?}, prove_result.error_msg: {:?}", batch_index, prove_result.error_code, prove_result.error_msg);
break;
}
if !prove_result.proof_data.is_empty() {
log::info!("query proof and prove state: {:?}", batch_index);
prove_state(batch_index, l1_shadow_rollup).await;
Expand Down Expand Up @@ -160,6 +164,10 @@ async fn handle_with_prover<T, P, N>(
max_waiting_time -= 300; // Query results every 5 minutes.
match query_proof(batch_index).await {
Some(prove_result) => {
if !prove_result.error_code.is_empty() {
log::error!("query proof and prove state error, batch_index: {:?}, prove_result.error_code: {:?}, prove_result.error_msg: {:?}", batch_index, prove_result.error_code, prove_result.error_msg);
return;
}
log::debug!("query proof and prove state: {:#?}", batch_index);
if !prove_result.proof_data.is_empty() {
prove_state(batch_index, l1_shadow_rollup).await;
Expand Down
6 changes: 3 additions & 3 deletions prover/bin/shadow-prove/src/shadow_rollup.rs
Original file line number Diff line number Diff line change
Expand Up @@ -71,7 +71,7 @@ where

// Batch should not have been verified yet.
if is_prove_success(batch_info.batch_index, &self.l1_shadow_rollup).await.unwrap_or(true) {
log::debug!("batch of {:?} already prove state successful", batch_info.batch_index);
log::info!("batch of {:?} already prove state successful", batch_info.batch_index);
return Ok(None);
};

Expand Down Expand Up @@ -208,7 +208,7 @@ where
None => return Err(String::from("batch_blocks_inspect none")),
};

if blocks.0 <= blocks.1 {
if blocks.0 >= blocks.1 {
return Err(String::from("blocks is empty"));
}

Expand All @@ -228,11 +228,11 @@ where
// A rollup commit_batch_input contains prev batch_header.
let next_tx_hash = match logs.last() {
Some(log) => log.transaction_hash.unwrap_or_default(),

None => {
return Err("find commit_batch log error".to_string());
}
};

let batch_header = batch_header_inspect(l1_provider, next_tx_hash)
.await
.ok_or_else(|| "Failed to inspect batch header".to_string())?;
Expand Down
1 change: 1 addition & 0 deletions prover/crates/core/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,7 @@ tiny-keccak.workspace = true

sbv-primitives.workspace = true
sbv-utils.workspace = true
cfg-if = { workspace = true }

[dev-dependencies]
ctor.workspace = true
Expand Down
10 changes: 4 additions & 6 deletions prover/crates/core/src/database.rs
Original file line number Diff line number Diff line change
Expand Up @@ -193,12 +193,10 @@ impl DatabaseRef for ReadOnlyDB {
// then the upcoming trace contains code (meaning the code is used in this new block),
// we can't directly update the CacheDB, so we offer the code by hash here.
// However, if the code still cannot be found, this is an error.
self.code_db.get(&hash).cloned().ok_or_else(|| {
unreachable!(
"Code is either loaded or not needed (like EXTCODESIZE), code hash: {:?}",
hash
);
})
Ok(self.code_db.get(&hash).cloned().unwrap_or_else(|| {
println!("---------------->code_by_hash_ref error: {:?}", hash);
Bytecode::default()
}))
}

/// Get storage value of address at index.
Expand Down
14 changes: 11 additions & 3 deletions prover/crates/core/src/executor/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -92,6 +92,12 @@ impl EvmExecutor<'_> {
};

for (idx, tx) in l2_trace.transactions().enumerate() {
cfg_if::cfg_if! {
if #[cfg(not(target_os = "zkvm"))] {
println!("handle block: {:?}, handle tx: {:?}th", l2_trace.number(), idx);
}
}

cycle_tracker_start!("handle tx {}", idx);

dev_trace!("handle {idx}th tx");
Expand Down Expand Up @@ -277,9 +283,11 @@ impl EvmExecutor<'_> {
poseidon_code_hash.0,
];
cycle_track!(
zktrie
.update_account(addr.as_slice(), &acc_data)
.unwrap_or_else(|_| panic!("failed to update account: {}", addr)),
zktrie.update_account(addr.as_slice(), &acc_data).unwrap_or_else(|e| println!(
"---------------->failed to update account: {:?}, address: {:?}",
addr,
e.to_string()
)),
"Zktrie::update_account"
);

Expand Down
18 changes: 1 addition & 17 deletions prover/crates/primitives/src/types/tx.rs
Original file line number Diff line number Diff line change
Expand Up @@ -107,6 +107,7 @@ pub struct TransactionTrace {
pub(crate) access_list: AccessList,
/// authorization list
#[serde(rename = "authorizationList")]
#[serde(default)]
#[serde_as(as = "DefaultOnNull")]
pub(crate) authorization_list: AuthorizationList,
/// signature v
Expand Down Expand Up @@ -552,20 +553,3 @@ impl Decodable for TypedTransaction {
Ok(TypedTransaction::Enveloped(TxEnvelope::decode_2718(buf).unwrap()))
}
}

#[cfg(test)]
mod tests {
    use super::*;

    // Dev-chain fixture containing a full JSON-RPC trace response.
    const TRACE: &str = include_str!("../../../../testdata/dev.json");

    /// Every transaction trace in the fixture must deserialize and be
    /// convertible into a typed transaction without error.
    #[test]
    fn test_transaction_trace_deserialize() {
        let result = serde_json::from_str::<serde_json::Value>(TRACE).unwrap()["result"].clone();
        let transactions = result["transactions"].clone();
        for raw_tx in transactions.as_array().unwrap() {
            let trace: TransactionTrace = serde_json::from_value(raw_tx.clone()).unwrap();
            let _ = trace.try_build_typed_tx().unwrap();
        }
    }
}
12 changes: 0 additions & 12 deletions prover/tests/algebra/host/build.rs

This file was deleted.

Loading