From df2e56452cede21c70bb3c7bc601afa984f16998 Mon Sep 17 00:00:00 2001 From: Mirko <48352201+Mirko-von-Leipzig@users.noreply.github.com> Date: Tue, 3 Mar 2026 16:06:44 +0200 Subject: [PATCH 1/8] fix(store): network notes must be public [main] (#1736) --- CHANGELOG.md | 4 ++ crates/proto/src/domain/note.rs | 6 +++ crates/rpc/src/server/api.rs | 12 +++--- crates/store/Cargo.toml | 2 +- crates/store/src/db/mod.rs | 43 +++++++++++++++++++-- crates/store/src/db/models/queries/notes.rs | 2 +- 6 files changed, 57 insertions(+), 12 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2dc3173446..42b297d2ac 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,9 @@ # Changelog +## v0.13.8 (TBD) + +- Private notes with the network note attachment are no longer incorrectly considered as network notes ([#1736](https://github.com/0xMiden/node/pull/1736)). + ## v0.13.7 (2026-02-25) - Updated `SyncAccountStorageMaps` and `SyncAccountVault` to allow all accounts with public state, including network accounts ([#1711](https://github.com/0xMiden/node/pull/1711)). 
diff --git a/crates/proto/src/domain/note.rs b/crates/proto/src/domain/note.rs index 94fea5bebc..33ec2ba273 100644 --- a/crates/proto/src/domain/note.rs +++ b/crates/proto/src/domain/note.rs @@ -281,6 +281,10 @@ impl TryFrom for SingleTargetNetworkNote { type Error = NetworkNoteError; fn try_from(note: Note) -> Result { + if note.metadata().note_type() != NoteType::Public { + return Err(NetworkNoteError::PrivateNote); + } + // Single-target network notes are identified by having a NetworkAccountTarget attachment let attachment = note.metadata().attachment(); let account_target = NetworkAccountTarget::try_from(attachment) @@ -318,6 +322,8 @@ where pub enum NetworkNoteError { #[error("note does not have a valid NetworkAccountTarget attachment: {0}")] InvalidAttachment(#[source] NetworkAccountTargetError), + #[error("note is private")] + PrivateNote, } // NOTE SCRIPT diff --git a/crates/rpc/src/server/api.rs b/crates/rpc/src/server/api.rs index 13d26962eb..24bc2920bb 100644 --- a/crates/rpc/src/server/api.rs +++ b/crates/rpc/src/server/api.rs @@ -519,13 +519,11 @@ fn endpoint_limits(params: &[(&str, usize)]) -> proto::rpc::EndpointLimits { /// Cached RPC query parameter limits. 
static RPC_LIMITS: LazyLock = LazyLock::new(|| { - use { - QueryParamAccountIdLimit as AccountId, - QueryParamNoteIdLimit as NoteId, - QueryParamNoteTagLimit as NoteTag, - QueryParamNullifierLimit as Nullifier, - QueryParamStorageMapKeyTotalLimit as StorageMapKeyTotal, - }; + use QueryParamAccountIdLimit as AccountId; + use QueryParamNoteIdLimit as NoteId; + use QueryParamNoteTagLimit as NoteTag; + use QueryParamNullifierLimit as Nullifier; + use QueryParamStorageMapKeyTotalLimit as StorageMapKeyTotal; proto::rpc::RpcLimits { endpoints: std::collections::HashMap::from([ diff --git a/crates/store/Cargo.toml b/crates/store/Cargo.toml index 1c62c7ab7c..837f158859 100644 --- a/crates/store/Cargo.toml +++ b/crates/store/Cargo.toml @@ -19,7 +19,7 @@ anyhow = { workspace = true } deadpool = { default-features = false, features = ["managed", "rt_tokio_1"], version = "0.12" } deadpool-diesel = { features = ["sqlite"], version = "0.6" } deadpool-sync = { default-features = false, features = ["tracing"], version = "0.1" } -diesel = { features = ["numeric", "sqlite"], version = "2.3" } +diesel = { features = ["numeric", "returning_clauses_for_sqlite_3_35", "sqlite"], version = "2.3" } diesel_migrations = { features = ["sqlite"], version = "2.3" } fs-err = { workspace = true } hex = { version = "0.4" } diff --git a/crates/store/src/db/mod.rs b/crates/store/src/db/mod.rs index 7fc4a5cab4..045ba9a9a2 100644 --- a/crates/store/src/db/mod.rs +++ b/crates/store/src/db/mod.rs @@ -3,7 +3,14 @@ use std::ops::RangeInclusive; use std::path::PathBuf; use anyhow::Context; -use diesel::{Connection, QueryableByName, RunQueryDsl, SqliteConnection}; +use diesel::{ + BoolExpressionMethods, + Connection, + ExpressionMethods, + QueryableByName, + RunQueryDsl, + SqliteConnection, +}; use miden_node_proto::domain::account::{AccountInfo, AccountSummary}; use miden_node_proto::generated as proto; use miden_node_utils::tracing::OpenTelemetrySpanExt; @@ -29,8 +36,8 @@ use crate::COMPONENT; use 
crate::db::manager::{ConnectionManager, configure_connection_on_creation}; use crate::db::migrations::apply_migrations; use crate::db::models::conv::SqlTypeConvert; -use crate::db::models::queries::StorageMapValuesPage; -use crate::db::models::{Page, queries}; +use crate::db::models::queries::{NetworkNoteType, StorageMapValuesPage}; +use crate::db::models::{Page, deserialize_raw_vec, queries}; use crate::errors::{DatabaseError, DatabaseSetupError, NoteSyncError, StateSyncError}; use crate::genesis::GenesisBlock; @@ -321,9 +328,39 @@ impl Db { let me = Db { pool }; me.query("migrations", apply_migrations).await?; + me.fixup_network_note_classification().await?; Ok(me) } + /// Temporary fixup of private notes which were misclassified as network notes. + #[instrument(target = COMPONENT, skip_all, err)] + async fn fixup_network_note_classification(&self) -> Result<()> { + let notes = self + .transact("fixup network notes", move |conn| { + let updated = diesel::update(schema::notes::table) + .filter( + schema::notes::network_note_type + .eq(NetworkNoteType::SingleTarget as i32) + .and(schema::notes::nullifier.is_null()), + ) + .set(schema::notes::network_note_type.eq(NetworkNoteType::None as i32)) + .returning(schema::notes::note_id) + .get_results::>(conn)?; + + deserialize_raw_vec::<_, NoteId>(updated).map_err(DatabaseError::from) + }) + .await?; + + for note in notes { + tracing::info!( + note.id = %note, + "Fixed private note misclassified as network note" + ); + } + + Ok(()) + } + /// Loads all the nullifiers from the DB. 
#[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] pub(crate) async fn select_all_nullifiers(&self) -> Result> { diff --git a/crates/store/src/db/models/queries/notes.rs b/crates/store/src/db/models/queries/notes.rs index a2ab7b1bb0..cfc8b9b2c6 100644 --- a/crates/store/src/db/models/queries/notes.rs +++ b/crates/store/src/db/models/queries/notes.rs @@ -878,7 +878,7 @@ impl From<(NoteRecord, Option)> for NoteInsertRow { let attachment = note.metadata.attachment(); let target_account_id = NetworkAccountTarget::try_from(attachment).ok(); - let network_note_type = if target_account_id.is_some() { + let network_note_type = if target_account_id.is_some() && !note.metadata.is_private() { NetworkNoteType::SingleTarget } else { NetworkNoteType::None From f8501ab9c9b083bd96dd75834a764a1f76383ae6 Mon Sep 17 00:00:00 2001 From: Mirko <48352201+Mirko-von-Leipzig@users.noreply.github.com> Date: Tue, 3 Mar 2026 17:07:45 +0200 Subject: [PATCH 2/8] ci(docker): use `cargo chef` and cache to github (#1631) (#1741) --- .github/workflows/build-docker.yml | 34 ++++------------------ bin/node/Dockerfile | 45 +++++++++++++++++------------- 2 files changed, 32 insertions(+), 47 deletions(-) diff --git a/.github/workflows/build-docker.yml b/.github/workflows/build-docker.yml index 0e7fe0c073..b259c23fd9 100644 --- a/.github/workflows/build-docker.yml +++ b/.github/workflows/build-docker.yml @@ -12,38 +12,16 @@ permissions: jobs: docker-build: - strategy: - matrix: - component: [node] runs-on: Linux-ARM64-Runner - name: Build ${{ matrix.component }} steps: - - name: Checkout code - uses: actions/checkout@v4 - - - name: Configure AWS credentials - if: github.event.pull_request.head.repo.fork == false - uses: aws-actions/configure-aws-credentials@v4 - with: - aws-region: ${{ secrets.AWS_REGION }} - role-to-assume: ${{ secrets.AWS_ROLE }} - role-session-name: GithubActionsSession - - - name: Set cache parameters - if: 
github.event.pull_request.head.repo.fork == false - run: | - echo "CACHE_FROM=type=s3,region=${{ secrets.AWS_REGION }},bucket=${{ secrets.AWS_CACHE_BUCKET }},name=miden-${{ matrix.component }}" >> $GITHUB_ENV - echo "CACHE_TO=type=s3,region=${{ secrets.AWS_REGION }},bucket=${{ secrets.AWS_CACHE_BUCKET }},name=miden-${{ matrix.component }}" >> $GITHUB_ENV - - name: Set up Docker Buildx uses: docker/setup-buildx-action@v3 - with: - cache-binary: true - - name: Build Docker image - uses: docker/build-push-action@v5 + - name: Build and push + uses: docker/build-push-action@v6 with: push: false - file: ./bin/${{ matrix.component }}/Dockerfile - cache-from: ${{ env.CACHE_FROM || '' }} - cache-to: ${{ env.CACHE_TO || '' }} + file: ./bin/node/Dockerfile + cache-from: type=gha + # Only save cache on push into next + cache-to: ${{ github.event_name == 'push' && github.ref == 'refs/heads/next' && 'type=gha,mode=max' || '' }} diff --git a/bin/node/Dockerfile b/bin/node/Dockerfile index 832b0bb8d2..9778daec80 100644 --- a/bin/node/Dockerfile +++ b/bin/node/Dockerfile @@ -1,39 +1,47 @@ -FROM rust:1.90-slim-bullseye AS builder - +FROM rust:1.90-slim-bullseye AS chef # Install build dependencies. RocksDB is compiled from source by librocksdb-sys. RUN apt-get update && \ apt-get -y upgrade && \ - apt-get install -y llvm clang libclang-dev pkg-config libssl-dev libsqlite3-dev ca-certificates && \ + apt-get install -y \ + llvm \ + clang \ + libclang-dev \ + cmake \ + pkg-config \ + libssl-dev \ + libsqlite3-dev \ + ca-certificates && \ rm -rf /var/lib/apt/lists/* - +RUN cargo install cargo-chef WORKDIR /app -COPY ./Cargo.toml . -COPY ./Cargo.lock . -COPY ./bin ./bin -COPY ./crates ./crates -COPY ./proto ./proto -RUN cargo install --path bin/node --locked +FROM chef AS planner +COPY . . 
+RUN cargo chef prepare --recipe-path recipe.json -FROM debian:bullseye-slim +FROM chef AS builder +COPY --from=planner /app/recipe.json recipe.json +# Build dependencies - this is the caching Docker layer! +RUN cargo chef cook --release --recipe-path recipe.json +# Build application +COPY . . +RUN cargo build --release --locked --bin miden-node -# Update machine & install required packages -# The installation of sqlite3 is needed for correct function of the SQLite database +# Base line runtime image with runtime dependencies installed. +FROM debian:bullseye-slim AS runtime-base RUN apt-get update && \ apt-get -y upgrade && \ - apt-get install -y --no-install-recommends \ - sqlite3 \ + apt-get install -y --no-install-recommends sqlite3 \ && rm -rf /var/lib/apt/lists/* -COPY --from=builder /usr/local/cargo/bin/miden-node /usr/local/bin/miden-node - +FROM runtime-base AS runtime +COPY --from=builder /app/target/release/miden-node /usr/local/bin/miden-node LABEL org.opencontainers.image.authors=devops@miden.team \ org.opencontainers.image.url=https://0xMiden.github.io/ \ org.opencontainers.image.documentation=https://github.com/0xMiden/miden-node \ org.opencontainers.image.source=https://github.com/0xMiden/miden-node \ org.opencontainers.image.vendor=Miden \ org.opencontainers.image.licenses=MIT - ARG CREATED ARG VERSION ARG COMMIT @@ -43,6 +51,5 @@ LABEL org.opencontainers.image.created=$CREATED \ # Expose RPC port EXPOSE 57291 - # Miden node does not spawn sub-processes, so it can be used as the PID1 CMD miden-node From 37335ef2d53be6dc4da0b3855959a156b4708636 Mon Sep 17 00:00:00 2001 From: Bernhard Schuster Date: Wed, 4 Mar 2026 09:53:24 +0100 Subject: [PATCH 3/8] feat(store): prune in-memory `InnerForest` (#1635) --- CHANGELOG.md | 1 + Cargo.lock | 9 +- Cargo.toml | 3 +- crates/store/Cargo.toml | 1 + crates/store/src/db/mod.rs | 104 +- .../store/src/db/models/queries/accounts.rs | 17 +- crates/store/src/db/tests.rs | 921 +++++++++++++++++- 
crates/store/src/inner_forest/mod.rs | 539 +++++----- crates/store/src/inner_forest/tests.rs | 906 ++++++++++++----- crates/store/src/state/loader.rs | 2 +- crates/store/src/state/mod.rs | 116 ++- 11 files changed, 2065 insertions(+), 554 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 42b297d2ac..f39c4cae60 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -51,6 +51,7 @@ ### Enhancements +- Added cleanup of old account data from the in-memory forest ([#1175](https://github.com/0xMiden/miden-node/issues/1175)) - Added block validation endpoint to validator and integrated with block producer ([#1382](https://github.com/0xMiden/miden-node/pull/1381)). - Added support for timeouts in the WASM remote prover clients ([#1383](https://github.com/0xMiden/miden-node/pull/1383)). - Added mempool statistics to the block producer status in the `miden-network-monitor` binary ([#1392](https://github.com/0xMiden/miden-node/pull/1392)). diff --git a/Cargo.lock b/Cargo.lock index e8e6c19983..a8f696a4ad 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2586,9 +2586,9 @@ dependencies = [ [[package]] name = "miden-crypto" -version = "0.19.4" +version = "0.19.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e28b6e110f339c2edc2760a8cb94863f0a055ee658a49bc90c8560eff2feef4" +checksum = "999926d48cf0929a39e06ce22299084f11d307ca9e765801eb56bf192b07054b" dependencies = [ "blake3", "cc", @@ -2621,9 +2621,9 @@ dependencies = [ [[package]] name = "miden-crypto-derive" -version = "0.19.4" +version = "0.19.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f40e95b9c7c99ed6bbf073d9e02721d812dedd2c195019c0a0e0a3dbb9cbf034" +checksum = "3550b5656b791fec59c0b6089b4d0368db746a34749ccd47e59afb01aa877e9e" dependencies = [ "quote", "syn 2.0.114", @@ -2922,6 +2922,7 @@ dependencies = [ "rand_chacha 0.9.0", "regex", "serde", + "tempfile", "termtree", "thiserror 2.0.18", "tokio", diff --git a/Cargo.toml b/Cargo.toml index 
a1a9387756..7ffc9f6319 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -61,7 +61,7 @@ miden-tx-batch-prover = { version = "0.13" } # Other miden dependencies. These should align with those expected by miden-base. miden-air = { features = ["std", "testing"], version = "0.20" } -miden-crypto = { default-features = false, version = "0.19" } +miden-crypto = { version = "0.19.5" } # External dependencies anyhow = { version = "1.0" } @@ -87,6 +87,7 @@ rand = { version = "0.9" } rand_chacha = { version = "0.9" } rstest = { version = "0.26" } serde = { features = ["derive"], version = "1" } +tempfile = { version = "3.12" } thiserror = { default-features = false, version = "2.0" } tokio = { features = ["rt-multi-thread"], version = "1.46" } tokio-stream = { version = "0.1" } diff --git a/crates/store/Cargo.toml b/crates/store/Cargo.toml index 837f158859..b5344e8d99 100644 --- a/crates/store/Cargo.toml +++ b/crates/store/Cargo.toml @@ -55,6 +55,7 @@ miden-protocol = { default-features = true, features = ["testing"], works miden-standards = { features = ["testing"], workspace = true } rand = { workspace = true } regex = { version = "1.11" } +tempfile = { workspace = true } termtree = { version = "0.5" } [features] diff --git a/crates/store/src/db/mod.rs b/crates/store/src/db/mod.rs index 045ba9a9a2..a03d2c6ad1 100644 --- a/crates/store/src/db/mod.rs +++ b/crates/store/src/db/mod.rs @@ -1,4 +1,5 @@ use std::collections::{BTreeMap, BTreeSet, HashSet}; +use std::mem::size_of; use std::ops::RangeInclusive; use std::path::PathBuf; @@ -13,6 +14,7 @@ use diesel::{ }; use miden_node_proto::domain::account::{AccountInfo, AccountSummary}; use miden_node_proto::generated as proto; +use miden_node_utils::limiter::MAX_RESPONSE_PAYLOAD_BYTES; use miden_node_utils::tracing::OpenTelemetrySpanExt; use miden_protocol::Word; use miden_protocol::account::{AccountHeader, AccountId, AccountStorageHeader}; @@ -41,6 +43,13 @@ use crate::db::models::{Page, deserialize_raw_vec, queries}; use 
crate::errors::{DatabaseError, DatabaseSetupError, NoteSyncError, StateSyncError}; use crate::genesis::GenesisBlock; +const STORAGE_MAP_VALUE_PER_ROW_BYTES: usize = + 2 * size_of::() + size_of::() + size_of::(); + +fn default_storage_map_entries_limit() -> usize { + MAX_RESPONSE_PAYLOAD_BYTES / STORAGE_MAP_VALUE_PER_ROW_BYTES +} + pub(crate) mod manager; mod migrations; @@ -637,13 +646,106 @@ impl Db { &self, account_id: AccountId, block_range: RangeInclusive, + entries_limit: Option, ) -> Result { + let entries_limit = entries_limit.unwrap_or_else(default_storage_map_entries_limit); + self.transact("select storage map sync values", move |conn| { - models::queries::select_account_storage_map_values(conn, account_id, block_range) + models::queries::select_account_storage_map_values_paged( + conn, + account_id, + block_range, + entries_limit, + ) }) .await } + /// Reconstructs storage map details from the database for a specific slot at a block. + /// + /// Used as fallback when `InnerForest` cache misses (historical or evicted queries). + /// Rebuilds all entries by querying the DB and filtering to the specific slot. 
+ /// + /// Returns: + /// - `::LimitExceeded` when too many entries are present + /// - `::AllEntries` if the size is less than or equal given `entries_limit`, if any + pub(crate) async fn reconstruct_storage_map_from_db( + &self, + account_id: AccountId, + slot_name: miden_protocol::account::StorageSlotName, + block_num: BlockNumber, + entries_limit: Option, + ) -> Result { + use miden_node_proto::domain::account::{AccountStorageMapDetails, StorageMapEntries}; + use miden_protocol::EMPTY_WORD; + + // TODO this remains expensive with a large history until we implement pruning for DB + // columns + let mut values = Vec::new(); + let mut block_range_start = BlockNumber::GENESIS; + let entries_limit = entries_limit.unwrap_or_else(default_storage_map_entries_limit); + + let mut page = self + .select_storage_map_sync_values( + account_id, + block_range_start..=block_num, + Some(entries_limit), + ) + .await?; + + values.extend(page.values); + let mut last_block_included = page.last_block_included; + + loop { + if page.last_block_included == block_num || page.last_block_included < block_range_start + { + break; + } + + block_range_start = page.last_block_included.child(); + page = self + .select_storage_map_sync_values( + account_id, + block_range_start..=block_num, + Some(entries_limit), + ) + .await?; + + if page.last_block_included <= last_block_included { + return Ok(AccountStorageMapDetails::limit_exceeded(slot_name)); + } + + last_block_included = page.last_block_included; + values.extend(page.values); + } + + if page.last_block_included != block_num { + return Ok(AccountStorageMapDetails::limit_exceeded(slot_name)); + } + + // Filter to the specific slot and collect latest values per key + let mut latest_values = BTreeMap::::new(); + for value in values { + if value.slot_name == slot_name { + let raw_key = value.key; + latest_values.insert(raw_key, value.value); + } + } + + // Remove EMPTY_WORD entries (deletions) + latest_values.retain(|_, v| *v != EMPTY_WORD); + 
+ if latest_values.len() > AccountStorageMapDetails::MAX_RETURN_ENTRIES { + return Ok(AccountStorageMapDetails::limit_exceeded(slot_name)); + } + + let entries = Vec::from_iter(latest_values.into_iter()); + Ok(AccountStorageMapDetails { + slot_name, + entries: StorageMapEntries::AllEntries(entries), + }) + } + /// Emits size metrics for each table in the database, and the entire database. #[instrument(target = COMPONENT, skip_all, err)] pub async fn analyze_table_sizes(&self) -> Result<(), DatabaseError> { diff --git a/crates/store/src/db/models/queries/accounts.rs b/crates/store/src/db/models/queries/accounts.rs index 6f8e3834f6..c117e8ab94 100644 --- a/crates/store/src/db/models/queries/accounts.rs +++ b/crates/store/src/db/models/queries/accounts.rs @@ -653,19 +653,14 @@ impl StorageMapValue { /// /// * Response payload size: 0 <= size <= 2MB /// * Storage map values per response: 0 <= count <= (2MB / (2*Word + u32 + u8)) + 1 -pub(crate) fn select_account_storage_map_values( +pub(crate) fn select_account_storage_map_values_paged( conn: &mut SqliteConnection, account_id: AccountId, block_range: RangeInclusive, + limit: usize, ) -> Result { use schema::account_storage_map_values as t; - // TODO: These limits should be given by the protocol. 
- // See miden-base/issues/1770 for more details - pub const ROW_OVERHEAD_BYTES: usize = - 2 * size_of::() + size_of::() + size_of::(); // key + value + block_num + slot_idx - pub const MAX_ROWS: usize = MAX_RESPONSE_PAYLOAD_BYTES / ROW_OVERHEAD_BYTES; - if !account_id.has_public_state() { return Err(DatabaseError::AccountNotPublic(account_id)); } @@ -686,13 +681,13 @@ pub(crate) fn select_account_storage_map_values( .and(t::block_num.le(block_range.end().to_raw_sql())), ) .order(t::block_num.asc()) - .limit(i64::try_from(MAX_ROWS + 1).expect("limit fits within i64")) + .limit(i64::try_from(limit + 1).expect("limit fits within i64")) .load(conn)?; // Discard the last block in the response (assumes more than one block may be present) let (last_block_included, values) = if let Some(&(last_block_num, ..)) = raw.last() - && raw.len() > MAX_ROWS + && raw.len() > limit { // NOTE: If the query contains at least one more row than the amount of storage map updates // allowed in a single block for an account, then the response is guaranteed to have at @@ -708,7 +703,9 @@ pub(crate) fn select_account_storage_map_values( } else { ( *block_range.end(), - raw.into_iter().map(StorageMapValue::from_raw_row).collect::>()?, + raw.into_iter() + .map(StorageMapValue::from_raw_row) + .collect::, _>>()?, ) }; diff --git a/crates/store/src/db/tests.rs b/crates/store/src/db/tests.rs index 44b11c9b43..5b5cbd19c5 100644 --- a/crates/store/src/db/tests.rs +++ b/crates/store/src/db/tests.rs @@ -4,8 +4,9 @@ use std::num::NonZeroUsize; use std::sync::{Arc, Mutex}; +use assert_matches::assert_matches; use diesel::{Connection, SqliteConnection}; -use miden_node_proto::domain::account::AccountSummary; +use miden_node_proto::domain::account::{AccountSummary, StorageMapEntries}; use miden_node_utils::fee::{test_fee, test_fee_params}; use miden_protocol::account::auth::PublicKeyCommitment; use miden_protocol::account::delta::AccountUpdateDetails; @@ -36,6 +37,7 @@ use miden_protocol::block::{ }; use 
miden_protocol::crypto::dsa::ecdsa_k256_keccak::SecretKey; use miden_protocol::crypto::merkle::SparseMerklePath; +use miden_protocol::crypto::merkle::smt::SmtProof; use miden_protocol::crypto::rand::RpoRandomCoin; use miden_protocol::note::{ Note, @@ -71,6 +73,7 @@ use miden_standards::code_builder::CodeBuilder; use miden_standards::note::{NetworkAccountTarget, create_p2id_note}; use pretty_assertions::assert_eq; use rand::Rng; +use tempfile::tempdir; use super::{AccountInfo, NoteRecord, NullifierInfo}; use crate::db::TransactionSummary; @@ -78,6 +81,7 @@ use crate::db::migrations::apply_migrations; use crate::db::models::queries::{StorageMapValue, insert_account_storage_map_value}; use crate::db::models::{Page, queries, utils}; use crate::errors::DatabaseError; +use crate::inner_forest::HISTORICAL_BLOCK_RETENTION; fn create_db() -> SqliteConnection { let mut conn = SqliteConnection::establish(":memory:").expect("In memory sqlite always works"); @@ -1069,9 +1073,13 @@ fn sql_account_storage_map_values_insertion() { AccountDelta::new(account_id, storage1, AccountVaultDelta::default(), Felt::ONE).unwrap(); insert_account_delta(conn, account_id, block1, &delta1); - let storage_map_page = - queries::select_account_storage_map_values(conn, account_id, BlockNumber::GENESIS..=block1) - .unwrap(); + let storage_map_page = queries::select_account_storage_map_values_paged( + conn, + account_id, + BlockNumber::GENESIS..=block1, + 1024, + ) + .unwrap(); assert_eq!(storage_map_page.values.len(), 2, "expect 2 initial rows"); // Update key1 at block 2 @@ -1084,9 +1092,13 @@ fn sql_account_storage_map_values_insertion() { .unwrap(); insert_account_delta(conn, account_id, block2, &delta2); - let storage_map_values = - queries::select_account_storage_map_values(conn, account_id, BlockNumber::GENESIS..=block2) - .unwrap(); + let storage_map_values = queries::select_account_storage_map_values_paged( + conn, + account_id, + BlockNumber::GENESIS..=block2, + 1024, + ) + .unwrap(); 
assert_eq!(storage_map_values.values.len(), 3, "three rows (with duplicate key)"); // key1 should now be value3 at block2; key2 remains value2 at block1 @@ -1180,10 +1192,11 @@ fn select_storage_map_sync_values() { ) .unwrap(); - let page = queries::select_account_storage_map_values( + let page = queries::select_account_storage_map_values_paged( &mut conn, account_id, BlockNumber::from(2)..=BlockNumber::from(3), + 1024, ) .unwrap(); @@ -1236,10 +1249,11 @@ fn select_storage_map_sync_values_for_network_account() { ) .unwrap(); - let page = queries::select_account_storage_map_values( + let page = queries::select_account_storage_map_values_paged( &mut conn, account_id, BlockNumber::GENESIS..=block_num, + 1024, ) .unwrap(); @@ -1250,6 +1264,133 @@ fn select_storage_map_sync_values_for_network_account() { ); } +#[test] +fn select_storage_map_sync_values_paginates_until_last_block() { + let mut conn = create_db(); + let account_id = AccountId::try_from(ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE).unwrap(); + let slot_name = StorageSlotName::mock(7); + + let block1 = BlockNumber::from(1); + let block2 = BlockNumber::from(2); + let block3 = BlockNumber::from(3); + + create_block(&mut conn, block1); + create_block(&mut conn, block2); + create_block(&mut conn, block3); + + queries::upsert_accounts(&mut conn, &[mock_block_account_update(account_id, 0)], block1) + .unwrap(); + queries::upsert_accounts(&mut conn, &[mock_block_account_update(account_id, 1)], block2) + .unwrap(); + queries::upsert_accounts(&mut conn, &[mock_block_account_update(account_id, 2)], block3) + .unwrap(); + + queries::insert_account_storage_map_value( + &mut conn, + account_id, + block1, + slot_name.clone(), + num_to_word(1), + num_to_word(11), + ) + .unwrap(); + queries::insert_account_storage_map_value( + &mut conn, + account_id, + block2, + slot_name.clone(), + num_to_word(2), + num_to_word(22), + ) + .unwrap(); + queries::insert_account_storage_map_value( + &mut conn, + account_id, + block3, + 
slot_name.clone(), + num_to_word(3), + num_to_word(33), + ) + .unwrap(); + + let page = queries::select_account_storage_map_values_paged( + &mut conn, + account_id, + BlockNumber::GENESIS..=block3, + 1, + ) + .unwrap(); + + assert_eq!(page.last_block_included, block1, "should truncate at block 1"); + assert_eq!(page.values.len(), 1, "should include block 1 only"); +} + +#[tokio::test] +#[miden_node_test_macro::enable_logging] +async fn reconstruct_storage_map_from_db_pages_until_latest() { + let temp_dir = tempdir().unwrap(); + let db_path = temp_dir.path().join("store.sqlite"); + + let account_id = AccountId::try_from(ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE).unwrap(); + let slot_name = StorageSlotName::mock(9); + + let block1 = BlockNumber::from(1); + let block2 = BlockNumber::from(2); + let block3 = BlockNumber::from(3); + + let db = crate::db::Db::load(db_path).await.unwrap(); + let slot_name_for_db = slot_name.clone(); + db.query("insert paged values", move |db_conn| { + db_conn.transaction(|db_conn| { + apply_migrations(db_conn)?; + create_block(db_conn, block1); + create_block(db_conn, block2); + create_block(db_conn, block3); + + queries::upsert_accounts(db_conn, &[mock_block_account_update(account_id, 0)], block1)?; + queries::upsert_accounts(db_conn, &[mock_block_account_update(account_id, 1)], block2)?; + queries::upsert_accounts(db_conn, &[mock_block_account_update(account_id, 2)], block3)?; + + queries::insert_account_storage_map_value( + db_conn, + account_id, + block1, + slot_name_for_db.clone(), + num_to_word(1), + num_to_word(10), + )?; + queries::insert_account_storage_map_value( + db_conn, + account_id, + block2, + slot_name_for_db.clone(), + num_to_word(2), + num_to_word(20), + )?; + queries::insert_account_storage_map_value( + db_conn, + account_id, + block3, + slot_name_for_db.clone(), + num_to_word(3), + num_to_word(30), + )?; + Ok::<_, DatabaseError>(()) + }) + }) + .await + .unwrap(); + + let details = db + 
.reconstruct_storage_map_from_db(account_id, slot_name.clone(), block3, Some(1)) + .await + .unwrap(); + + assert_matches!(details.entries, StorageMapEntries::AllEntries(entries) => { + assert_eq!(entries.len(), 3); + }); +} + // UTILITIES // ------------------------------------------------------------------------------------------- fn num_to_word(n: u64) -> Word { @@ -2153,10 +2294,11 @@ fn db_roundtrip_storage_map_values() { .unwrap(); // Retrieve - let page = queries::select_account_storage_map_values( + let page = queries::select_account_storage_map_values_paged( &mut conn, account_id, BlockNumber::GENESIS..=block_num, + 1024, ) .unwrap(); @@ -2280,7 +2422,7 @@ fn db_roundtrip_account_storage_with_maps() { #[test] #[miden_node_test_macro::enable_logging] -fn test_note_metadata_with_attachment_roundtrip() { +fn db_roundtrip_note_metadata_attachment() { let mut conn = create_db(); let block_num = BlockNumber::from(1); create_block(&mut conn, block_num); @@ -2331,3 +2473,760 @@ fn test_note_metadata_with_attachment_roundtrip() { "NetworkAccountTarget should have the correct target account ID" ); } + +#[test] +#[miden_node_test_macro::enable_logging] +fn inner_forest_matches_db_storage_map_roots_across_updates() { + use std::collections::BTreeMap; + + use miden_protocol::account::delta::{StorageMapDelta, StorageSlotDelta}; + use miden_protocol::crypto::merkle::smt::Smt; + + use crate::inner_forest::InnerForest; + + /// Reconstructs storage map root from DB entries at a specific block. 
+ fn reconstruct_storage_map_root_from_db( + conn: &mut SqliteConnection, + account_id: AccountId, + slot_name: &StorageSlotName, + block_num: BlockNumber, + ) -> Option { + let storage_values = queries::select_account_storage_map_values_paged( + conn, + account_id, + BlockNumber::GENESIS..=block_num, + 1024, + ) + .unwrap(); + + // Filter to the specific slot and get most recent value for each key + let mut latest_values: BTreeMap = BTreeMap::new(); + for value in storage_values.values { + if value.slot_name == *slot_name { + latest_values.insert(value.key, value.value); + } + } + + if latest_values.is_empty() { + return None; + } + + // Build SMT from entries + let entries: Vec<(Word, Word)> = latest_values + .into_iter() + .filter_map(|(key, value)| { + if value == EMPTY_WORD { + None + } else { + // Keys are stored unhashed in DB, match InnerForest behavior + Some((key, value)) + } + }) + .collect(); + + if entries.is_empty() { + use miden_protocol::crypto::merkle::EmptySubtreeRoots; + use miden_protocol::crypto::merkle::smt::SMT_DEPTH; + return Some(*EmptySubtreeRoots::entry(SMT_DEPTH, 0)); + } + + let mut smt = Smt::default(); + for (key, value) in entries { + smt.insert(miden_protocol::account::StorageMap::hash_key(key), value).unwrap(); + } + + Some(smt.root()) + } + + let mut conn = create_db(); + let mut forest = InnerForest::new(); + let account_id = AccountId::try_from(ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE).unwrap(); + + let block1 = BlockNumber::from(1); + let block2 = BlockNumber::from(2); + let block3 = BlockNumber::from(3); + + create_block(&mut conn, block1); + create_block(&mut conn, block2); + create_block(&mut conn, block3); + + queries::upsert_accounts(&mut conn, &[mock_block_account_update(account_id, 0)], block1) + .unwrap(); + queries::upsert_accounts(&mut conn, &[mock_block_account_update(account_id, 1)], block2) + .unwrap(); + queries::upsert_accounts(&mut conn, &[mock_block_account_update(account_id, 2)], block3) + .unwrap(); + 
+ let slot_map = StorageSlotName::mock(1); + let slot_value = StorageSlotName::mock(2); + + let key1 = num_to_word(100); + let key2 = num_to_word(200); + let value1 = num_to_word(1000); + let value2 = num_to_word(2000); + let value3 = num_to_word(3000); + + // Block 1: Add storage map entries and a storage value + let mut map_delta_1 = StorageMapDelta::default(); + map_delta_1.insert(key1, value1); + map_delta_1.insert(key2, value2); + + let raw_1 = BTreeMap::from_iter([ + (slot_map.clone(), StorageSlotDelta::Map(map_delta_1)), + (slot_value.clone(), StorageSlotDelta::Value(value1)), + ]); + let storage_1 = AccountStorageDelta::from_raw(raw_1); + let delta_1 = + AccountDelta::new(account_id, storage_1.clone(), AccountVaultDelta::default(), Felt::ONE) + .unwrap(); + + insert_account_delta(&mut conn, account_id, block1, &delta_1); + forest.update_account(block1, &delta_1).unwrap(); + + // Verify forest matches DB for block 1 + let forest_root_1 = forest.get_storage_map_root(account_id, &slot_map, block1).unwrap(); + let db_root_1 = reconstruct_storage_map_root_from_db(&mut conn, account_id, &slot_map, block1) + .expect("DB should have storage map root"); + + assert_eq!( + forest_root_1, db_root_1, + "Storage map root at block 1 should match between InnerForest and DB" + ); + + // Block 2: Delete storage map entry (set to EMPTY_WORD) and delete storage value + let mut map_delta_2 = StorageMapDelta::default(); + map_delta_2.insert(key1, EMPTY_WORD); + + let raw_2 = BTreeMap::from_iter([ + (slot_map.clone(), StorageSlotDelta::Map(map_delta_2)), + (slot_value.clone(), StorageSlotDelta::Value(EMPTY_WORD)), + ]); + let storage_2 = AccountStorageDelta::from_raw(raw_2); + let delta_2 = AccountDelta::new( + account_id, + storage_2.clone(), + AccountVaultDelta::default(), + Felt::new(2), + ) + .unwrap(); + + insert_account_delta(&mut conn, account_id, block2, &delta_2); + forest.update_account(block2, &delta_2).unwrap(); + + // Verify forest matches DB for block 2 + let 
forest_root_2 = forest.get_storage_map_root(account_id, &slot_map, block2).unwrap(); + let db_root_2 = reconstruct_storage_map_root_from_db(&mut conn, account_id, &slot_map, block2) + .expect("DB should have storage map root"); + + assert_eq!( + forest_root_2, db_root_2, + "Storage map root at block 2 should match between InnerForest and DB" + ); + + // Block 3: Re-add same value as block 1 and add different map entry + let mut map_delta_3 = StorageMapDelta::default(); + map_delta_3.insert(key2, value3); // Update existing key + + let raw_3 = BTreeMap::from_iter([ + (slot_map.clone(), StorageSlotDelta::Map(map_delta_3)), + (slot_value.clone(), StorageSlotDelta::Value(value1)), // Same as block 1 + ]); + let storage_3 = AccountStorageDelta::from_raw(raw_3); + let delta_3 = AccountDelta::new( + account_id, + storage_3.clone(), + AccountVaultDelta::default(), + Felt::new(3), + ) + .unwrap(); + + insert_account_delta(&mut conn, account_id, block3, &delta_3); + forest.update_account(block3, &delta_3).unwrap(); + + // Verify forest matches DB for block 3 + let forest_root_3 = forest.get_storage_map_root(account_id, &slot_map, block3).unwrap(); + let db_root_3 = reconstruct_storage_map_root_from_db(&mut conn, account_id, &slot_map, block3) + .expect("DB should have storage map root"); + + assert_eq!( + forest_root_3, db_root_3, + "Storage map root at block 3 should match between InnerForest and DB" + ); + + // Verify we can query historical roots + let forest_root_1_check = forest.get_storage_map_root(account_id, &slot_map, block1).unwrap(); + let db_root_1_check = + reconstruct_storage_map_root_from_db(&mut conn, account_id, &slot_map, block1) + .expect("DB should have storage map root"); + assert_eq!( + forest_root_1_check, db_root_1_check, + "Historical query for block 1 should match" + ); + + // Verify roots are different across blocks (since we modified the map) + assert_ne!(forest_root_1, forest_root_2, "Roots should differ after deletion"); + 
assert_ne!(forest_root_2, forest_root_3, "Roots should differ after modification"); +} + +#[test] +#[miden_node_test_macro::enable_logging] +fn inner_forest_shared_roots_not_deleted_prematurely() { + use std::collections::BTreeMap; + + use miden_protocol::account::delta::{StorageMapDelta, StorageSlotDelta}; + use miden_protocol::testing::account_id::{ + ACCOUNT_ID_REGULAR_PRIVATE_ACCOUNT_UPDATABLE_CODE, + ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE_2, + }; + + use crate::inner_forest::InnerForest; + + let mut forest = InnerForest::new(); + let account1 = AccountId::try_from(ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE).unwrap(); + let account2 = AccountId::try_from(ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE_2).unwrap(); + let account3 = AccountId::try_from(ACCOUNT_ID_REGULAR_PRIVATE_ACCOUNT_UPDATABLE_CODE).unwrap(); + + let block01 = BlockNumber::from(1); + let block02 = BlockNumber::from(2); + let block50 = BlockNumber::from(HISTORICAL_BLOCK_RETENTION); + let block51 = BlockNumber::from(HISTORICAL_BLOCK_RETENTION + 1); + let block52 = BlockNumber::from(HISTORICAL_BLOCK_RETENTION + 2); + let block53 = BlockNumber::from(HISTORICAL_BLOCK_RETENTION + 3); + let slot_name = StorageSlotName::mock(1); + + let key1 = num_to_word(100); + let key2 = num_to_word(200); + let value1 = num_to_word(1000); + let value2 = num_to_word(2000); + + // All three accounts add identical storage maps at block 1 + let mut map_delta = StorageMapDelta::default(); + map_delta.insert(key1, value1); + map_delta.insert(key2, value2); + + // Setups a single slot with a map and two key-value-pairs + let raw = BTreeMap::from_iter([(slot_name.clone(), StorageSlotDelta::Map(map_delta.clone()))]); + let storage = AccountStorageDelta::from_raw(raw); + + // Account 1 + let delta1 = + AccountDelta::new(account1, storage.clone(), AccountVaultDelta::default(), Felt::ONE) + .unwrap(); + forest.update_account(block01, &delta1).unwrap(); + + // Account 2 (same storage) + let delta2 = + 
AccountDelta::new(account2, storage.clone(), AccountVaultDelta::default(), Felt::ONE) + .unwrap(); + forest.update_account(block02, &delta2).unwrap(); + + // Account 3 (same storage) + let delta3 = + AccountDelta::new(account3, storage.clone(), AccountVaultDelta::default(), Felt::ONE) + .unwrap(); + forest.update_account(block02, &delta3).unwrap(); + + // All three accounts should have the same root (structural sharing in SmtForest) + let root1 = forest.get_storage_map_root(account1, &slot_name, block01).unwrap(); + let root2 = forest.get_storage_map_root(account2, &slot_name, block02).unwrap(); + let root3 = forest.get_storage_map_root(account3, &slot_name, block02).unwrap(); + + // identical maps means identical roots + assert_eq!(root1, root2); + assert_eq!(root2, root3); + + // Verify we can get witnesses for all three accounts and verify them against roots + let witness1 = forest + .get_storage_map_witness(account1, &slot_name, block01, key1) + .expect("Account1 should have accessible storage map"); + let witness2 = forest + .get_storage_map_witness(account2, &slot_name, block02, key1) + .expect("Account2 should have accessible storage map"); + let witness3 = forest + .get_storage_map_witness(account3, &slot_name, block02, key1) + .expect("Account3 should have accessible storage map"); + + // Verify witnesses against storage map roots using SmtProof::compute_root + let proof1: SmtProof = witness1.into(); + assert_eq!(proof1.compute_root(), root1, "Witness1 must verify against root1"); + + let proof2: SmtProof = witness2.into(); + assert_eq!(proof2.compute_root(), root2, "Witness2 must verify against root2"); + + let proof3: SmtProof = witness3.into(); + assert_eq!(proof3.compute_root(), root3, "Witness3 must verify against root3"); + + let total_roots_removed = forest.prune(block50); + assert_eq!(total_roots_removed, 0); + + // Update accounts 1,2,3 + let mut map_delta_update = StorageMapDelta::default(); + map_delta_update.insert(key1, num_to_word(1001)); // 
Slight change + let raw_update = + BTreeMap::from_iter([(slot_name.clone(), StorageSlotDelta::Map(map_delta_update))]); + let storage_update = AccountStorageDelta::from_raw(raw_update); + let delta2_update = AccountDelta::new( + account2, + storage_update.clone(), + AccountVaultDelta::default(), + Felt::new(2), + ) + .unwrap(); + forest.update_account(block51, &delta2_update).unwrap(); + + let delta3_update = AccountDelta::new( + account3, + storage_update.clone(), + AccountVaultDelta::default(), + Felt::new(2), + ) + .unwrap(); + forest.update_account(block52, &delta3_update).unwrap(); + + // Prune at block 52 + let total_roots_removed = forest.prune(block52); + assert_eq!(total_roots_removed, 0); + + // ensure the root is still accessible + let account1_root_after_prune = forest.get_storage_map_root(account1, &slot_name, block01); + assert!(account1_root_after_prune.is_some()); + + let delta1_update = + AccountDelta::new(account1, storage_update, AccountVaultDelta::default(), Felt::new(2)) + .unwrap(); + forest.update_account(block53, &delta1_update).unwrap(); + + // Prune at block 53 + let total_roots_removed = forest.prune(block53); + assert_eq!(total_roots_removed, 0); + + // Account2 and Account3 should still be accessible at their recent blocks + let account1_root = forest.get_storage_map_root(account1, &slot_name, block53).unwrap(); + let account2_root = forest.get_storage_map_root(account2, &slot_name, block51).unwrap(); + let account3_root = forest.get_storage_map_root(account3, &slot_name, block52).unwrap(); + + // Verify we can still get witnesses for account2 and account3 and verify against roots + let witness1_after = forest + .get_storage_map_witness(account2, &slot_name, block51, key1) + .expect("Account2 should still have accessible storage map after pruning account1"); + let witness2_after = forest + .get_storage_map_witness(account3, &slot_name, block52, key1) + .expect("Account3 should still have accessible storage map after pruning account1"); 
+ + // Verify witnesses against storage map roots + let proof1: SmtProof = witness1_after.into(); + assert_eq!(proof1.compute_root(), account2_root,); + let proof2: SmtProof = witness2_after.into(); + assert_eq!(proof2.compute_root(), account3_root,); + let account1_witness = forest + .get_storage_map_witness(account1, &slot_name, block53, key1) + .expect("Account1 should still have accessible storage map after pruning"); + let account1_proof: SmtProof = account1_witness.into(); + assert_eq!(account1_proof.compute_root(), account1_root,); +} + +#[test] +#[miden_node_test_macro::enable_logging] +fn inner_forest_retains_latest_after_100_blocks_and_pruning() { + use std::collections::BTreeMap; + + use miden_protocol::account::delta::{StorageMapDelta, StorageSlotDelta}; + + use crate::inner_forest::{HISTORICAL_BLOCK_RETENTION, InnerForest}; + + let mut forest = InnerForest::new(); + let account_id = AccountId::try_from(ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE).unwrap(); + let faucet_id = AccountId::try_from(ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET).unwrap(); + + let slot_map = StorageSlotName::mock(1); + + let key1 = num_to_word(100); + let key2 = num_to_word(200); + let value1 = num_to_word(1000); + let value2 = num_to_word(2000); + + // Block 1: Apply initial update with vault and storage + let block_1 = BlockNumber::from(1); + + // Create storage map with two entries + let mut map_delta = StorageMapDelta::default(); + map_delta.insert(key1, value1); + map_delta.insert(key2, value2); + + let raw = BTreeMap::from_iter([(slot_map.clone(), StorageSlotDelta::Map(map_delta))]); + let storage_delta = AccountStorageDelta::from_raw(raw); + + // Create vault with one asset + let asset = FungibleAsset::new(faucet_id, 100).unwrap(); + let mut vault_delta = AccountVaultDelta::default(); + vault_delta.add_asset(asset.into()).unwrap(); + + let delta_1 = AccountDelta::new(account_id, storage_delta, vault_delta, Felt::ONE).unwrap(); + + forest.update_account(block_1, 
&delta_1).unwrap(); + + // Capture the roots from block 1 + let initial_vault_root = forest.get_vault_root(account_id, block_1).unwrap(); + let initial_storage_map_root = + forest.get_storage_map_root(account_id, &slot_map, block_1).unwrap(); + + // Blocks 2-100: Do nothing (no updates to this account) + // Simulate other activity by just advancing to block 100 + + let block_100 = BlockNumber::from(100); + + assert!(forest.get_vault_root(account_id, block_100).is_some()); + assert_matches!( + forest.get_storage_map_root(account_id, &slot_map, block_100), + Some(root) if root == initial_storage_map_root + ); + + let total_roots_removed = forest.prune(block_100); + + let cutoff_block = 100 - HISTORICAL_BLOCK_RETENTION; + assert_eq!(cutoff_block, 50, "Cutoff should be block 50 (100 - HISTORICAL_BLOCK_RETENTION)"); + assert_eq!(total_roots_removed, 0); + + assert!(forest.get_vault_root(account_id, block_100).is_some()); + assert_matches!( + forest.get_storage_map_root(account_id, &slot_map, block_100), + Some(root) if root == initial_storage_map_root + ); + + let witness = forest.get_storage_map_witness(account_id, &slot_map, block_100, key1); + assert!(witness.is_ok()); + + // Now add an update at block 51 (within retention window) to test that old entries + // get pruned when newer entries exist + let block_51 = BlockNumber::from(51); + + // Update with new values + let value1_new = num_to_word(3000); + let mut map_delta_51 = StorageMapDelta::default(); + map_delta_51.insert(key1, value1_new); + + let raw_51 = BTreeMap::from_iter([(slot_map.clone(), StorageSlotDelta::Map(map_delta_51))]); + let storage_delta_51 = AccountStorageDelta::from_raw(raw_51); + + let asset_51 = FungibleAsset::new(faucet_id, 200).unwrap(); + let mut vault_delta_51 = AccountVaultDelta::default(); + vault_delta_51.add_asset(asset_51.into()).unwrap(); + + let delta_51 = + AccountDelta::new(account_id, storage_delta_51, vault_delta_51, Felt::new(51)).unwrap(); + + forest.update_account(block_51, 
&delta_51).unwrap(); + + // Prune again at block 100 + let total_roots_removed = forest.prune(block_100); + + assert_eq!(total_roots_removed, 0); + + let vault_root_at_51 = forest + .get_vault_root(account_id, block_51) + .expect("Should have vault root at block 51"); + let storage_root_at_51 = forest + .get_storage_map_root(account_id, &slot_map, block_51) + .expect("Should have storage root at block 51"); + + assert_ne!(vault_root_at_51, initial_vault_root); + + let witness = forest + .get_storage_map_witness(account_id, &slot_map, block_51, key1) + .expect("Should be able to get witness for key1"); + + let proof: SmtProof = witness.into(); + assert_eq!( + proof.compute_root(), + storage_root_at_51, + "Witness must verify against storage root" + ); + + let vault_root_at_1 = forest.get_vault_root(account_id, block_1); + assert!(vault_root_at_1.is_some()); +} + +#[test] +#[miden_node_test_macro::enable_logging] +fn inner_forest_preserves_most_recent_vault_only() { + use crate::inner_forest::InnerForest; + + let mut forest = InnerForest::new(); + let account_id = AccountId::try_from(ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE).unwrap(); + let faucet_id = AccountId::try_from(ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET).unwrap(); + + // Block 1: Create vault with asset + let block_1 = BlockNumber::from(1); + let asset = FungibleAsset::new(faucet_id, 500).unwrap(); + let mut vault_delta = AccountVaultDelta::default(); + vault_delta.add_asset(asset.into()).unwrap(); + + let delta_1 = + AccountDelta::new(account_id, AccountStorageDelta::default(), vault_delta, Felt::ONE) + .unwrap(); + + forest.update_account(block_1, &delta_1).unwrap(); + + let initial_vault_root = forest.get_vault_root(account_id, block_1).unwrap(); + + // Advance 100 blocks without any updates + let block_100 = BlockNumber::from(100); + + // Prune at block 100 + let total_roots_removed = forest.prune(block_100); + + // Vault from block 1 should NOT be pruned (it's the most recent) + assert_eq!( + 
total_roots_removed, 0, + "Should NOT prune vault root (it's the most recent for this account)" + ); + + // Verify vault is still accessible at block 1 + let vault_root_at_1 = forest + .get_vault_root(account_id, block_1) + .expect("Should still have vault root at block 1"); + assert_eq!(vault_root_at_1, initial_vault_root, "Vault root should be preserved"); + + // Verify we can get witnesses for the vault and verify against vault root + let witnesses = forest + .get_vault_asset_witnesses( + account_id, + block_1, + [AssetVaultKey::new_unchecked(asset.vault_key().into())].into(), + ) + .expect("Should be able to get vault witness after pruning"); + + assert_eq!(witnesses.len(), 1, "Should have one witness"); + let witness = &witnesses[0]; + let proof: SmtProof = witness.clone().into(); + assert_eq!( + proof.compute_root(), + vault_root_at_1, + "Vault witness must verify against vault root" + ); +} + +#[test] +#[miden_node_test_macro::enable_logging] +fn inner_forest_preserves_most_recent_storage_map_only() { + use std::collections::BTreeMap; + + use miden_protocol::account::delta::{StorageMapDelta, StorageSlotDelta}; + + use crate::inner_forest::InnerForest; + + let mut forest = InnerForest::new(); + let account_id = AccountId::try_from(ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE).unwrap(); + + let slot_map = StorageSlotName::mock(1); + let key1 = num_to_word(100); + let value1 = num_to_word(1000); + + // Block 1: Create storage map + let block_1 = BlockNumber::from(1); + let mut map_delta = StorageMapDelta::default(); + map_delta.insert(key1, value1); + + let raw = BTreeMap::from_iter([(slot_map.clone(), StorageSlotDelta::Map(map_delta))]); + let storage_delta = AccountStorageDelta::from_raw(raw); + + let delta_1 = + AccountDelta::new(account_id, storage_delta, AccountVaultDelta::default(), Felt::ONE) + .unwrap(); + + forest.update_account(block_1, &delta_1).unwrap(); + + let initial_storage_root = forest.get_storage_map_root(account_id, &slot_map, 
block_1).unwrap(); + + // Advance 100 blocks without any updates + let block_100 = BlockNumber::from(100); + + // Prune at block 100 + let total_roots_removed = forest.prune(block_100); + + // Storage map from block 1 should NOT be pruned (it's the most recent) + assert_eq!(total_roots_removed, 0, "No vault roots to prune"); + + // Verify storage map is still accessible at block 1 + let storage_root_at_1 = forest + .get_storage_map_root(account_id, &slot_map, block_1) + .expect("Should still have storage root at block 1"); + assert_eq!(storage_root_at_1, initial_storage_root, "Storage root should be preserved"); + + // Verify we can get witnesses for the storage map and verify against storage root + let witness = forest + .get_storage_map_witness(account_id, &slot_map, block_1, key1) + .expect("Should be able to get storage witness after pruning"); + + let proof: SmtProof = witness.into(); + assert_eq!( + proof.compute_root(), + storage_root_at_1, + "Storage witness must verify against storage root" + ); + + // Verify we can get all entries +} + +#[test] +#[miden_node_test_macro::enable_logging] +fn inner_forest_preserves_most_recent_storage_value_slot() { + use std::collections::BTreeMap; + + use miden_protocol::account::delta::StorageSlotDelta; + + use crate::inner_forest::InnerForest; + + let mut forest = InnerForest::new(); + let account_id = AccountId::try_from(ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE).unwrap(); + + let slot_value = StorageSlotName::mock(1); + let value1 = num_to_word(5000); + + // Block 1: Create storage value slot + let block_1 = BlockNumber::from(1); + + let raw = BTreeMap::from_iter([(slot_value.clone(), StorageSlotDelta::Value(value1))]); + let storage_delta = AccountStorageDelta::from_raw(raw); + + let delta_1 = + AccountDelta::new(account_id, storage_delta, AccountVaultDelta::default(), Felt::ONE) + .unwrap(); + + forest.update_account(block_1, &delta_1).unwrap(); + + // Note: Value slots don't have roots in InnerForest - 
they're just part of the + // account storage header. The InnerForest only tracks map slots. + // So there's nothing to verify for value slots in the forest. + + // This test documents that value slots are NOT tracked in InnerForest + // (they don't need to be, since their digest is 1:1 with the value) + + // Advance 100 blocks without any updates + let block_100 = BlockNumber::from(100); + + // Prune at block 100 + let total_roots_removed = forest.prune(block_100); + + // No roots should be pruned because there are no map slots + assert_eq!(total_roots_removed, 0, "No vault roots in this test"); + + // Verify no storage map roots exist for this account + let storage_root = forest.get_storage_map_root(account_id, &slot_value, block_1); + assert!( + storage_root.is_none(), + "Value slots don't have storage map roots in InnerForest" + ); +} + +#[test] +#[miden_node_test_macro::enable_logging] +fn inner_forest_preserves_mixed_slots_independently() { + use std::collections::BTreeMap; + + use miden_protocol::account::delta::{StorageMapDelta, StorageSlotDelta}; + + use crate::inner_forest::InnerForest; + + let mut forest = InnerForest::new(); + let account_id = AccountId::try_from(ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE).unwrap(); + let faucet_id = AccountId::try_from(ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET).unwrap(); + + let slot_map_a = StorageSlotName::mock(1); + let slot_map_b = StorageSlotName::mock(2); + let slot_value = StorageSlotName::mock(3); + + let key1 = num_to_word(100); + let value1 = num_to_word(1000); + let value_slot_data = num_to_word(5000); + + // Block 1: Create vault + two map slots + one value slot + let block_1 = BlockNumber::from(1); + + let asset = FungibleAsset::new(faucet_id, 100).unwrap(); + let mut vault_delta = AccountVaultDelta::default(); + vault_delta.add_asset(asset.into()).unwrap(); + + let mut map_delta_a = StorageMapDelta::default(); + map_delta_a.insert(key1, value1); + + let mut map_delta_b = StorageMapDelta::default(); + 
map_delta_b.insert(key1, value1); + + let raw = BTreeMap::from_iter([ + (slot_map_a.clone(), StorageSlotDelta::Map(map_delta_a)), + (slot_map_b.clone(), StorageSlotDelta::Map(map_delta_b)), + (slot_value.clone(), StorageSlotDelta::Value(value_slot_data)), + ]); + let storage_delta = AccountStorageDelta::from_raw(raw); + + let delta_1 = AccountDelta::new(account_id, storage_delta, vault_delta, Felt::ONE).unwrap(); + + forest.update_account(block_1, &delta_1).unwrap(); + + let initial_vault_root = forest.get_vault_root(account_id, block_1).unwrap(); + let initial_map_a_root = forest.get_storage_map_root(account_id, &slot_map_a, block_1).unwrap(); + let initial_map_b_root = forest.get_storage_map_root(account_id, &slot_map_b, block_1).unwrap(); + + // Block 51: Update only map_a (within retention window) + let block_51 = BlockNumber::from(51); + let value2 = num_to_word(2000); + + let mut map_delta_a_update = StorageMapDelta::default(); + map_delta_a_update.insert(key1, value2); + + let raw_51 = + BTreeMap::from_iter([(slot_map_a.clone(), StorageSlotDelta::Map(map_delta_a_update))]); + let storage_delta_51 = AccountStorageDelta::from_raw(raw_51); + + let delta_51 = AccountDelta::new( + account_id, + storage_delta_51, + AccountVaultDelta::default(), + Felt::new(51), + ) + .unwrap(); + + forest.update_account(block_51, &delta_51).unwrap(); + + // Advance to block 100 + let block_100 = BlockNumber::from(100); + + // Prune at block 100 + let total_roots_removed = forest.prune(block_100); + + // Vault: block 1 is most recent, should NOT be pruned + // Map A: block 1 is old (block 51 is newer), SHOULD be pruned + // Map B: block 1 is most recent, should NOT be pruned + assert_eq!( + total_roots_removed, 0, + "Vault root from block 1 should NOT be pruned (most recent)" + ); + + // Verify vault is still accessible + let vault_root_at_1 = + forest.get_vault_root(account_id, block_1).expect("Vault should be accessible"); + assert_eq!(vault_root_at_1, initial_vault_root, "Vault 
should be from block 1"); + + // Verify map_a is accessible (from block 51) + let map_a_root_at_51 = forest + .get_storage_map_root(account_id, &slot_map_a, block_51) + .expect("Map A should be accessible"); + assert_ne!( + map_a_root_at_51, initial_map_a_root, + "Map A should be from block 51, not block 1" + ); + + // Verify map_b is still accessible (from block 1) + let map_b_root_at_1 = forest + .get_storage_map_root(account_id, &slot_map_b, block_1) + .expect("Map B should be accessible"); + assert_eq!( + map_b_root_at_1, initial_map_b_root, + "Map B should still be from block 1 (most recent)" + ); + + // Verify map_a block 1 is no longer accessible + let map_a_root_at_1 = forest.get_storage_map_root(account_id, &slot_map_a, block_1); + assert!(map_a_root_at_1.is_some(), "Map A block 1 should be pruned"); +} diff --git a/crates/store/src/inner_forest/mod.rs b/crates/store/src/inner_forest/mod.rs index 0429864067..4b2376ae69 100644 --- a/crates/store/src/inner_forest/mod.rs +++ b/crates/store/src/inner_forest/mod.rs @@ -1,6 +1,20 @@ -use std::collections::{BTreeMap, BTreeSet}; - -use miden_node_proto::domain::account::{AccountStorageMapDetails, StorageMapEntries}; +use std::collections::BTreeSet; + +use miden_crypto::hash::rpo::Rpo256; +use miden_crypto::merkle::smt::{ + ForestInMemoryBackend, + ForestOperation, + LargeSmtForest, + LargeSmtForestError, + LineageId, + RootInfo, + SMT_DEPTH, + SmtUpdateBatch, + TreeId, +}; +use miden_crypto::merkle::{EmptySubtreeRoots, MerkleError}; +use miden_node_proto::domain::account::AccountStorageMapDetails; +use miden_node_utils::ErrorReport; use miden_protocol::account::delta::{AccountDelta, AccountStorageDelta, AccountVaultDelta}; use miden_protocol::account::{ AccountId, @@ -11,30 +25,33 @@ use miden_protocol::account::{ }; use miden_protocol::asset::{Asset, AssetVaultKey, AssetWitness, FungibleAsset}; use miden_protocol::block::BlockNumber; -use miden_protocol::crypto::merkle::smt::{SMT_DEPTH, SmtForest}; -use 
miden_protocol::crypto::merkle::{EmptySubtreeRoots, MerkleError}; use miden_protocol::errors::{AssetError, StorageMapError}; +use miden_protocol::utils::Serializable; use miden_protocol::{EMPTY_WORD, Word}; use thiserror::Error; +use tracing::instrument; + +use crate::COMPONENT; #[cfg(test)] mod tests; +// CONSTANTS +// ================================================================================================ + +/// Number of historical blocks to retain in the in-memory forest. +/// Entries older than `chain_tip - HISTORICAL_BLOCK_RETENTION` will be pruned. +pub const HISTORICAL_BLOCK_RETENTION: u32 = 50; + // ERRORS // ================================================================================================ #[derive(Debug, Error)] pub enum InnerForestError { - #[error( - "balance underflow: account {account_id}, faucet {faucet_id}, \ - previous balance {prev_balance}, delta {delta}" - )] - BalanceUnderflow { - account_id: AccountId, - faucet_id: AccountId, - prev_balance: u64, - delta: i64, - }, + #[error(transparent)] + Asset(#[from] AssetError), + #[error(transparent)] + Forest(#[from] LargeSmtForestError), } #[derive(Debug, Error)] @@ -54,31 +71,19 @@ pub enum WitnessError { /// Container for forest-related state that needs to be updated atomically. pub(crate) struct InnerForest { - /// `SmtForest` for efficient account storage reconstruction. + /// `LargeSmtForest` for efficient account storage reconstruction. /// Populated during block import with storage and vault SMTs. - forest: SmtForest, - - /// Maps (`account_id`, `slot_name`, `block_num`) to SMT root. - /// Populated during block import for all storage map slots. - storage_map_roots: BTreeMap<(AccountId, StorageSlotName, BlockNumber), Word>, - - /// Maps (`account_id`, `slot_name`, `block_num`) to all key-value entries in that storage map. - /// Accumulated from deltas - each block's entries include all entries up to that point. 
- storage_entries: BTreeMap<(AccountId, StorageSlotName, BlockNumber), BTreeMap>, - - /// Maps (`account_id`, `block_num`) to vault SMT root. - /// Tracks asset vault versions across all blocks with structural sharing. - vault_roots: BTreeMap<(AccountId, BlockNumber), Word>, + forest: LargeSmtForest, } impl InnerForest { pub(crate) fn new() -> Self { - Self { - forest: SmtForest::new(), - storage_map_roots: BTreeMap::new(), - storage_entries: BTreeMap::new(), - vault_roots: BTreeMap::new(), - } + Self { forest: Self::create_forest() } + } + + fn create_forest() -> LargeSmtForest { + let backend = ForestInMemoryBackend::new(); + LargeSmtForest::new(backend).expect("in-memory backend should initialize") } // HELPERS @@ -89,38 +94,147 @@ impl InnerForest { *EmptySubtreeRoots::entry(SMT_DEPTH, 0) } - /// Retrieves a vault root for the specified account at or before the specified block. + #[cfg(test)] + fn tree_id_for_root( + &self, + account_id: AccountId, + slot_name: &StorageSlotName, + block_num: BlockNumber, + ) -> TreeId { + let lineage = Self::storage_lineage_id(account_id, slot_name); + self.lookup_tree_id(lineage, block_num) + } + + #[cfg(test)] + fn tree_id_for_vault_root(&self, account_id: AccountId, block_num: BlockNumber) -> TreeId { + let lineage = Self::vault_lineage_id(account_id); + self.lookup_tree_id(lineage, block_num) + } + + #[expect(clippy::unused_self)] + fn lookup_tree_id(&self, lineage: LineageId, block_num: BlockNumber) -> TreeId { + TreeId::new(lineage, block_num.as_u64()) + } + + fn storage_lineage_id(account_id: AccountId, slot_name: &StorageSlotName) -> LineageId { + let mut bytes = Vec::new(); + bytes.extend_from_slice(&account_id.to_bytes()); + bytes.extend_from_slice(slot_name.as_str().as_bytes()); + LineageId::new(Rpo256::hash(&bytes).as_bytes()) + } + + fn vault_lineage_id(account_id: AccountId) -> LineageId { + LineageId::new(Rpo256::hash(&account_id.to_bytes()).as_bytes()) + } + + fn build_forest_operations( + entries: impl 
IntoIterator, + ) -> Vec { + entries + .into_iter() + .map(|(key, value)| { + if value == EMPTY_WORD { + ForestOperation::remove(key) + } else { + ForestOperation::insert(key, value) + } + }) + .collect() + } + + fn apply_forest_updates( + &mut self, + lineage: LineageId, + block_num: BlockNumber, + operations: Vec, + ) -> Word { + let updates = if operations.is_empty() { + SmtUpdateBatch::empty() + } else { + SmtUpdateBatch::new(operations.into_iter()) + }; + let version = block_num.as_u64(); + let tree = if self.forest.latest_version(lineage).is_some() { + self.forest + .update_tree(lineage, version, updates) + .expect("forest update should succeed") + } else { + self.forest + .add_lineage(lineage, version, updates) + .expect("forest update should succeed") + }; + tree.root() + } + + fn map_forest_error(error: LargeSmtForestError) -> MerkleError { + match error { + LargeSmtForestError::Merkle(merkle) => merkle, + other => MerkleError::InternalError(other.as_report()), + } + } + + fn map_forest_error_to_witness(error: LargeSmtForestError) -> WitnessError { + match error { + LargeSmtForestError::Merkle(merkle) => WitnessError::MerkleError(merkle), + other => WitnessError::MerkleError(MerkleError::InternalError(other.as_report())), + } + } + + // ACCESSORS + // -------------------------------------------------------------------------------------------- + + fn get_tree_id(&self, lineage: LineageId, block_num: BlockNumber) -> Option { + let tree = self.lookup_tree_id(lineage, block_num); + match self.forest.root_info(tree) { + RootInfo::LatestVersion(_) | RootInfo::HistoricalVersion(_) => Some(tree), + RootInfo::Missing => { + let latest_version = self.forest.latest_version(lineage)?; + if latest_version <= block_num.as_u64() { + Some(TreeId::new(lineage, latest_version)) + } else { + None + } + }, + } + } + + #[cfg(test)] + fn get_tree_root(&self, lineage: LineageId, block_num: BlockNumber) -> Option { + let tree = self.get_tree_id(lineage, block_num)?; + match 
self.forest.root_info(tree) { + RootInfo::LatestVersion(root) | RootInfo::HistoricalVersion(root) => Some(root), + RootInfo::Missing => None, + } + } + + /// Retrieves a vault root for the specified account and block. + #[cfg(test)] pub(crate) fn get_vault_root( &self, account_id: AccountId, block_num: BlockNumber, ) -> Option { - self.vault_roots - .range((account_id, BlockNumber::GENESIS)..=(account_id, block_num)) - .next_back() - .map(|(_, root)| *root) + let lineage = Self::vault_lineage_id(account_id); + self.get_tree_root(lineage, block_num) } - /// Retrieves the storage map root for an account slot at or before the specified block. + /// Retrieves the storage map root for an account slot at the specified block. + #[cfg(test)] pub(crate) fn get_storage_map_root( &self, account_id: AccountId, slot_name: &StorageSlotName, block_num: BlockNumber, ) -> Option { - self.storage_map_roots - .range( - (account_id, slot_name.clone(), BlockNumber::GENESIS) - ..=(account_id, slot_name.clone(), block_num), - ) - .next_back() - .map(|(_, root)| *root) + let lineage = Self::storage_lineage_id(account_id, slot_name); + self.get_tree_root(lineage, block_num) } + // WITNESSES and PROOFS + // -------------------------------------------------------------------------------------------- + /// Retrieves a storage map witness for the specified account and storage slot. /// - /// Finds the most recent witness at or before the specified block number. - /// /// Note that the `raw_key` is the raw, user-provided key that needs to be hashed in order to /// get the actual key into the storage map. 
pub(crate) fn get_storage_map_witness( @@ -130,11 +244,10 @@ impl InnerForest { block_num: BlockNumber, raw_key: Word, ) -> Result { + let lineage = Self::storage_lineage_id(account_id, slot_name); + let tree = self.get_tree_id(lineage, block_num).ok_or(WitnessError::RootNotFound)?; let key = StorageMap::hash_key(raw_key); - let root = self - .get_storage_map_root(account_id, slot_name, block_num) - .ok_or(WitnessError::RootNotFound)?; - let proof = self.forest.open(root, key)?; + let proof = self.forest.open(tree, key).map_err(Self::map_forest_error_to_witness)?; Ok(StorageMapWitness::new(proof, vec![raw_key])?) } @@ -147,72 +260,42 @@ impl InnerForest { block_num: BlockNumber, asset_keys: BTreeSet, ) -> Result, WitnessError> { - let root = self.get_vault_root(account_id, block_num).ok_or(WitnessError::RootNotFound)?; - let witnessees = asset_keys - .into_iter() - .map(|key| { - let proof = self.forest.open(root, key.into())?; + let lineage = Self::vault_lineage_id(account_id); + let tree = self.get_tree_id(lineage, block_num).ok_or(WitnessError::RootNotFound)?; + let witnessees: Result, WitnessError> = + Result::from_iter(asset_keys.into_iter().map(|key| { + let proof = self + .forest + .open(tree, key.into()) + .map_err(Self::map_forest_error_to_witness)?; let asset = AssetWitness::new(proof)?; Ok(asset) - }) - .collect::, WitnessError>>()?; - Ok(witnessees) + })); + witnessees } /// Opens a storage map and returns storage map details with SMT proofs for the given keys. /// /// Returns `None` if no storage root is tracked for this account/slot/block combination. /// Returns a `MerkleError` if the forest doesn't contain sufficient data for the proofs. 
- pub(crate) fn open_storage_map( + pub(crate) fn get_storage_map_details_for_keys( &self, account_id: AccountId, slot_name: StorageSlotName, block_num: BlockNumber, raw_keys: &[Word], ) -> Option> { - let root = self.get_storage_map_root(account_id, &slot_name, block_num)?; + let lineage = Self::storage_lineage_id(account_id, &slot_name); + let tree = self.get_tree_id(lineage, block_num)?; - // Collect SMT proofs for each key let proofs = Result::from_iter(raw_keys.iter().map(|raw_key| { let key = StorageMap::hash_key(*raw_key); - self.forest.open(root, key) + self.forest.open(tree, key).map_err(Self::map_forest_error) })); Some(proofs.map(|proofs| AccountStorageMapDetails::from_proofs(slot_name, proofs))) } - /// Returns all key-value entries for a specific account storage slot at or before a block. - /// - /// Uses range query semantics: finds the most recent entries at or before `block_num`. - /// Returns `None` if no entries exist for this account/slot up to the given block. - /// Returns `LimitExceeded` if there are too many entries to return. 
- pub(crate) fn storage_map_entries( - &self, - account_id: AccountId, - slot_name: StorageSlotName, - block_num: BlockNumber, - ) -> Option { - // Find the most recent entries at or before block_num - let entries = self - .storage_entries - .range( - (account_id, slot_name.clone(), BlockNumber::GENESIS) - ..=(account_id, slot_name.clone(), block_num), - ) - .next_back() - .map(|(_, entries)| entries)?; - - if entries.len() > AccountStorageMapDetails::MAX_RETURN_ENTRIES { - return Some(AccountStorageMapDetails { - slot_name, - entries: StorageMapEntries::LimitExceeded, - }); - } - let entries = Vec::from_iter(entries.iter().map(|(k, v)| (*k, *v))); - - Some(AccountStorageMapDetails::from_forest_entries(slot_name, entries)) - } - // PUBLIC INTERFACE // -------------------------------------------------------------------------------------------- @@ -229,6 +312,7 @@ impl InnerForest { /// # Errors /// /// Returns an error if applying a vault delta results in a negative balance. + #[instrument(target = COMPONENT, skip_all, fields(block.number = %block_num))] pub(crate) fn apply_block_updates( &mut self, block_num: BlockNumber, @@ -245,6 +329,9 @@ impl InnerForest { "Updated forest with account delta" ); } + + self.prune(block_num); + Ok(()) } @@ -289,10 +376,8 @@ impl InnerForest { /// Retrieves the most recent vault SMT root for an account. If no vault root is found for the /// account, returns an empty SMT root. fn get_latest_vault_root(&self, account_id: AccountId) -> Word { - self.vault_roots - .range((account_id, BlockNumber::GENESIS)..=(account_id, BlockNumber::from(u32::MAX))) - .next_back() - .map_or_else(Self::empty_smt_root, |(_, root)| *root) + let lineage = Self::vault_lineage_id(account_id); + self.forest.latest_root(lineage).unwrap_or_else(Self::empty_smt_root) } /// Inserts asset vault data into the forest for the specified account. 
Assumes that asset @@ -305,13 +390,25 @@ impl InnerForest { ) { // get the current vault root for the account, and make sure it is empty let prev_root = self.get_latest_vault_root(account_id); + let lineage = Self::vault_lineage_id(account_id); assert_eq!(prev_root, Self::empty_smt_root(), "account should not be in the forest"); + assert!( + self.forest.latest_version(lineage).is_none(), + "account should not be in the forest" + ); - // if there are no assets in the vault, add a root of an empty SMT to the vault roots map - // so that the map has entries for all accounts, and then return (i.e., no need to insert - // anything into the forest) if delta.is_empty() { - self.vault_roots.insert((account_id, block_num), prev_root); + let lineage = Self::vault_lineage_id(account_id); + let new_root = self.apply_forest_updates(lineage, block_num, Vec::new()); + + tracing::debug!( + target: crate::COMPONENT, + %account_id, + %block_num, + %new_root, + vault_entries = 0, + "Inserted vault into forest" + ); return; } @@ -326,25 +423,26 @@ impl InnerForest { } // process non-fungible assets - for (&asset, _action) in delta.non_fungible().iter() { - // TODO: assert that action is addition - entries.push((asset.vault_key().into(), asset.into())); + for (&asset, action) in delta.non_fungible().iter() { + let asset_vault_key = asset.vault_key().into(); + match action { + NonFungibleDeltaAction::Add => entries.push((asset_vault_key, asset.into())), + NonFungibleDeltaAction::Remove => entries.push((asset_vault_key, EMPTY_WORD)), + } } assert!(!entries.is_empty(), "non-empty delta should contain entries"); let num_entries = entries.len(); - let new_root = self - .forest - .batch_insert(prev_root, entries) - .expect("forest insertion should succeed"); - - self.vault_roots.insert((account_id, block_num), new_root); + let lineage = Self::vault_lineage_id(account_id); + let operations = Self::build_forest_operations(entries); + let new_root = self.apply_forest_updates(lineage, block_num, 
operations); tracing::debug!( target: crate::COMPONENT, %account_id, %block_num, + %new_root, vault_entries = num_entries, "Inserted vault into forest" ); @@ -368,41 +466,39 @@ impl InnerForest { assert!(!delta.is_empty(), "expected the delta not to be empty"); // get the previous vault root; the root could be for an empty or non-empty SMT - let prev_root = self.get_latest_vault_root(account_id); + let lineage = Self::vault_lineage_id(account_id); + let prev_tree = + self.forest.latest_version(lineage).map(|version| TreeId::new(lineage, version)); let mut entries: Vec<(Word, Word)> = Vec::new(); // Process fungible assets for (faucet_id, amount_delta) in delta.fungible().iter() { - let key: Word = - FungibleAsset::new(*faucet_id, 0).expect("valid faucet id").vault_key().into(); - - let new_amount = { - // amount delta is a change that must be applied to previous balance. - // - // TODO: SmtForest only exposes `fn open()` which computes a full Merkle proof. We - // only need the leaf, so a direct `fn get()` method would be faster. - let prev_amount = self - .forest - .open(prev_root, key) - .ok() - .and_then(|proof| proof.get(&key)) - .and_then(|word| FungibleAsset::try_from(word).ok()) - .map_or(0, |asset| asset.amount()); - - let new_balance = i128::from(prev_amount) + i128::from(*amount_delta); - u64::try_from(new_balance).map_err(|_| InnerForestError::BalanceUnderflow { - account_id, - faucet_id: *faucet_id, - prev_balance: prev_amount, - delta: *amount_delta, - })? + let delta_abs = amount_delta.unsigned_abs(); + let delta = FungibleAsset::new(*faucet_id, delta_abs)?; + let key = Word::from(delta.vault_key()); + + let empty = FungibleAsset::new(*faucet_id, 0)?; + let asset = if let Some(tree) = prev_tree { + self.forest + .get(tree, key)? + .map(FungibleAsset::try_from) + .transpose()? + .unwrap_or(empty) + } else { + empty + }; + + let updated = if *amount_delta < 0 { + asset.sub(delta)? + } else { + asset.add(delta)? 
}; - let value = if new_amount == 0 { + let value = if updated.amount() == 0 { EMPTY_WORD } else { - FungibleAsset::new(*faucet_id, new_amount).expect("valid fungible asset").into() + Word::from(updated) }; entries.push((key, value)); } @@ -416,21 +512,18 @@ impl InnerForest { entries.push((asset.vault_key().into(), value)); } - assert!(!entries.is_empty(), "non-empty delta should contain entries"); - let num_entries = entries.len(); + let vault_entries = entries.len(); - let new_root = self - .forest - .batch_insert(prev_root, entries) - .expect("forest insertion should succeed"); - - self.vault_roots.insert((account_id, block_num), new_root); + let lineage = Self::vault_lineage_id(account_id); + let operations = Self::build_forest_operations(entries); + let new_root = self.apply_forest_updates(lineage, block_num, operations); tracing::debug!( target: crate::COMPONENT, %account_id, %block_num, - vault_entries = num_entries, + %new_root, + %vault_entries, "Updated vault in forest" ); Ok(()) @@ -446,30 +539,8 @@ impl InnerForest { account_id: AccountId, slot_name: &StorageSlotName, ) -> Word { - self.storage_map_roots - .range( - (account_id, slot_name.clone(), BlockNumber::GENESIS) - ..=(account_id, slot_name.clone(), BlockNumber::from(u32::MAX)), - ) - .next_back() - .map_or_else(Self::empty_smt_root, |(_, root)| *root) - } - - /// Retrieves the most recent entries in the specified storage map. If no storage map exists - /// returns an empty map. 
- fn get_latest_storage_map_entries( - &self, - account_id: AccountId, - slot_name: &StorageSlotName, - ) -> BTreeMap { - self.storage_entries - .range( - (account_id, slot_name.clone(), BlockNumber::GENESIS) - ..(account_id, slot_name.clone(), BlockNumber::from(u32::MAX)), - ) - .next_back() - .map(|(_, entries)| entries.clone()) - .unwrap_or_default() + let lineage = Self::storage_lineage_id(account_id, slot_name); + self.forest.latest_root(lineage).map_or_else(Self::empty_smt_root, |root| root) } /// Inserts all storage maps from the provided storage delta into the forest. @@ -489,60 +560,44 @@ impl InnerForest { // build a vector of raw entries and filter out any empty values; such values // shouldn't be present in full-state deltas, but it is good to exclude them // explicitly - let raw_map_entries: Vec<(Word, Word)> = map_delta - .entries() - .iter() - .filter_map(|(&key, &value)| { + let raw_map_entries: Vec<(Word, Word)> = + Vec::from_iter(map_delta.entries().iter().filter_map(|(&key, &value)| { if value == EMPTY_WORD { None } else { Some((Word::from(key), value)) } - }) - .collect(); + })); - // if the delta is empty, make sure we create an entry in the storage map roots map - // and storage entries map (so storage_map_entries() queries work) if raw_map_entries.is_empty() { - self.storage_map_roots - .insert((account_id, slot_name.clone(), block_num), prev_root); - self.storage_entries - .insert((account_id, slot_name.clone(), block_num), BTreeMap::new()); + let lineage = Self::storage_lineage_id(account_id, slot_name); + let _new_root = self.apply_forest_updates(lineage, block_num, Vec::new()); continue; } - // hash the keys before inserting into the forest, matching how `StorageMap` - // hashes keys before inserting into the SMT. 
- let hashed_entries: Vec<(Word, Word)> = raw_map_entries - .iter() - .map(|(key, value)| (StorageMap::hash_key(*key), *value)) - .collect(); - - // insert the updates into the forest and update storage map roots map - let new_root = self - .forest - .batch_insert(prev_root, hashed_entries.iter().copied()) - .expect("forest insertion should succeed"); + let hashed_entries = Vec::from_iter( + raw_map_entries + .iter() + .map(|(raw_key, value)| (StorageMap::hash_key(*raw_key), *value)), + ); - self.storage_map_roots - .insert((account_id, slot_name.clone(), block_num), new_root); + let lineage = Self::storage_lineage_id(account_id, slot_name); + assert!( + self.forest.latest_version(lineage).is_none(), + "account should not be in the forest" + ); + let operations = Self::build_forest_operations(hashed_entries); + let new_root = self.apply_forest_updates(lineage, block_num, operations); - assert!(!raw_map_entries.is_empty(), "a non-empty delta should have entries"); let num_entries = raw_map_entries.len(); - // keep track of the state of storage map entries (using raw keys for delta merging) - // TODO: this is a temporary solution until the LargeSmtForest is implemented as - // tracking multiple versions of all storage maps will be prohibitively expensive - let map_entries = BTreeMap::from_iter(raw_map_entries); - self.storage_entries - .insert((account_id, slot_name.clone(), block_num), map_entries); - tracing::debug!( target: crate::COMPONENT, %account_id, %block_num, ?slot_name, + %new_root, delta_entries = num_entries, "Inserted storage map into forest" ); @@ -559,8 +614,6 @@ impl InnerForest { account_id: AccountId, delta: &AccountStorageDelta, ) { - assert!(!delta.is_empty(), "expected the delta not to be empty"); - for (slot_name, map_delta) in delta.maps() { // map delta shouldn't be empty, but if it is for some reason, there is nothing to do if map_delta.is_empty() { @@ -568,48 +621,50 @@ impl InnerForest { } // update the storage map tree in the forest and 
add an entry to the storage map roots - let prev_root = self.get_latest_storage_map_root(account_id, slot_name); - let delta_entries: Vec<(Word, Word)> = - map_delta.entries().iter().map(|(key, value)| ((*key).into(), *value)).collect(); - - // Hash the keys before inserting into the forest, matching how StorageMap - // hashes keys before inserting into the SMT. - let hashed_entries: Vec<(Word, Word)> = delta_entries - .iter() - .map(|(key, value)| (StorageMap::hash_key(*key), *value)) - .collect(); - - let new_root = self - .forest - .batch_insert(prev_root, hashed_entries.iter().copied()) - .expect("forest insertion should succeed"); - - self.storage_map_roots - .insert((account_id, slot_name.clone(), block_num), new_root); - - // merge the delta with the latest entries in the map (using raw keys) - // TODO: this is a temporary solution until the LargeSmtForest is implemented as - // tracking multiple versions of all storage maps will be prohibitively expensive - let mut latest_entries = self.get_latest_storage_map_entries(account_id, slot_name); - for (key, value) in &delta_entries { - if *value == EMPTY_WORD { - latest_entries.remove(key); - } else { - latest_entries.insert(*key, *value); - } - } + let lineage = Self::storage_lineage_id(account_id, slot_name); + let delta_entries: Vec<(Word, Word)> = Vec::from_iter( + map_delta.entries().iter().map(|(key, value)| ((*key).into(), *value)), + ); + + let hashed_entries = Vec::from_iter( + delta_entries + .iter() + .map(|(raw_key, value)| (StorageMap::hash_key(*raw_key), *value)), + ); - self.storage_entries - .insert((account_id, slot_name.clone(), block_num), latest_entries); + let operations = Self::build_forest_operations(hashed_entries); + let new_root = self.apply_forest_updates(lineage, block_num, operations); tracing::debug!( target: crate::COMPONENT, %account_id, %block_num, ?slot_name, + %new_root, delta_entries = delta_entries.len(), "Updated storage map in forest" ); } } + + // PRUNING + // 
-------------------------------------------------------------------------------------------- + + /// Prunes old entries from the in-memory forest data structures. + /// + /// The `LargeSmtForest` itself is truncated to drop historical versions beyond the cutoff. + /// + /// Returns the number of pruned roots for observability. + #[instrument(target = COMPONENT, skip_all, ret, fields(block.number = %chain_tip))] + pub(crate) fn prune(&mut self, chain_tip: BlockNumber) -> usize { + let cutoff_block = chain_tip + .checked_sub(HISTORICAL_BLOCK_RETENTION) + .unwrap_or(BlockNumber::GENESIS); + let before = self.forest.roots().count(); + + self.forest.truncate(cutoff_block.as_u64()); + + let after = self.forest.roots().count(); + before.saturating_sub(after) + } } diff --git a/crates/store/src/inner_forest/tests.rs b/crates/store/src/inner_forest/tests.rs index 5fc0cc6c0c..76273404d1 100644 --- a/crates/store/src/inner_forest/tests.rs +++ b/crates/store/src/inner_forest/tests.rs @@ -1,8 +1,12 @@ +use assert_matches::assert_matches; +use miden_node_proto::domain::account::StorageMapEntries; use miden_protocol::account::AccountCode; -use miden_protocol::asset::{Asset, AssetVault, FungibleAsset}; +use miden_protocol::asset::{Asset, AssetVault, AssetVaultKey, FungibleAsset}; +use miden_protocol::crypto::merkle::smt::SmtProof; use miden_protocol::testing::account_id::{ ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET, ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE, + ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE_2, }; use miden_protocol::{Felt, FieldElement}; @@ -26,7 +30,6 @@ fn dummy_partial_delta( vault_delta: AccountVaultDelta, storage_delta: AccountStorageDelta, ) -> AccountDelta { - // For partial deltas, nonce_delta must be > 0 if there are changes let nonce_delta = if vault_delta.is_empty() && storage_delta.is_empty() { Felt::ZERO } else { @@ -39,43 +42,36 @@ fn dummy_partial_delta( fn dummy_full_state_delta(account_id: AccountId, assets: &[Asset]) -> AccountDelta { use 
miden_protocol::account::{Account, AccountStorage}; - // Create a minimal account with the given assets let vault = AssetVault::new(assets).unwrap(); let storage = AccountStorage::new(vec![]).unwrap(); let code = AccountCode::mock(); let nonce = Felt::ONE; let account = Account::new(account_id, vault, storage, code, nonce, None).unwrap(); - - // Convert to delta - this will be a full-state delta because it has code AccountDelta::try_from(account).unwrap() } +// INITIALIZATION & BASIC OPERATIONS +// ================================================================================================ + #[test] -fn test_empty_smt_root_is_recognized() { - use miden_protocol::crypto::merkle::smt::Smt; +fn empty_smt_root_is_recognized() { + use miden_crypto::merkle::smt::Smt; let empty_root = InnerForest::empty_smt_root(); - // Verify an empty SMT has the expected root assert_eq!(Smt::default().root(), empty_root); - - // Test that SmtForest accepts this root in batch_insert - let mut forest = SmtForest::new(); - let entries = vec![(Word::from([1u32, 2, 3, 4]), Word::from([5u32, 6, 7, 8]))]; - - assert!(forest.batch_insert(empty_root, entries).is_ok()); } #[test] -fn test_inner_forest_basic_initialization() { +fn inner_forest_basic_initialization() { let forest = InnerForest::new(); - assert!(forest.storage_map_roots.is_empty()); - assert!(forest.vault_roots.is_empty()); + assert_eq!(forest.forest.lineage_count(), 0); + assert_eq!(forest.forest.tree_count(), 0); } #[test] -fn test_update_account_with_empty_deltas() { +fn update_account_with_empty_deltas() { let mut forest = InnerForest::new(); let account_id = dummy_account(); let block_num = BlockNumber::GENESIS.child(); @@ -88,37 +84,21 @@ fn test_update_account_with_empty_deltas() { forest.update_account(block_num, &delta).unwrap(); - // Empty deltas should not create entries - assert!(!forest.vault_roots.contains_key(&(account_id, block_num))); - assert!(forest.storage_map_roots.is_empty()); + 
assert!(forest.get_vault_root(account_id, block_num).is_none()); + assert_eq!(forest.forest.lineage_count(), 0); } -#[test] -fn test_update_vault_with_fungible_asset() { - let mut forest = InnerForest::new(); - let account_id = dummy_account(); - let faucet_id = dummy_faucet(); - let block_num = BlockNumber::GENESIS.child(); - - let asset = dummy_fungible_asset(faucet_id, 100); - let mut vault_delta = AccountVaultDelta::default(); - vault_delta.add_asset(asset).unwrap(); - - let delta = dummy_partial_delta(account_id, vault_delta, AccountStorageDelta::default()); - forest.update_account(block_num, &delta).unwrap(); - - let vault_root = forest.vault_roots[&(account_id, block_num)]; - assert_ne!(vault_root, EMPTY_WORD); -} +// VAULT TESTS +// ================================================================================================ #[test] -fn test_compare_partial_vs_full_state_delta_vault() { +fn vault_partial_vs_full_state_produces_same_root() { let account_id = dummy_account(); let faucet_id = dummy_faucet(); let block_num = BlockNumber::GENESIS.child(); let asset = dummy_fungible_asset(faucet_id, 100); - // Approach 1: Partial delta (simulates block application) + // Partial delta (block application) let mut forest_partial = InnerForest::new(); let mut vault_delta = AccountVaultDelta::default(); vault_delta.add_asset(asset).unwrap(); @@ -126,239 +106,193 @@ fn test_compare_partial_vs_full_state_delta_vault() { dummy_partial_delta(account_id, vault_delta, AccountStorageDelta::default()); forest_partial.update_account(block_num, &partial_delta).unwrap(); - // Approach 2: Full-state delta (simulates DB reconstruction) + // Full-state delta (DB reconstruction) let mut forest_full = InnerForest::new(); let full_delta = dummy_full_state_delta(account_id, &[asset]); forest_full.update_account(block_num, &full_delta).unwrap(); - // Both approaches must produce identical vault roots - let root_partial = forest_partial.vault_roots.get(&(account_id, 
block_num)).unwrap(); - let root_full = forest_full.vault_roots.get(&(account_id, block_num)).unwrap(); + let root_partial = forest_partial.get_vault_root(account_id, block_num).unwrap(); + let root_full = forest_full.get_vault_root(account_id, block_num).unwrap(); assert_eq!(root_partial, root_full); - assert_ne!(*root_partial, EMPTY_WORD); + assert_ne!(root_partial, EMPTY_WORD); } #[test] -fn test_incremental_vault_updates() { +fn vault_incremental_updates_with_add_and_remove() { let mut forest = InnerForest::new(); let account_id = dummy_account(); let faucet_id = dummy_faucet(); - // Block 1: 100 tokens - let block_1 = BlockNumber::GENESIS.child(); - let mut vault_delta_1 = AccountVaultDelta::default(); - vault_delta_1.add_asset(dummy_fungible_asset(faucet_id, 100)).unwrap(); - let delta_1 = dummy_partial_delta(account_id, vault_delta_1, AccountStorageDelta::default()); - forest.update_account(block_1, &delta_1).unwrap(); - let root_1 = forest.vault_roots[&(account_id, block_1)]; - - // Block 2: 150 tokens (update) - let block_2 = block_1.child(); - let mut vault_delta_2 = AccountVaultDelta::default(); - vault_delta_2.add_asset(dummy_fungible_asset(faucet_id, 150)).unwrap(); - let delta_2 = dummy_partial_delta(account_id, vault_delta_2, AccountStorageDelta::default()); - forest.update_account(block_2, &delta_2).unwrap(); - let root_2 = forest.vault_roots[&(account_id, block_2)]; - - assert_ne!(root_1, root_2); -} - -#[test] -fn test_vault_state_persists_across_blocks_without_changes() { - // Regression test for issue #7: vault state should persist across blocks - // where no changes occur, not reset to empty. 
- let mut forest = InnerForest::new(); - let account_id = dummy_account(); - let faucet_id = dummy_faucet(); - - // Helper to query vault root at or before a block (range query) - let get_vault_root = |forest: &InnerForest, account_id: AccountId, block_num: BlockNumber| { - forest - .vault_roots - .range((account_id, BlockNumber::GENESIS)..=(account_id, block_num)) - .next_back() - .map(|(_, root)| *root) - }; - // Block 1: Add 100 tokens let block_1 = BlockNumber::GENESIS.child(); let mut vault_delta_1 = AccountVaultDelta::default(); vault_delta_1.add_asset(dummy_fungible_asset(faucet_id, 100)).unwrap(); let delta_1 = dummy_partial_delta(account_id, vault_delta_1, AccountStorageDelta::default()); forest.update_account(block_1, &delta_1).unwrap(); - let root_after_block_1 = forest.vault_roots[&(account_id, block_1)]; - - // Blocks 2-5: No changes to this account (simulated by not calling update_account) - // This means no entries are added to vault_roots for these blocks. - - // Block 6: Add 50 more tokens - // The previous root lookup should find block_1's root, not return empty. 
- let block_6 = BlockNumber::from(6); - let mut vault_delta_6 = AccountVaultDelta::default(); - vault_delta_6.add_asset(dummy_fungible_asset(faucet_id, 150)).unwrap(); // 100 + 50 = 150 - let delta_6 = dummy_partial_delta(account_id, vault_delta_6, AccountStorageDelta::default()); - forest.update_account(block_6, &delta_6).unwrap(); - - // The root at block 6 should be different from block 1 (we added more tokens) - let root_after_block_6 = forest.vault_roots[&(account_id, block_6)]; - assert_ne!(root_after_block_1, root_after_block_6); - - // Verify range query finds the correct previous root for intermediate blocks - // Block 3 should return block 1's root (most recent before block 3) - let root_at_block_3 = get_vault_root(&forest, account_id, BlockNumber::from(3)); - assert_eq!(root_at_block_3, Some(root_after_block_1)); - - // Block 5 should also return block 1's root - let root_at_block_5 = get_vault_root(&forest, account_id, BlockNumber::from(5)); - assert_eq!(root_at_block_5, Some(root_after_block_1)); - - // Block 6 should return block 6's root - let root_at_block_6 = get_vault_root(&forest, account_id, block_6); - assert_eq!(root_at_block_6, Some(root_after_block_6)); -} - -#[test] -fn test_partial_delta_applies_fungible_changes_correctly() { - // Regression test for issue #8: partial deltas should apply changes to previous balance, - // not treat amounts as absolute values. 
- let mut forest = InnerForest::new(); - let account_id = dummy_account(); - let faucet_id = dummy_faucet(); - - // Block 1: Add 100 tokens (partial delta with +100) - let block_1 = BlockNumber::GENESIS.child(); - let mut vault_delta_1 = AccountVaultDelta::default(); - vault_delta_1.add_asset(dummy_fungible_asset(faucet_id, 100)).unwrap(); - let delta_1 = dummy_partial_delta(account_id, vault_delta_1, AccountStorageDelta::default()); - forest.update_account(block_1, &delta_1).unwrap(); - let root_after_100 = forest.vault_roots[&(account_id, block_1)]; + let root_after_100 = forest.get_vault_root(account_id, block_1).unwrap(); - // Block 2: Add 50 more tokens (partial delta with +50) - // Result should be 150 tokens, not 50 tokens + // Block 2: Add 50 more tokens (result: 150 tokens) let block_2 = block_1.child(); let mut vault_delta_2 = AccountVaultDelta::default(); vault_delta_2.add_asset(dummy_fungible_asset(faucet_id, 50)).unwrap(); let delta_2 = dummy_partial_delta(account_id, vault_delta_2, AccountStorageDelta::default()); forest.update_account(block_2, &delta_2).unwrap(); - let root_after_150 = forest.vault_roots[&(account_id, block_2)]; + let root_after_150 = forest.get_vault_root(account_id, block_2).unwrap(); - // Roots should be different (100 tokens vs 150 tokens) assert_ne!(root_after_100, root_after_150); - // Block 3: Remove 30 tokens (partial delta with -30) - // Result should be 120 tokens + // Block 3: Remove 30 tokens (result: 120 tokens) let block_3 = block_2.child(); let mut vault_delta_3 = AccountVaultDelta::default(); vault_delta_3.remove_asset(dummy_fungible_asset(faucet_id, 30)).unwrap(); let delta_3 = dummy_partial_delta(account_id, vault_delta_3, AccountStorageDelta::default()); forest.update_account(block_3, &delta_3).unwrap(); - let root_after_120 = forest.vault_roots[&(account_id, block_3)]; + let root_after_120 = forest.get_vault_root(account_id, block_3).unwrap(); - // Root should change again assert_ne!(root_after_150, 
root_after_120); - // Verify by creating a fresh forest with a full-state delta of 120 tokens - // The roots should match + // Verify by comparing to full-state delta let mut fresh_forest = InnerForest::new(); let full_delta = dummy_full_state_delta(account_id, &[dummy_fungible_asset(faucet_id, 120)]); fresh_forest.update_account(block_3, &full_delta).unwrap(); - let root_full_state_120 = fresh_forest.vault_roots[&(account_id, block_3)]; + let root_full_state_120 = fresh_forest.get_vault_root(account_id, block_3).unwrap(); assert_eq!(root_after_120, root_full_state_120); } #[test] -fn test_partial_delta_across_long_block_range() { - // Validation test: partial deltas should work across 101+ blocks. - // - // This test passes now because InnerForest keeps all history. Once pruning is implemented - // (estimated ~50 blocks), this test will fail unless DB fallback is also implemented. - // When that happens, the test should be updated to use DB fallback or converted to an - // integration test that has DB access. 
+fn forest_versions_are_continuous_for_sequential_updates() { + use std::collections::BTreeMap; + + use assert_matches::assert_matches; + use miden_protocol::account::delta::{StorageMapDelta, StorageSlotDelta}; + + let mut forest = InnerForest::new(); + let account_id = dummy_account(); + let faucet_id = dummy_faucet(); + let slot_name = StorageSlotName::mock(9); + let raw_key = Word::from([1u32, 0, 0, 0]); + let storage_key = StorageMap::hash_key(raw_key); + let asset_key: Word = FungibleAsset::new(faucet_id, 0).unwrap().vault_key().into(); + + for i in 1..=3u32 { + let block_num = BlockNumber::from(i); + let mut vault_delta = AccountVaultDelta::default(); + vault_delta + .add_asset(dummy_fungible_asset(faucet_id, u64::from(i) * 10)) + .unwrap(); + + let mut map_delta = StorageMapDelta::default(); + map_delta.insert(raw_key, Word::from([i, 0, 0, 0])); + let raw = BTreeMap::from_iter([(slot_name.clone(), StorageSlotDelta::Map(map_delta))]); + let storage_delta = AccountStorageDelta::from_raw(raw); + + let delta = dummy_partial_delta(account_id, vault_delta, storage_delta); + forest.update_account(block_num, &delta).unwrap(); + + let vault_tree = forest.tree_id_for_vault_root(account_id, block_num); + let storage_tree = forest.tree_id_for_root(account_id, &slot_name, block_num); + + assert_matches!(forest.forest.open(vault_tree, asset_key), Ok(_)); + assert_matches!(forest.forest.open(storage_tree, storage_key), Ok(_)); + } +} + +#[test] +fn vault_state_is_not_available_for_block_gaps() { let mut forest = InnerForest::new(); let account_id = dummy_account(); let faucet_id = dummy_faucet(); - // Block 1: Add 1000 tokens let block_1 = BlockNumber::GENESIS.child(); let mut vault_delta_1 = AccountVaultDelta::default(); - vault_delta_1.add_asset(dummy_fungible_asset(faucet_id, 1000)).unwrap(); + vault_delta_1.add_asset(dummy_fungible_asset(faucet_id, 100)).unwrap(); let delta_1 = dummy_partial_delta(account_id, vault_delta_1, AccountStorageDelta::default()); 
forest.update_account(block_1, &delta_1).unwrap(); - let root_after_1000 = forest.vault_roots[&(account_id, block_1)]; - - // Blocks 2-100: No changes to this account (simulating long gap) - - // Block 101: Add 500 more tokens (partial delta with +500) - // This requires looking up block 1's state across a 100-block gap. - let block_101 = BlockNumber::from(101); - let mut vault_delta_101 = AccountVaultDelta::default(); - vault_delta_101.add_asset(dummy_fungible_asset(faucet_id, 500)).unwrap(); - let delta_101 = - dummy_partial_delta(account_id, vault_delta_101, AccountStorageDelta::default()); - forest.update_account(block_101, &delta_101).unwrap(); - let root_after_1500 = forest.vault_roots[&(account_id, block_101)]; - - // Roots should be different (1000 tokens vs 1500 tokens) - assert_ne!(root_after_1000, root_after_1500); - // Verify the final state matches a fresh forest with 1500 tokens - let mut fresh_forest = InnerForest::new(); - let full_delta = dummy_full_state_delta(account_id, &[dummy_fungible_asset(faucet_id, 1500)]); - fresh_forest.update_account(block_101, &full_delta).unwrap(); - let root_full_state_1500 = fresh_forest.vault_roots[&(account_id, block_101)]; + let block_6 = BlockNumber::from(6); + let mut vault_delta_6 = AccountVaultDelta::default(); + vault_delta_6.add_asset(dummy_fungible_asset(faucet_id, 150)).unwrap(); + let delta_6 = dummy_partial_delta(account_id, vault_delta_6, AccountStorageDelta::default()); + forest.update_account(block_6, &delta_6).unwrap(); - assert_eq!(root_after_1500, root_full_state_1500); + assert!(forest.get_vault_root(account_id, BlockNumber::from(3)).is_some()); + assert!(forest.get_vault_root(account_id, BlockNumber::from(5)).is_some()); + assert!(forest.get_vault_root(account_id, block_6).is_some()); } #[test] -fn test_update_storage_map() { +fn witness_queries_work_with_sparse_lineage_updates() { use std::collections::BTreeMap; + use assert_matches::assert_matches; use 
miden_protocol::account::delta::{StorageMapDelta, StorageSlotDelta}; let mut forest = InnerForest::new(); let account_id = dummy_account(); - let block_num = BlockNumber::GENESIS.child(); + let faucet_id = dummy_faucet(); + let slot_name = StorageSlotName::mock(6); + let raw_key = Word::from([1u32, 0, 0, 0]); + let value = Word::from([9u32, 0, 0, 0]); - let slot_name = StorageSlotName::mock(3); - let key = Word::from([1u32, 2, 3, 4]); - let value = Word::from([5u32, 6, 7, 8]); + let block_1 = BlockNumber::GENESIS.child(); + let mut vault_delta_1 = AccountVaultDelta::default(); + vault_delta_1.add_asset(dummy_fungible_asset(faucet_id, 100)).unwrap(); + let mut map_delta_1 = StorageMapDelta::default(); + map_delta_1.insert(raw_key, value); + let raw = BTreeMap::from_iter([(slot_name.clone(), StorageSlotDelta::Map(map_delta_1))]); + let storage_delta_1 = AccountStorageDelta::from_raw(raw); + let delta_1 = dummy_partial_delta(account_id, vault_delta_1, storage_delta_1); + forest.update_account(block_1, &delta_1).unwrap(); - let mut map_delta = StorageMapDelta::default(); - map_delta.insert(key, value); - let raw = BTreeMap::from_iter([(slot_name.clone(), StorageSlotDelta::Map(map_delta))]); - let storage_delta = AccountStorageDelta::from_raw(raw); + let block_3 = block_1.child().child(); + let mut vault_delta_3 = AccountVaultDelta::default(); + vault_delta_3.add_asset(dummy_fungible_asset(faucet_id, 50)).unwrap(); + let delta_3 = dummy_partial_delta(account_id, vault_delta_3, AccountStorageDelta::default()); + forest.update_account(block_3, &delta_3).unwrap(); - let delta = dummy_partial_delta(account_id, AccountVaultDelta::default(), storage_delta); - forest.update_account(block_num, &delta).unwrap(); + let block_2 = block_1.child(); + let asset_key = FungibleAsset::new(faucet_id, 0).unwrap().vault_key(); + let witnesses = forest + .get_vault_asset_witnesses(account_id, block_2, [asset_key].into()) + .unwrap(); + let proof: SmtProof = witnesses[0].clone().into(); + 
let root_at_2 = forest.get_vault_root(account_id, block_2).unwrap(); + assert_eq!(proof.compute_root(), root_at_2); - // Verify storage root was created - assert!( + let storage_witness = forest + .get_storage_map_witness(account_id, &slot_name, block_2, raw_key) + .unwrap(); + let storage_root_at_2 = forest.get_storage_map_root(account_id, &slot_name, block_2).unwrap(); + let storage_proof: SmtProof = storage_witness.into(); + assert_eq!(storage_proof.compute_root(), storage_root_at_2); + + let storage_witness_at_3 = forest + .get_storage_map_witness(account_id, &slot_name, block_3, raw_key) + .unwrap(); + let storage_root_at_3 = forest.get_storage_map_root(account_id, &slot_name, block_3).unwrap(); + let storage_proof_at_3: SmtProof = storage_witness_at_3.into(); + assert_eq!(storage_proof_at_3.compute_root(), storage_root_at_3); + + let vault_root_at_3 = forest.get_vault_root(account_id, block_3).unwrap(); + assert_matches!( forest - .storage_map_roots - .contains_key(&(account_id, slot_name.clone(), block_num)) + .forest + .open(forest.tree_id_for_vault_root(account_id, block_3), asset_key.into()), + Ok(_) ); - let storage_root = forest.storage_map_roots[&(account_id, slot_name, block_num)]; - assert_ne!(storage_root, InnerForest::empty_smt_root()); + assert_ne!(vault_root_at_3, InnerForest::empty_smt_root()); } #[test] -fn test_full_state_delta_with_empty_vault_records_root() { - // Regression test for issue #1581: full-state deltas with empty vaults must still record - // the vault root so that subsequent `get_vault_asset_witnesses` calls succeed. - // - // The network counter account from the network monitor has an empty vault (it only uses - // storage slots). Without this fix, `get_vault_asset_witnesses` fails with "root not found" - // because no vault root was ever recorded for the account. 
+fn vault_full_state_with_empty_vault_records_root() { use miden_protocol::account::{Account, AccountStorage}; let mut forest = InnerForest::new(); let account_id = dummy_account(); let block_num = BlockNumber::GENESIS.child(); - // Create a full-state delta with an empty vault (like the network counter account). let vault = AssetVault::new(&[]).unwrap(); let storage = AccountStorage::new(vec![]).unwrap(); let code = AccountCode::mock(); @@ -366,27 +300,14 @@ fn test_full_state_delta_with_empty_vault_records_root() { let account = Account::new(account_id, vault, storage, code, nonce, None).unwrap(); let full_delta = AccountDelta::try_from(account).unwrap(); - // Sanity check: the vault delta should be empty. assert!(full_delta.vault().is_empty()); assert!(full_delta.is_full_state()); forest.update_account(block_num, &full_delta).unwrap(); - // The vault root must be recorded even though the vault is empty. - assert!( - forest.vault_roots.contains_key(&(account_id, block_num)), - "vault root should be recorded for full-state deltas with empty vaults" - ); - - // Verify the recorded root is the empty SMT root. - let recorded_root = forest.vault_roots[&(account_id, block_num)]; - assert_eq!( - recorded_root, - InnerForest::empty_smt_root(), - "empty vault should have the empty SMT root" - ); + let recorded_root = forest.get_vault_root(account_id, block_num); + assert_eq!(recorded_root, Some(InnerForest::empty_smt_root())); - // Verify `get_vault_asset_witnesses` succeeds (returns empty witnesses for empty keys). 
let witnesses = forest .get_vault_asset_witnesses(account_id, block_num, std::collections::BTreeSet::new()) .expect("get_vault_asset_witnesses should succeed for accounts with empty vaults"); @@ -394,7 +315,63 @@ fn test_full_state_delta_with_empty_vault_records_root() { } #[test] -fn test_storage_map_incremental_updates() { +fn vault_shared_root_retained_when_one_entry_pruned() { + let mut forest = InnerForest::new(); + let account1 = AccountId::try_from(ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE).unwrap(); + let account2 = AccountId::try_from(ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE_2).unwrap(); + let faucet_id = dummy_faucet(); + let block_1 = BlockNumber::GENESIS.child(); + let asset_amount = u64::from(HISTORICAL_BLOCK_RETENTION); + let amount_increment = asset_amount / u64::from(HISTORICAL_BLOCK_RETENTION); + let asset = dummy_fungible_asset(faucet_id, asset_amount); + let asset_key = AssetVaultKey::new_unchecked(asset.vault_key().into()); + + let mut vault_delta_1 = AccountVaultDelta::default(); + vault_delta_1.add_asset(asset).unwrap(); + let delta_1 = dummy_partial_delta(account1, vault_delta_1, AccountStorageDelta::default()); + forest.update_account(block_1, &delta_1).unwrap(); + + let mut vault_delta_2 = AccountVaultDelta::default(); + vault_delta_2.add_asset(dummy_fungible_asset(faucet_id, asset_amount)).unwrap(); + let delta_2 = dummy_partial_delta(account2, vault_delta_2, AccountStorageDelta::default()); + forest.update_account(block_1, &delta_2).unwrap(); + + let root1 = forest.get_vault_root(account1, block_1).unwrap(); + let root2 = forest.get_vault_root(account2, block_1).unwrap(); + assert_eq!(root1, root2); + + let block_at_51 = BlockNumber::from(HISTORICAL_BLOCK_RETENTION + 1); + let mut vault_delta_2_update = AccountVaultDelta::default(); + vault_delta_2_update + .add_asset(dummy_fungible_asset(faucet_id, amount_increment)) + .unwrap(); + let delta_2_update = + dummy_partial_delta(account2, vault_delta_2_update, 
AccountStorageDelta::default()); + forest.update_account(block_at_51, &delta_2_update).unwrap(); + + let block_at_52 = BlockNumber::from(HISTORICAL_BLOCK_RETENTION + 2); + let total_roots_removed = forest.prune(block_at_52); + + assert_eq!(total_roots_removed, 0); + assert!(forest.get_vault_root(account1, block_1).is_some()); + assert!(forest.get_vault_root(account2, block_1).is_some()); + + let vault_root_at_52 = forest.get_vault_root(account1, block_at_52); + assert_eq!(vault_root_at_52, Some(root1)); + + let witnesses = forest + .get_vault_asset_witnesses(account1, block_at_52, [asset_key].into()) + .unwrap(); + assert_eq!(witnesses.len(), 1); + let proof: SmtProof = witnesses[0].clone().into(); + assert_eq!(proof.compute_root(), root1); +} + +// STORAGE MAP TESTS +// ================================================================================================ + +#[test] +fn storage_map_incremental_updates() { use std::collections::BTreeMap; use miden_protocol::account::delta::{StorageMapDelta, StorageSlotDelta}; @@ -417,9 +394,9 @@ fn test_storage_map_incremental_updates() { let storage_delta_1 = AccountStorageDelta::from_raw(raw_1); let delta_1 = dummy_partial_delta(account_id, AccountVaultDelta::default(), storage_delta_1); forest.update_account(block_1, &delta_1).unwrap(); - let root_1 = forest.storage_map_roots[&(account_id, slot_name.clone(), block_1)]; + let root_1 = forest.get_storage_map_root(account_id, &slot_name, block_1).unwrap(); - // Block 2: Insert key2 -> value2 (key1 should persist) + // Block 2: Insert key2 -> value2 let block_2 = block_1.child(); let mut map_delta_2 = StorageMapDelta::default(); map_delta_2.insert(key2, value2); @@ -427,7 +404,7 @@ fn test_storage_map_incremental_updates() { let storage_delta_2 = AccountStorageDelta::from_raw(raw_2); let delta_2 = dummy_partial_delta(account_id, AccountVaultDelta::default(), storage_delta_2); forest.update_account(block_2, &delta_2).unwrap(); - let root_2 = 
forest.storage_map_roots[&(account_id, slot_name.clone(), block_2)]; + let root_2 = forest.get_storage_map_root(account_id, &slot_name, block_2).unwrap(); // Block 3: Update key1 -> value3 let block_3 = block_2.child(); @@ -437,16 +414,65 @@ fn test_storage_map_incremental_updates() { let storage_delta_3 = AccountStorageDelta::from_raw(raw_3); let delta_3 = dummy_partial_delta(account_id, AccountVaultDelta::default(), storage_delta_3); forest.update_account(block_3, &delta_3).unwrap(); - let root_3 = forest.storage_map_roots[&(account_id, slot_name, block_3)]; + let root_3 = forest.get_storage_map_root(account_id, &slot_name, block_3).unwrap(); - // All roots should be different assert_ne!(root_1, root_2); assert_ne!(root_2, root_3); assert_ne!(root_1, root_3); } #[test] -fn test_empty_storage_map_entries_query() { +fn storage_map_state_is_not_available_for_block_gaps() { + use std::collections::BTreeMap; + + use miden_protocol::account::delta::{StorageMapDelta, StorageSlotDelta}; + + const BLOCK_FIRST: u32 = 1; + const BLOCK_SECOND: u32 = 4; + const BLOCK_QUERY_ONE: u32 = 2; + const BLOCK_QUERY_TWO: u32 = 3; + const KEY_VALUE: u32 = 7; + const VALUE_FIRST: u32 = 10; + const VALUE_SECOND: u32 = 20; + + let mut forest = InnerForest::new(); + let account_id = dummy_account(); + let slot_name = StorageSlotName::mock(4); + let raw_key = Word::from([KEY_VALUE, 0, 0, 0]); + + let block_1 = BlockNumber::from(BLOCK_FIRST); + let mut map_delta_1 = StorageMapDelta::default(); + let value_1 = Word::from([VALUE_FIRST, 0, 0, 0]); + map_delta_1.insert(raw_key, value_1); + let raw_1 = BTreeMap::from_iter([(slot_name.clone(), StorageSlotDelta::Map(map_delta_1))]); + let storage_delta_1 = AccountStorageDelta::from_raw(raw_1); + let delta_1 = dummy_partial_delta(account_id, AccountVaultDelta::default(), storage_delta_1); + forest.update_account(block_1, &delta_1).unwrap(); + + let block_4 = BlockNumber::from(BLOCK_SECOND); + let mut map_delta_4 = StorageMapDelta::default(); + let 
value_2 = Word::from([VALUE_SECOND, 0, 0, 0]); + map_delta_4.insert(raw_key, value_2); + let raw_4 = BTreeMap::from_iter([(slot_name.clone(), StorageSlotDelta::Map(map_delta_4))]); + let storage_delta_4 = AccountStorageDelta::from_raw(raw_4); + let delta_4 = dummy_partial_delta(account_id, AccountVaultDelta::default(), storage_delta_4); + forest.update_account(block_4, &delta_4).unwrap(); + + assert!( + forest + .get_storage_map_root(account_id, &slot_name, BlockNumber::from(BLOCK_QUERY_ONE)) + .is_some() + ); + assert!( + forest + .get_storage_map_root(account_id, &slot_name, BlockNumber::from(BLOCK_QUERY_TWO)) + .is_some() + ); + assert!(forest.get_storage_map_root(account_id, &slot_name, block_4).is_some()); +} + +#[test] +fn storage_map_empty_entries_query() { use miden_protocol::account::auth::PublicKeyCommitment; use miden_protocol::account::{ AccountBuilder, @@ -463,7 +489,6 @@ fn test_empty_storage_map_entries_query() { let block_num = BlockNumber::GENESIS.child(); let slot_name = StorageSlotName::mock(0); - // Create an account with an empty storage map slot let storage_map = StorageMap::with_entries(vec![]).unwrap(); let component_storage = vec![StorageSlot::with_map(slot_name.clone(), storage_map)]; @@ -483,38 +508,427 @@ fn test_empty_storage_map_entries_query() { .unwrap(); let account_id = account.id(); - - // Convert to full-state delta (this triggers insert_account_storage path) let full_delta = AccountDelta::try_from(account).unwrap(); - assert!(full_delta.is_full_state(), "delta should be full-state"); + assert!(full_delta.is_full_state()); - // Apply the delta forest.update_account(block_num, &full_delta).unwrap(); - // Verify storage_map_roots has an entry - assert!( - forest - .storage_map_roots - .contains_key(&(account_id, slot_name.clone(), block_num)), - "storage_map_roots should have an entry for the empty map" - ); + let root = forest.get_storage_map_root(account_id, &slot_name, block_num); + assert_eq!(root, 
Some(InnerForest::empty_smt_root())); +} + +#[test] +fn storage_map_open_returns_proofs() { + use std::collections::BTreeMap; + + use assert_matches::assert_matches; + use miden_protocol::account::delta::{StorageMapDelta, StorageSlotDelta}; + + let mut forest = InnerForest::new(); + let account_id = dummy_account(); + let slot_name = StorageSlotName::mock(3); + let block_num = BlockNumber::GENESIS.child(); + + let mut map_delta = StorageMapDelta::default(); + for i in 0..20u32 { + let key = Word::from([i, 0, 0, 0]); + let value = Word::from([0, 0, 0, i]); + map_delta.insert(key, value); + } + let raw = BTreeMap::from_iter([(slot_name.clone(), StorageSlotDelta::Map(map_delta))]); + let storage_delta = AccountStorageDelta::from_raw(raw); + let delta = dummy_partial_delta(account_id, AccountVaultDelta::default(), storage_delta); + forest.update_account(block_num, &delta).unwrap(); + + let keys: Vec = (0..20u32).map(|i| Word::from([i, 0, 0, 0])).collect(); + let result = + forest.get_storage_map_details_for_keys(account_id, slot_name.clone(), block_num, &keys); + + let details = result.expect("Should return Some").expect("Should not error"); + assert_matches!(details.entries, StorageMapEntries::EntriesWithProofs(entries) => { + assert_eq!(entries.len(), keys.len()); + }); +} + +#[test] +fn storage_map_key_hashing_and_raw_entries_are_consistent() { + use std::collections::BTreeMap; + + use miden_protocol::account::StorageMap; + use miden_protocol::account::delta::{StorageMapDelta, StorageSlotDelta}; + + const SLOT_INDEX: usize = 4; + const KEY_VALUE: u32 = 11; + const VALUE_VALUE: u32 = 22; + + let mut forest = InnerForest::new(); + let account_id = dummy_account(); + let slot_name = StorageSlotName::mock(SLOT_INDEX); + let block_num = BlockNumber::GENESIS.child(); + let raw_key = Word::from([KEY_VALUE, 0, 0, 0]); + let value = Word::from([VALUE_VALUE, 0, 0, 0]); + + let mut map_delta = StorageMapDelta::default(); + map_delta.insert(raw_key, value); + let raw = 
BTreeMap::from_iter([(slot_name.clone(), StorageSlotDelta::Map(map_delta))]); + let storage_delta = AccountStorageDelta::from_raw(raw); + let delta = dummy_partial_delta(account_id, AccountVaultDelta::default(), storage_delta); + forest.update_account(block_num, &delta).unwrap(); + + let root = forest.get_storage_map_root(account_id, &slot_name, block_num).unwrap(); + + let witness = forest + .get_storage_map_witness(account_id, &slot_name, block_num, raw_key) + .unwrap(); + let proof: SmtProof = witness.into(); + let hashed_key = StorageMap::hash_key(raw_key); + // Witness proofs use hashed keys because SMT leaves are keyed by the hash. + assert_eq!(proof.compute_root(), root); + assert_eq!(proof.get(&hashed_key), Some(value)); + // Raw keys never appear in SMT proofs, only their hashed counterparts. + assert_eq!(proof.get(&raw_key), None); +} + +// PRUNING TESTS +// ================================================================================================ + +const TEST_CHAIN_LENGTH: u32 = 100; +const TEST_AMOUNT_MULTIPLIER: u32 = 100; +const TEST_PRUNE_CHAIN_TIP: u32 = HISTORICAL_BLOCK_RETENTION + 5; + +#[test] +fn prune_handles_empty_forest() { + let mut forest = InnerForest::new(); - // Verify storage_map_entries returns Some (not None) - this is the bug fix validation - let result = forest.storage_map_entries(account_id, slot_name.clone(), block_num); - assert!(result.is_some(), "storage_map_entries should return Some for empty maps"); - - // Verify the entries are empty - let details = result.unwrap(); - assert_eq!(details.slot_name, slot_name); - match details.entries { - StorageMapEntries::AllEntries(entries) => { - assert!(entries.is_empty(), "entries should be empty for an empty map"); - }, - StorageMapEntries::LimitExceeded => { - panic!("should not exceed limit for empty map"); - }, - StorageMapEntries::EntriesWithProofs(_) => { - panic!("should not have proofs for empty map query"); - }, + let total_roots_removed = 
forest.prune(BlockNumber::GENESIS); + + assert_eq!(total_roots_removed, 0); +} + +#[test] +fn prune_removes_smt_roots_from_forest() { + use miden_protocol::account::delta::StorageMapDelta; + + let mut forest = InnerForest::new(); + let account_id = dummy_account(); + let faucet_id = dummy_faucet(); + let slot_name = StorageSlotName::mock(7); + + for i in 1..=TEST_PRUNE_CHAIN_TIP { + let block_num = BlockNumber::from(i); + + let mut vault_delta = AccountVaultDelta::default(); + vault_delta + .add_asset(dummy_fungible_asset(faucet_id, (i * TEST_AMOUNT_MULTIPLIER).into())) + .unwrap(); + let storage_delta = if i.is_multiple_of(3) { + let mut map_delta = StorageMapDelta::default(); + map_delta.insert(Word::from([1u32, 0, 0, 0]), Word::from([99u32, i, i * i, i * i * i])); + let asd = AccountStorageDelta::new(); + asd.add_updated_maps([(slot_name.clone(), map_delta)]) + } else { + AccountStorageDelta::default() + }; + + let delta = dummy_partial_delta(account_id, vault_delta, storage_delta); + forest.update_account(block_num, &delta).unwrap(); } + + let retained_block = BlockNumber::from(TEST_PRUNE_CHAIN_TIP); + let pruned_block = BlockNumber::from(3u32); + + let total_roots_removed = forest.prune(retained_block); + assert_eq!(total_roots_removed, 0); + assert!(forest.get_vault_root(account_id, retained_block).is_some()); + assert!(forest.get_vault_root(account_id, pruned_block).is_none()); + assert!(forest.get_storage_map_root(account_id, &slot_name, pruned_block).is_none()); + assert!(forest.get_storage_map_root(account_id, &slot_name, retained_block).is_some()); + + let asset_key: Word = FungibleAsset::new(faucet_id, 0).unwrap().vault_key().into(); + let retained_tree = forest.tree_id_for_vault_root(account_id, retained_block); + let pruned_tree = forest.tree_id_for_vault_root(account_id, pruned_block); + assert_matches!(forest.forest.open(retained_tree, asset_key), Ok(_)); + assert_matches!(forest.forest.open(pruned_tree, asset_key), Err(_)); + + let storage_key = 
StorageMap::hash_key(Word::from([1u32, 0, 0, 0])); + let storage_tree = forest.tree_id_for_root(account_id, &slot_name, pruned_block); + assert_matches!(forest.forest.open(storage_tree, storage_key), Err(_)); +} + +#[test] +fn prune_respects_retention_boundary() { + let mut forest = InnerForest::new(); + let account_id = dummy_account(); + let faucet_id = dummy_faucet(); + + for i in 1..=HISTORICAL_BLOCK_RETENTION { + let block_num = BlockNumber::from(i); + let mut vault_delta = AccountVaultDelta::default(); + vault_delta + .add_asset(dummy_fungible_asset(faucet_id, (i * TEST_AMOUNT_MULTIPLIER).into())) + .unwrap(); + let delta = dummy_partial_delta(account_id, vault_delta, AccountStorageDelta::default()); + forest.update_account(block_num, &delta).unwrap(); + } + + let total_roots_removed = forest.prune(BlockNumber::from(HISTORICAL_BLOCK_RETENTION)); + + assert_eq!(total_roots_removed, 0); + assert_eq!(forest.forest.tree_count(), 11); +} + +#[test] +fn prune_roots_removes_old_entries() { + use miden_protocol::account::delta::StorageMapDelta; + + let mut forest = InnerForest::new(); + let account_id = dummy_account(); + + let faucet_id = dummy_faucet(); + let slot_name = StorageSlotName::mock(3); + + for i in 1..=TEST_CHAIN_LENGTH { + let block_num = BlockNumber::from(i); + let amount = (i * TEST_AMOUNT_MULTIPLIER).into(); + let mut vault_delta = AccountVaultDelta::default(); + vault_delta.add_asset(dummy_fungible_asset(faucet_id, amount)).unwrap(); + + let key = Word::from([i, i * i, 5, 4]); + let value = Word::from([0, 0, i * i * i, 77]); + let mut map_delta = StorageMapDelta::default(); + map_delta.insert(key, value); + let storage_delta = + AccountStorageDelta::new().add_updated_maps([(slot_name.clone(), map_delta)]); + + let delta = dummy_partial_delta(account_id, vault_delta, storage_delta); + forest.update_account(block_num, &delta).unwrap(); + } + + assert_eq!(forest.forest.tree_count(), 22); + + let total_roots_removed = 
forest.prune(BlockNumber::from(TEST_CHAIN_LENGTH)); + + assert_eq!(total_roots_removed, 0); + + assert_eq!(forest.forest.tree_count(), 22); +} + +#[test] +fn prune_handles_multiple_accounts() { + let mut forest = InnerForest::new(); + let account1 = dummy_account(); + let account2 = AccountId::try_from(ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET).unwrap(); + let faucet_id = dummy_faucet(); + + for i in 1..=TEST_CHAIN_LENGTH { + let block_num = BlockNumber::from(i); + let amount = (i * TEST_AMOUNT_MULTIPLIER).into(); + + let mut vault_delta1 = AccountVaultDelta::default(); + vault_delta1.add_asset(dummy_fungible_asset(faucet_id, amount)).unwrap(); + let delta1 = dummy_partial_delta(account1, vault_delta1, AccountStorageDelta::default()); + forest.update_account(block_num, &delta1).unwrap(); + + let mut vault_delta2 = AccountVaultDelta::default(); + vault_delta2.add_asset(dummy_fungible_asset(account2, amount * 2)).unwrap(); + let delta2 = dummy_partial_delta(account2, vault_delta2, AccountStorageDelta::default()); + forest.update_account(block_num, &delta2).unwrap(); + } + + assert_eq!(forest.forest.tree_count(), 22); + + let total_roots_removed = forest.prune(BlockNumber::from(TEST_CHAIN_LENGTH)); + + let expected_removed_per_account = (TEST_CHAIN_LENGTH - HISTORICAL_BLOCK_RETENTION) as usize; + assert_eq!(total_roots_removed, 0); + assert!(total_roots_removed <= expected_removed_per_account * 2); + + assert_eq!(forest.forest.tree_count(), 22); +} + +#[test] +fn prune_handles_multiple_slots() { + use std::collections::BTreeMap; + + use miden_protocol::account::delta::{StorageMapDelta, StorageSlotDelta}; + + let mut forest = InnerForest::new(); + let account_id = dummy_account(); + let slot_a = StorageSlotName::mock(1); + let slot_b = StorageSlotName::mock(2); + + for i in 1..=TEST_CHAIN_LENGTH { + let block_num = BlockNumber::from(i); + let mut map_delta_a = StorageMapDelta::default(); + map_delta_a.insert(Word::from([i, 0, 0, 0]), Word::from([i, 0, 0, 1])); + let mut 
map_delta_b = StorageMapDelta::default(); + map_delta_b.insert(Word::from([i, 0, 0, 2]), Word::from([i, 0, 0, 3])); + let raw = BTreeMap::from_iter([ + (slot_a.clone(), StorageSlotDelta::Map(map_delta_a)), + (slot_b.clone(), StorageSlotDelta::Map(map_delta_b)), + ]); + let storage_delta = AccountStorageDelta::from_raw(raw); + let delta = dummy_partial_delta(account_id, AccountVaultDelta::default(), storage_delta); + forest.update_account(block_num, &delta).unwrap(); + } + + assert_eq!(forest.forest.tree_count(), 22); + + let chain_tip = BlockNumber::from(TEST_CHAIN_LENGTH); + let total_roots_removed = forest.prune(chain_tip); + + assert_eq!(total_roots_removed, 0); + + assert_eq!(forest.forest.tree_count(), 22); +} + +#[test] +fn prune_preserves_most_recent_state_per_entity() { + use std::collections::BTreeMap; + + use miden_protocol::account::delta::{StorageMapDelta, StorageSlotDelta}; + + let mut forest = InnerForest::new(); + let account_id = dummy_account(); + let faucet_id = dummy_faucet(); + let slot_map_a = StorageSlotName::mock(1); + let slot_map_b = StorageSlotName::mock(2); + + // Block 1: Create vault + map_a + map_b + let block_1 = BlockNumber::from(1); + let mut vault_delta_1 = AccountVaultDelta::default(); + vault_delta_1.add_asset(dummy_fungible_asset(faucet_id, 1000)).unwrap(); + + let mut map_delta_a = StorageMapDelta::default(); + map_delta_a.insert(Word::from([1u32, 0, 0, 0]), Word::from([100u32, 0, 0, 0])); + + let mut map_delta_b = StorageMapDelta::default(); + map_delta_b.insert(Word::from([2u32, 0, 0, 0]), Word::from([200u32, 0, 0, 0])); + + let raw = BTreeMap::from_iter([ + (slot_map_a.clone(), StorageSlotDelta::Map(map_delta_a)), + (slot_map_b.clone(), StorageSlotDelta::Map(map_delta_b)), + ]); + let storage_delta_1 = AccountStorageDelta::from_raw(raw); + let delta_1 = dummy_partial_delta(account_id, vault_delta_1, storage_delta_1); + forest.update_account(block_1, &delta_1).unwrap(); + + // Block 51: Update only map_a + let block_at_51 = 
BlockNumber::from(51); + let mut map_delta_a_new = StorageMapDelta::default(); + map_delta_a_new.insert(Word::from([1u32, 0, 0, 0]), Word::from([999u32, 0, 0, 0])); + + let raw_at_51 = + BTreeMap::from_iter([(slot_map_a.clone(), StorageSlotDelta::Map(map_delta_a_new))]); + let storage_delta_at_51 = AccountStorageDelta::from_raw(raw_at_51); + let delta_at_51 = + dummy_partial_delta(account_id, AccountVaultDelta::default(), storage_delta_at_51); + forest.update_account(block_at_51, &delta_at_51).unwrap(); + + // Block 100: Prune + let block_100 = BlockNumber::from(100); + let total_roots_removed = forest.prune(block_100); + + assert_eq!(total_roots_removed, 0); + + assert!(forest.get_storage_map_root(account_id, &slot_map_a, block_at_51).is_some()); + assert!(forest.get_storage_map_root(account_id, &slot_map_a, block_1).is_some()); + assert!(forest.get_storage_map_root(account_id, &slot_map_b, block_1).is_some()); +} + +#[test] +fn prune_preserves_entries_within_retention_window() { + use std::collections::BTreeMap; + + use miden_protocol::account::delta::{StorageMapDelta, StorageSlotDelta}; + + let mut forest = InnerForest::new(); + let account_id = dummy_account(); + let faucet_id = dummy_faucet(); + let slot_map = StorageSlotName::mock(1); + + let blocks = [1, 25, 50, 75, 100]; + + for &block_num in &blocks { + let block = BlockNumber::from(block_num); + + let mut vault_delta = AccountVaultDelta::default(); + vault_delta + .add_asset(dummy_fungible_asset(faucet_id, u64::from(block_num) * 100)) + .unwrap(); + + let mut map_delta = StorageMapDelta::default(); + map_delta.insert(Word::from([block_num, 0, 0, 0]), Word::from([block_num * 10, 0, 0, 0])); + + let raw = BTreeMap::from_iter([(slot_map.clone(), StorageSlotDelta::Map(map_delta))]); + let storage_delta = AccountStorageDelta::from_raw(raw); + let delta = dummy_partial_delta(account_id, vault_delta, storage_delta); + forest.update_account(block, &delta).unwrap(); + } + + // Block 100: Prune (retention window = 
50 blocks, cutoff = 50) + let block_100 = BlockNumber::from(100); + let total_roots_removed = forest.prune(block_100); + + // Blocks 1 and 25 pruned (outside retention, have newer entries) + assert_eq!(total_roots_removed, 4); + + assert!(forest.get_vault_root(account_id, BlockNumber::from(1)).is_none()); + assert!(forest.get_vault_root(account_id, BlockNumber::from(25)).is_none()); + assert!(forest.get_vault_root(account_id, BlockNumber::from(50)).is_some()); + assert!(forest.get_vault_root(account_id, BlockNumber::from(75)).is_some()); + assert!(forest.get_vault_root(account_id, BlockNumber::from(100)).is_some()); +} + +/// Two accounts start with identical vault roots (same asset amount). When one account changes +/// in the next block, verify the unchanged account's vault root still works for lookups and +/// witness generation. +#[test] +fn shared_vault_root_retained_when_one_account_changes() { + let mut forest = InnerForest::new(); + let account1 = AccountId::try_from(ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE).unwrap(); + let account2 = AccountId::try_from(ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE_2).unwrap(); + let faucet_id = dummy_faucet(); + + // Block 1: Both accounts have identical vaults (same asset) + let block_1 = BlockNumber::GENESIS.child(); + let initial_amount = 1000u64; + let asset = dummy_fungible_asset(faucet_id, initial_amount); + let asset_key = AssetVaultKey::new_unchecked(asset.vault_key().into()); + + let mut vault_delta_1 = AccountVaultDelta::default(); + vault_delta_1.add_asset(asset).unwrap(); + let delta_1 = dummy_partial_delta(account1, vault_delta_1, AccountStorageDelta::default()); + forest.update_account(block_1, &delta_1).unwrap(); + + let mut vault_delta_2 = AccountVaultDelta::default(); + vault_delta_2 + .add_asset(dummy_fungible_asset(faucet_id, initial_amount)) + .unwrap(); + let delta_2 = dummy_partial_delta(account2, vault_delta_2, AccountStorageDelta::default()); + forest.update_account(block_1, 
&delta_2).unwrap(); + + // Both accounts should have the same vault root (structural sharing in SmtForest) + let root1_at_block1 = forest.get_vault_root(account1, block_1).unwrap(); + let root2_at_block1 = forest.get_vault_root(account2, block_1).unwrap(); + assert_eq!(root1_at_block1, root2_at_block1, "identical vaults should have identical roots"); + + // Block 2: Only account2 changes (adds more assets) + let block_2 = block_1.child(); + let mut vault_delta_2_update = AccountVaultDelta::default(); + vault_delta_2_update.add_asset(dummy_fungible_asset(faucet_id, 500)).unwrap(); + let delta_2_update = + dummy_partial_delta(account2, vault_delta_2_update, AccountStorageDelta::default()); + forest.update_account(block_2, &delta_2_update).unwrap(); + + // Account2 now has a different root + let root2_at_block2 = forest.get_vault_root(account2, block_2).unwrap(); + assert_ne!(root2_at_block1, root2_at_block2, "account2 vault should have changed"); + + assert!(forest.get_vault_root(account1, block_2).is_some()); + + let witnesses = forest + .get_vault_asset_witnesses(account1, block_2, [asset_key].into()) + .expect("witness generation should succeed for prior version"); + assert_eq!(witnesses.len(), 1); } diff --git a/crates/store/src/state/loader.rs b/crates/store/src/state/loader.rs index 504ea06313..4171053fe9 100644 --- a/crates/store/src/state/loader.rs +++ b/crates/store/src/state/loader.rs @@ -216,7 +216,7 @@ pub async fn load_mmr(db: &mut Db) -> Result = block - .body() + let duplicate_nullifiers: Vec<_> = body .created_nullifiers() .iter() .filter(|&nullifier| inner.nullifier_tree.get_block_num(nullifier).is_some()) @@ -304,11 +305,7 @@ impl State { let nullifier_tree_update = inner .nullifier_tree .compute_mutations( - block - .body() - .created_nullifiers() - .iter() - .map(|nullifier| (*nullifier, block_num)), + body.created_nullifiers().iter().map(|nullifier| (*nullifier, block_num)), ) .map_err(InvalidBlockError::NewBlockNullifierAlreadySpent)?; @@ -325,9 
+322,7 @@ impl State { let account_tree_update = inner .account_tree .compute_mutations( - block - .body() - .updated_accounts() + body.updated_accounts() .iter() .map(|update| (update.account_id(), update.final_state_commitment())), ) @@ -355,14 +350,13 @@ impl State { ) }; - // build note tree - let note_tree = block.body().compute_block_note_tree(); + // Build note tree + let note_tree = body.compute_block_note_tree(); if note_tree.root() != header.note_root() { return Err(InvalidBlockError::NewBlockInvalidNoteRoot.into()); } - let notes = block - .body() + let notes = body .output_notes() .map(|(note_index, note)| { let (details, nullifier) = match note { @@ -401,12 +395,12 @@ impl State { // Extract public account updates with deltas before block is moved into async task. // Private accounts are filtered out since they don't expose their state changes. let account_deltas = - Vec::from_iter(block.body().updated_accounts().iter().filter_map(|update| { - match update.details() { + Vec::from_iter(body.updated_accounts().iter().filter_map( + |update| match update.details() { AccountUpdateDetails::Delta(delta) => Some(delta.clone()), AccountUpdateDetails::Private => None, - } - })); + }, + )); // The DB and in-memory state updates need to be synchronized and are partially // overlapping. Namely, the DB transaction only proceeds after this task acquires the @@ -471,7 +465,8 @@ impl State { .in_current_span() .await?; - self.forest.write().await.apply_block_updates(block_num, account_deltas)?; + let mut forest = self.forest.write().await; + forest.apply_block_updates(block_num, account_deltas)?; info!(%block_commitment, block_num = block_num.as_u32(), COMPONENT, "apply_block successful"); @@ -1055,7 +1050,8 @@ impl State { /// /// For specific key queries (`SlotData::MapKeys`), the forest is used to provide SMT proofs. /// Returns an error if the forest doesn't have data for the requested slot. 
- /// All-entries queries (`SlotData::All`) use the forest to return all entries. + /// All-entries queries (`SlotData::All`) use the forest to request all entries database. + #[allow(clippy::too_many_lines)] async fn fetch_public_account_details( &self, account_id: AccountId, @@ -1106,29 +1102,73 @@ impl State { let mut storage_map_details = Vec::::with_capacity(storage_requests.len()); + let mut map_keys_requests = Vec::new(); + let mut all_entries_requests = Vec::new(); + let mut storage_request_slots = Vec::with_capacity(storage_requests.len()); - // Use forest for storage map queries - let forest_guard = self.forest.read().await; + for (index, StorageMapRequest { slot_name, slot_data }) in + storage_requests.into_iter().enumerate() + { + storage_request_slots.push(slot_name.clone()); + match slot_data { + SlotData::MapKeys(keys) => { + map_keys_requests.push((index, slot_name, keys)); + }, + SlotData::All => { + all_entries_requests.push((index, slot_name)); + }, + } + } - for StorageMapRequest { slot_name, slot_data } in storage_requests { - let details = match &slot_data { - SlotData::MapKeys(keys) => forest_guard - .open_storage_map(account_id, slot_name.clone(), block_num, keys) - .ok_or_else(|| DatabaseError::StorageRootNotFound { + let mut storage_map_details_by_index = vec![None; storage_request_slots.len()]; + + if !map_keys_requests.is_empty() { + let forest_guard = self.forest.read().await; + for (index, slot_name, keys) in map_keys_requests { + let details = forest_guard + .get_storage_map_details_for_keys( account_id, - slot_name: slot_name.to_string(), + slot_name.clone(), block_num, - })? - .map_err(DatabaseError::MerkleError)?, - SlotData::All => forest_guard - .storage_map_entries(account_id, slot_name.clone(), block_num) + &keys, + ) .ok_or_else(|| DatabaseError::StorageRootNotFound { account_id, slot_name: slot_name.to_string(), block_num, - })?, - }; + })? 
+ .map_err(DatabaseError::MerkleError)?; + storage_map_details_by_index[index] = Some(details); + } + } + // TODO parallelize the read requests + for (index, slot_name) in all_entries_requests { + let details = self + .db + .reconstruct_storage_map_from_db( + account_id, + slot_name.clone(), + block_num, + Some( + // TODO unify this with + // `AccountStorageMapDetails::MAX_RETURN_ENTRIES` + // and accumulate the limits + ::LIMIT, + ), + ) + .await?; + storage_map_details_by_index[index] = Some(details); + } + + for (details, slot_name) in + storage_map_details_by_index.into_iter().zip(storage_request_slots) + { + let details = details.ok_or_else(|| DatabaseError::StorageRootNotFound { + account_id, + slot_name: slot_name.to_string(), + block_num, + })?; storage_map_details.push(details); } @@ -1149,7 +1189,7 @@ impl State { account_id: AccountId, block_range: RangeInclusive, ) -> Result { - self.db.select_storage_map_sync_values(account_id, block_range).await + self.db.select_storage_map_sync_values(account_id, block_range, None).await } /// Loads a block from the block store. Return `Ok(None)` if the block is not found. From e426ffcd9e4e231d266925a8e83afaec565f4ce4 Mon Sep 17 00:00:00 2001 From: Santiago Pittella <87827390+SantiagoPittella@users.noreply.github.com> Date: Sat, 7 Mar 2026 03:58:44 -0300 Subject: [PATCH 4/8] feat(monitor): add self-healing in network monitor (#1748) (#1756) --- CHANGELOG.md | 1 + bin/network-monitor/src/counter.rs | 51 ++++++++++++++++++++++++++++++ 2 files changed, 52 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index f39c4cae60..d0d6de612c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,7 @@ ## v0.13.8 (TBD) - Private notes with the network note attachment are no longer incorrectly considered as network notes (#[#1736](https://github.com/0xMiden/node/pull/1736)). 
+- Fixed network monitor looping on stale wallet nonce after node restarts by re-syncing wallet state from RPC after repeated failures ([#1748](https://github.com/0xMiden/node/pull/1748)). ## v0.13.7 (2026-02-25) diff --git a/bin/network-monitor/src/counter.rs b/bin/network-monitor/src/counter.rs index c044267331..0bb27c10da 100644 --- a/bin/network-monitor/src/counter.rs +++ b/bin/network-monitor/src/counter.rs @@ -43,6 +43,9 @@ use rand_chacha::ChaCha20Rng; use tokio::sync::{Mutex, watch}; use tracing::{error, info, instrument, warn}; +/// Number of consecutive increment failures before re-syncing the wallet account from the RPC. +const RESYNC_FAILURE_THRESHOLD: usize = 3; + use crate::COMPONENT; use crate::config::MonitorConfig; use crate::deploy::counter::COUNTER_SLOT_NAME; @@ -397,6 +400,7 @@ pub async fn run_increment_task( let mut rng = ChaCha20Rng::from_os_rng(); let mut interval = tokio::time::interval(config.counter_increment_interval); + let mut consecutive_failures: usize = 0; loop { interval.tick().await; @@ -416,6 +420,8 @@ pub async fn run_increment_task( .await { Ok((tx_id, final_account, block_height)) => { + consecutive_failures = 0; + let target_value = handle_increment_success( &mut wallet_account, &final_account, @@ -435,7 +441,21 @@ pub async fn run_increment_task( } }, Err(e) => { + consecutive_failures += 1; last_error = Some(handle_increment_failure(&mut details, &e)); + + if consecutive_failures >= RESYNC_FAILURE_THRESHOLD { + if try_resync_wallet_account( + &mut rpc_client, + &mut wallet_account, + &mut data_store, + ) + .await + .is_ok() + { + consecutive_failures = 0; + } + } }, } @@ -480,6 +500,37 @@ fn handle_increment_success( Ok(new_expected) } +/// Re-sync the wallet account from the RPC after repeated failures. 
+#[instrument( + parent = None, + target = COMPONENT, + name = "network_monitor.counter.try_resync_wallet_account", + skip_all, + fields(account.id = %wallet_account.id()), + level = "warn", + err, +)] +async fn try_resync_wallet_account( + rpc_client: &mut RpcClient, + wallet_account: &mut Account, + data_store: &mut MonitorDataStore, +) -> Result<()> { + let fresh_account = fetch_wallet_account(rpc_client, wallet_account.id()) + .await + .inspect_err(|e| { + error!(account.id = %wallet_account.id(), err = ?e, "failed to re-sync wallet account from RPC"); + })? + .context("wallet account not found on-chain during re-sync") + .inspect_err(|e| { + error!(account.id = %wallet_account.id(), err = ?e, "wallet account not found on-chain during re-sync"); + })?; + + info!(account.id = %wallet_account.id(), "wallet account re-synced from RPC"); + *wallet_account = fresh_account; + data_store.update_account(wallet_account.clone()); + Ok(()) +} + /// Handle the failure path when creating/submitting the network note fails. 
fn handle_increment_failure(details: &mut IncrementDetails, error: &anyhow::Error) -> String { error!("Failed to create and submit network note: {:?}", error); From 7c9df3e44f6681d24be5121954ddd7178aab3f61 Mon Sep 17 00:00:00 2001 From: Bernhard Schuster Date: Wed, 11 Mar 2026 13:19:14 +0100 Subject: [PATCH 5/8] feat: port rate limiter from `next` (#1763) --- CHANGELOG.md | 1 + Cargo.lock | 120 +++++++++++++++++++ bin/node/src/commands/block_producer.rs | 31 ++--- bin/node/src/commands/bundled.rs | 30 ++--- bin/node/src/commands/mod.rs | 1 - bin/node/src/commands/rpc.rs | 21 +--- bin/node/src/commands/store.rs | 24 ++-- bin/node/src/commands/validator.rs | 19 +-- bin/stress-test/src/seeding/mod.rs | 5 +- crates/block-producer/src/server/mod.rs | 19 +-- crates/block-producer/src/server/tests.rs | 7 +- crates/rpc/src/server/mod.rs | 15 +-- crates/rpc/src/tests.rs | 124 +++++++++++++------- crates/store/src/server/mod.rs | 15 ++- crates/utils/Cargo.toml | 5 + crates/utils/src/clap.rs | 135 ++++++++++++++++++++++ crates/utils/src/grpc.rs | 4 + crates/utils/src/grpc/connect_info.rs | 18 +++ crates/utils/src/grpc/layers.rs | 63 ++++++++++ crates/utils/src/lib.rs | 1 + crates/validator/src/server/mod.rs | 8 +- docs/external/src/operator/usage.md | 11 ++ 22 files changed, 518 insertions(+), 159 deletions(-) create mode 100644 crates/utils/src/clap.rs create mode 100644 crates/utils/src/grpc/connect_info.rs create mode 100644 crates/utils/src/grpc/layers.rs diff --git a/CHANGELOG.md b/CHANGELOG.md index d0d6de612c..e2ace29fbc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -19,6 +19,7 @@ - Added support for the note transport layer in the network monitor ([#1660](https://github.com/0xMiden/miden-node/pull/1660)). - Debian packages now include debug symbols ([#1666](https://github.com/0xMiden/miden-node/pull/1666)). - Debian packages now have coredumps enabled ([#1666](https://github.com/0xMiden/miden-node/pull/1666)). 
+- Added per-IP gRPC rate limiting across services as well as global concurrent connection limit ([#1763](https://github.com/0xMiden/node/issues/1763)). - Fixed storage map keys not being hashed before insertion into the store's SMT forest ([#1681](https://github.com/0xMiden/miden-node/pull/1681)). ## v0.13.4 (2026-02-04) diff --git a/Cargo.lock b/Cargo.lock index a8f696a4ad..219e5210ee 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -976,6 +976,20 @@ dependencies = [ "syn 2.0.114", ] +[[package]] +name = "dashmap" +version = "6.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5041cc499144891f3790297212f32a74fb938e5136a14943f338ef9e0ae276cf" +dependencies = [ + "cfg-if", + "crossbeam-utils", + "hashbrown 0.14.5", + "lock_api", + "once_cell", + "parking_lot_core", +] + [[package]] name = "deadpool" version = "0.12.3" @@ -1441,6 +1455,16 @@ dependencies = [ "percent-encoding", ] +[[package]] +name = "forwarded-header-value" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8835f84f38484cc86f110a805655697908257fb9a7af005234060891557198e9" +dependencies = [ + "nonempty", + "thiserror 1.0.69", +] + [[package]] name = "fs-err" version = "3.2.2" @@ -1622,6 +1646,29 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0cc23270f6e1808e30a928bdc84dea0b9b4136a8bc82338574f23baf47bbd280" +[[package]] +name = "governor" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9efcab3c1958580ff1f25a2a41be1668f7603d849bb63af523b208a3cc1223b8" +dependencies = [ + "cfg-if", + "dashmap", + "futures-sink", + "futures-timer", + "futures-util", + "getrandom 0.3.4", + "hashbrown 0.16.1", + "nonzero_ext", + "parking_lot", + "portable-atomic", + "quanta", + "rand 0.9.2", + "smallvec", + "spinning_top", + "web-time", +] + [[package]] name = "group" version = "0.13.0" @@ -1669,6 +1716,12 @@ version = "0.12.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" +[[package]] +name = "hashbrown" +version = "0.14.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" + [[package]] name = "hashbrown" version = "0.15.5" @@ -2970,9 +3023,12 @@ version = "0.13.7" dependencies = [ "anyhow", "bytes", + "clap 4.5.54", "figment", + "governor", "http", "http-body-util", + "humantime", "itertools 0.14.0", "lru 0.16.3", "miden-protocol", @@ -2984,7 +3040,9 @@ dependencies = [ "thiserror 2.0.18", "tokio", "tonic", + "tower", "tower-http", + "tower_governor", "tracing", "tracing-forest", "tracing-opentelemetry", @@ -3458,6 +3516,18 @@ dependencies = [ "minimal-lexical", ] +[[package]] +name = "nonempty" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e9e591e719385e6ebaeb5ce5d3887f7d5676fceca6411d1925ccc95745f3d6f7" + +[[package]] +name = "nonzero_ext" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38bf9645c8b145698bb0b18a4637dcacbc421ea49bef2317e4fd8065a387cf21" + [[package]] name = "nu-ansi-term" version = "0.50.3" @@ -4473,6 +4543,21 @@ dependencies = [ "pulldown-cmark", ] +[[package]] +name = "quanta" +version = "0.12.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3ab5a9d756f0d97bdc89019bd2e4ea098cf9cde50ee7564dde6b81ccc8f06c7" +dependencies = [ + "crossbeam-utils", + "libc", + "once_cell", + "raw-cpuid", + "wasi", + "web-sys", + "winapi", +] + [[package]] name = "quick-error" version = "1.2.3" @@ -4580,6 +4665,15 @@ dependencies = [ "rand_core 0.9.5", ] +[[package]] +name = "raw-cpuid" +version = "11.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "498cd0dc59d73224351ee52a95fee0f1a617a2eae0e7d9d720cc622c73a54186" +dependencies = [ + "bitflags 
2.10.0", +] + [[package]] name = "rayon" version = "1.11.0" @@ -5282,6 +5376,15 @@ dependencies = [ "lock_api", ] +[[package]] +name = "spinning_top" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d96d2d1d716fb500937168cc09353ffdc7a012be8475ac7308e1bdf0e3923300" +dependencies = [ + "lock_api", +] + [[package]] name = "spki" version = "0.7.3" @@ -6006,6 +6109,23 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" +[[package]] +name = "tower_governor" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44de9b94d849d3c46e06a883d72d408c2de6403367b39df2b1c9d9e7b6736fe6" +dependencies = [ + "axum", + "forwarded-header-value", + "governor", + "http", + "pin-project", + "thiserror 2.0.18", + "tonic", + "tower", + "tracing", +] + [[package]] name = "tracing" version = "0.1.44" diff --git a/bin/node/src/commands/block_producer.rs b/bin/node/src/commands/block_producer.rs index 5cfbc78fcc..eb329a4d59 100644 --- a/bin/node/src/commands/block_producer.rs +++ b/bin/node/src/commands/block_producer.rs @@ -1,18 +1,11 @@ -use std::time::Duration; - use anyhow::Context; use miden_node_block_producer::BlockProducer; +use miden_node_utils::clap::GrpcOptionsInternal; use miden_node_utils::grpc::UrlExt; use url::Url; use super::{ENV_BLOCK_PRODUCER_URL, ENV_STORE_BLOCK_PRODUCER_URL}; -use crate::commands::{ - BlockProducerConfig, - DEFAULT_TIMEOUT, - ENV_ENABLE_OTEL, - ENV_VALIDATOR_BLOCK_PRODUCER_URL, - duration_to_human_readable_string, -}; +use crate::commands::{BlockProducerConfig, ENV_ENABLE_OTEL, ENV_VALIDATOR_BLOCK_PRODUCER_URL}; #[derive(clap::Subcommand)] pub enum BlockProducerCommand { @@ -40,16 +33,8 @@ pub enum BlockProducerCommand { #[arg(long = "enable-otel", default_value_t = false, env = ENV_ENABLE_OTEL, value_name = "BOOL")] enable_otel: bool, - /// Maximum duration 
a gRPC request is allocated before being dropped by the server. - /// - /// This may occur if the server is overloaded or due to an internal bug. - #[arg( - long = "grpc.timeout", - default_value = &duration_to_human_readable_string(DEFAULT_TIMEOUT), - value_parser = humantime::parse_duration, - value_name = "DURATION" - )] - grpc_timeout: Duration, + #[command(flatten)] + grpc_options: GrpcOptionsInternal, }, } @@ -61,7 +46,7 @@ impl BlockProducerCommand { validator_url, block_producer, enable_otel: _, - grpc_timeout, + grpc_options, } = self; let block_producer_address = @@ -91,7 +76,7 @@ impl BlockProducerCommand { block_interval: block_producer.block_interval, max_txs_per_batch: block_producer.max_txs_per_batch, max_batches_per_block: block_producer.max_batches_per_block, - grpc_timeout, + grpc_options, mempool_tx_capacity: block_producer.mempool_tx_capacity, } .serve() @@ -133,7 +118,7 @@ mod tests { mempool_tx_capacity: NonZeroUsize::new(1000).unwrap(), }, enable_otel: false, - grpc_timeout: Duration::from_secs(10), + grpc_options: GrpcOptionsInternal::default(), }; let result = cmd.handle().await; assert!(result.is_err()); @@ -159,7 +144,7 @@ mod tests { mempool_tx_capacity: NonZeroUsize::new(1000).unwrap(), }, enable_otel: false, - grpc_timeout: Duration::from_secs(10), + grpc_options: GrpcOptionsInternal::default(), }; let result = cmd.handle().await; assert!(result.is_err()); diff --git a/bin/node/src/commands/bundled.rs b/bin/node/src/commands/bundled.rs index 22f1199a3f..6483b1739c 100644 --- a/bin/node/src/commands/bundled.rs +++ b/bin/node/src/commands/bundled.rs @@ -1,12 +1,12 @@ use std::collections::HashMap; use std::path::PathBuf; -use std::time::Duration; use anyhow::Context; use miden_node_block_producer::BlockProducer; use miden_node_ntx_builder::NetworkTransactionBuilder; use miden_node_rpc::Rpc; use miden_node_store::Store; +use miden_node_utils::clap::GrpcOptionsExternal; use miden_node_utils::grpc::UrlExt; use 
miden_node_validator::Validator; use miden_protocol::block::BlockSigner; @@ -19,13 +19,11 @@ use url::Url; use super::{ENV_DATA_DIRECTORY, ENV_RPC_URL}; use crate::commands::{ BlockProducerConfig, - DEFAULT_TIMEOUT, ENV_ENABLE_OTEL, ENV_GENESIS_CONFIG_FILE, ENV_VALIDATOR_INSECURE_SECRET_KEY, INSECURE_VALIDATOR_KEY_HEX, NtxBuilderConfig, - duration_to_human_readable_string, }; #[derive(clap::Subcommand)] @@ -85,16 +83,8 @@ pub enum BundledCommand { #[arg(long = "enable-otel", default_value_t = false, env = ENV_ENABLE_OTEL, value_name = "BOOL")] enable_otel: bool, - /// Maximum duration a gRPC request is allocated before being dropped by the server. - /// - /// This may occur if the server is overloaded or due to an internal bug. - #[arg( - long = "grpc.timeout", - default_value = &duration_to_human_readable_string(DEFAULT_TIMEOUT), - value_parser = humantime::parse_duration, - value_name = "DURATION" - )] - grpc_timeout: Duration, + #[command(flatten)] + grpc_options: GrpcOptionsExternal, /// Insecure, hex-encoded validator secret key for development and testing purposes. 
#[arg( @@ -133,7 +123,7 @@ impl BundledCommand { block_producer, ntx_builder, enable_otel: _, - grpc_timeout, + grpc_options, validator_insecure_secret_key, } => { let secret_key_bytes = hex::decode(validator_insecure_secret_key)?; @@ -143,7 +133,7 @@ impl BundledCommand { data_directory, ntx_builder, block_producer, - grpc_timeout, + grpc_options, signer, ) .await @@ -157,7 +147,7 @@ impl BundledCommand { data_directory: PathBuf, ntx_builder: NtxBuilderConfig, block_producer: BlockProducerConfig, - grpc_timeout: Duration, + grpc_options: GrpcOptionsExternal, signer: impl BlockSigner + Send + Sync + 'static, ) -> anyhow::Result<()> { // Start listening on all gRPC urls so that inter-component connections can be created @@ -212,7 +202,7 @@ impl BundledCommand { block_producer_listener: store_block_producer_listener, ntx_builder_listener: store_ntx_builder_listener, data_directory: data_directory_clone, - grpc_timeout, + grpc_options: grpc_options.into(), } .serve() .await @@ -240,7 +230,7 @@ impl BundledCommand { block_interval: block_producer.block_interval, max_batches_per_block: block_producer.max_batches_per_block, max_txs_per_batch: block_producer.max_txs_per_batch, - grpc_timeout, + grpc_options: grpc_options.into(), mempool_tx_capacity: block_producer.mempool_tx_capacity, } .serve() @@ -255,7 +245,7 @@ impl BundledCommand { async move { Validator { address: validator_address, - grpc_timeout, + grpc_options: grpc_options.into(), signer, } .serve() @@ -279,7 +269,7 @@ impl BundledCommand { store_url, block_producer_url: Some(block_producer_url), validator_url, - grpc_timeout, + grpc_options, } .serve() .await diff --git a/bin/node/src/commands/mod.rs b/bin/node/src/commands/mod.rs index 7e8fa7e69f..7eb891756a 100644 --- a/bin/node/src/commands/mod.rs +++ b/bin/node/src/commands/mod.rs @@ -39,7 +39,6 @@ const ENV_NTX_SCRIPT_CACHE_SIZE: &str = "MIDEN_NTX_DATA_STORE_SCRIPT_CACHE_SIZE" const ENV_VALIDATOR_INSECURE_SECRET_KEY: &str = 
"MIDEN_NODE_VALIDATOR_INSECURE_SECRET_KEY"; const DEFAULT_NTX_TICKER_INTERVAL: Duration = Duration::from_millis(200); -const DEFAULT_TIMEOUT: Duration = Duration::from_secs(10); const DEFAULT_NTX_SCRIPT_CACHE_SIZE: NonZeroUsize = NonZeroUsize::new(1000).unwrap(); // Formats a Duration into a human-readable string for display in clap help text. diff --git a/bin/node/src/commands/rpc.rs b/bin/node/src/commands/rpc.rs index 643734f378..04e82f917b 100644 --- a/bin/node/src/commands/rpc.rs +++ b/bin/node/src/commands/rpc.rs @@ -1,12 +1,11 @@ -use std::time::Duration; - use anyhow::Context; use miden_node_rpc::Rpc; +use miden_node_utils::clap::GrpcOptionsExternal; use miden_node_utils::grpc::UrlExt; use url::Url; use super::{ENV_BLOCK_PRODUCER_URL, ENV_RPC_URL, ENV_STORE_RPC_URL, ENV_VALIDATOR_URL}; -use crate::commands::{DEFAULT_TIMEOUT, ENV_ENABLE_OTEL, duration_to_human_readable_string}; +use crate::commands::ENV_ENABLE_OTEL; #[derive(clap::Subcommand)] pub enum RpcCommand { @@ -36,16 +35,8 @@ pub enum RpcCommand { #[arg(long = "enable-otel", default_value_t = false, env = ENV_ENABLE_OTEL, value_name = "BOOL")] enable_otel: bool, - /// Maximum duration a gRPC request is allocated before being dropped by the server. - /// - /// This may occur if the server is overloaded or due to an internal bug. 
- #[arg( - long = "grpc.timeout", - default_value = &duration_to_human_readable_string(DEFAULT_TIMEOUT), - value_parser = humantime::parse_duration, - value_name = "DURATION" - )] - grpc_timeout: Duration, + #[command(flatten)] + grpc_options: GrpcOptionsExternal, }, } @@ -57,7 +48,7 @@ impl RpcCommand { block_producer_url, validator_url, enable_otel: _, - grpc_timeout, + grpc_options, } = self; let listener = url.to_socket().context("Failed to extract socket address from RPC URL")?; @@ -70,7 +61,7 @@ impl RpcCommand { store_url, block_producer_url, validator_url, - grpc_timeout, + grpc_options, } .serve() .await diff --git a/bin/node/src/commands/store.rs b/bin/node/src/commands/store.rs index 9dd311368f..7dda3b2a12 100644 --- a/bin/node/src/commands/store.rs +++ b/bin/node/src/commands/store.rs @@ -1,9 +1,9 @@ use std::path::{Path, PathBuf}; -use std::time::Duration; use anyhow::Context; use miden_node_store::Store; use miden_node_store::genesis::config::{AccountFileWithName, GenesisConfig}; +use miden_node_utils::clap::GrpcOptionsInternal; use miden_node_utils::grpc::UrlExt; use miden_protocol::crypto::dsa::ecdsa_k256_keccak::SecretKey; use miden_protocol::utils::Deserializable; @@ -16,12 +16,10 @@ use super::{ ENV_STORE_RPC_URL, }; use crate::commands::{ - DEFAULT_TIMEOUT, ENV_ENABLE_OTEL, ENV_GENESIS_CONFIG_FILE, ENV_VALIDATOR_INSECURE_SECRET_KEY, INSECURE_VALIDATOR_KEY_HEX, - duration_to_human_readable_string, }; #[allow(clippy::large_enum_variant, reason = "single use enum")] @@ -83,16 +81,8 @@ pub enum StoreCommand { #[arg(long = "enable-otel", default_value_t = false, env = ENV_ENABLE_OTEL, value_name = "BOOL")] enable_otel: bool, - /// Maximum duration a gRPC request is allocated before being dropped by the server. - /// - /// This may occur if the server is overloaded or due to an internal bug. 
- #[arg( - long = "grpc.timeout", - default_value = &duration_to_human_readable_string(DEFAULT_TIMEOUT), - value_parser = humantime::parse_duration, - value_name = "DURATION" - )] - grpc_timeout: Duration, + #[command(flatten)] + grpc_options: GrpcOptionsInternal, }, } @@ -117,14 +107,14 @@ impl StoreCommand { block_producer_url, data_directory, enable_otel: _, - grpc_timeout, + grpc_options, } => { Self::start( rpc_url, ntx_builder_url, block_producer_url, data_directory, - grpc_timeout, + grpc_options, ) .await }, @@ -144,7 +134,7 @@ impl StoreCommand { ntx_builder_url: Url, block_producer_url: Url, data_directory: PathBuf, - grpc_timeout: Duration, + grpc_options: GrpcOptionsInternal, ) -> anyhow::Result<()> { let rpc_listener = rpc_url .to_socket() @@ -172,7 +162,7 @@ impl StoreCommand { ntx_builder_listener, block_producer_listener, data_directory, - grpc_timeout, + grpc_options, } .serve() .await diff --git a/bin/node/src/commands/validator.rs b/bin/node/src/commands/validator.rs index f543be3013..3d967f7253 100644 --- a/bin/node/src/commands/validator.rs +++ b/bin/node/src/commands/validator.rs @@ -1,6 +1,5 @@ -use std::time::Duration; - use anyhow::Context; +use miden_node_utils::clap::GrpcOptionsInternal; use miden_node_utils::grpc::UrlExt; use miden_node_validator::Validator; use miden_protocol::crypto::dsa::ecdsa_k256_keccak::SecretKey; @@ -8,12 +7,10 @@ use miden_protocol::utils::Deserializable; use url::Url; use crate::commands::{ - DEFAULT_TIMEOUT, ENV_ENABLE_OTEL, ENV_VALIDATOR_INSECURE_SECRET_KEY, ENV_VALIDATOR_URL, INSECURE_VALIDATOR_KEY_HEX, - duration_to_human_readable_string, }; #[derive(clap::Subcommand)] @@ -31,14 +28,8 @@ pub enum ValidatorCommand { #[arg(long = "enable-otel", default_value_t = true, env = ENV_ENABLE_OTEL, value_name = "BOOL")] enable_otel: bool, - /// Maximum duration a gRPC request is allocated before being dropped by the server. 
- #[arg( - long = "grpc.timeout", - default_value = &duration_to_human_readable_string(DEFAULT_TIMEOUT), - value_parser = humantime::parse_duration, - value_name = "DURATION" - )] - grpc_timeout: Duration, + #[command(flatten)] + grpc_options: GrpcOptionsInternal, /// Insecure, hex-encoded validator secret key for development and testing purposes. /// @@ -51,7 +42,7 @@ pub enum ValidatorCommand { impl ValidatorCommand { pub async fn handle(self) -> anyhow::Result<()> { let Self::Start { - url, grpc_timeout, insecure_secret_key, .. + url, grpc_options, insecure_secret_key, .. } = self; let address = @@ -59,7 +50,7 @@ impl ValidatorCommand { let signer = SecretKey::read_from_bytes(hex::decode(insecure_secret_key)?.as_ref())?; - Validator { address, grpc_timeout, signer } + Validator { address, grpc_options, signer } .serve() .await .context("failed while serving validator component") diff --git a/bin/stress-test/src/seeding/mod.rs b/bin/stress-test/src/seeding/mod.rs index e0fe79338f..ea30d82ab9 100644 --- a/bin/stress-test/src/seeding/mod.rs +++ b/bin/stress-test/src/seeding/mod.rs @@ -1,7 +1,7 @@ use std::collections::BTreeMap; use std::path::PathBuf; use std::sync::{Arc, Mutex}; -use std::time::{Duration, Instant}; +use std::time::Instant; use metrics::SeedingMetrics; use miden_air::ExecutionProof; @@ -10,6 +10,7 @@ use miden_node_block_producer::store::StoreClient; use miden_node_proto::domain::batch::BatchInputs; use miden_node_proto::generated::store::rpc_client::RpcClient; use miden_node_store::{DataDirectory, GenesisState, Store}; +use miden_node_utils::clap::GrpcOptionsInternal; use miden_node_utils::tracing::grpc::OtelInterceptor; use miden_protocol::account::delta::AccountUpdateDetails; use miden_protocol::account::{ @@ -546,7 +547,7 @@ pub async fn start_store( ntx_builder_listener, block_producer_listener, data_directory: dir, - grpc_timeout: Duration::from_secs(30), + grpc_options: GrpcOptionsInternal::bench(), } .serve() .await diff --git 
a/crates/block-producer/src/server/mod.rs b/crates/block-producer/src/server/mod.rs index 8245c1ee6b..c85fe5c0bb 100644 --- a/crates/block-producer/src/server/mod.rs +++ b/crates/block-producer/src/server/mod.rs @@ -10,6 +10,7 @@ use miden_node_proto::domain::mempool::MempoolEvent; use miden_node_proto::generated::block_producer::api_server; use miden_node_proto::generated::{self as proto}; use miden_node_proto_build::block_producer_api_descriptor; +use miden_node_utils::clap::GrpcOptionsInternal; use miden_node_utils::formatting::{format_input_notes, format_output_notes}; use miden_node_utils::panic::{CatchPanicLayer, catch_panic_layer_fn}; use miden_node_utils::tracing::grpc::grpc_trace_fn; @@ -65,10 +66,8 @@ pub struct BlockProducer { pub max_txs_per_batch: usize, /// The maximum number of batches per block. pub max_batches_per_block: usize, - /// Server-side timeout for an individual gRPC request. - /// - /// If the handler takes longer than this duration, the server cancels the call. - pub grpc_timeout: Duration, + /// Server-side gRPC options. + pub grpc_options: GrpcOptionsInternal, /// The maximum number of inflight transactions allowed in the mempool at once. 
pub mempool_tx_capacity: NonZeroUsize, @@ -156,7 +155,7 @@ impl BlockProducer { let mempool = mempool.clone(); async move { BlockProducerRpcServer::new(mempool, store) - .serve(listener, self.grpc_timeout) + .serve(listener, self.grpc_options) .await } }) @@ -239,7 +238,11 @@ impl BlockProducerRpcServer { // SERVER STARTUP // -------------------------------------------------------------------------------------------- - async fn serve(self, listener: TcpListener, timeout: Duration) -> anyhow::Result<()> { + async fn serve( + self, + listener: TcpListener, + grpc_options: GrpcOptionsInternal, + ) -> anyhow::Result<()> { // Start background task to periodically update cached mempool stats self.spawn_mempool_stats_updater().await; @@ -258,10 +261,12 @@ impl BlockProducerRpcServer { .context("failed to build reflection service")?; // Build the gRPC server with the API service and trace layer. + tonic::transport::Server::builder() + .accept_http1(true) + .timeout(grpc_options.request_timeout) .layer(CatchPanicLayer::custom(catch_panic_layer_fn)) .layer(TraceLayer::new_for_grpc().make_span_with(grpc_trace_fn)) - .timeout(timeout) .add_service(api_server::ApiServer::new(self)) .add_service(reflection_service) .add_service(reflection_service_alpha) diff --git a/crates/block-producer/src/server/tests.rs b/crates/block-producer/src/server/tests.rs index 453512597b..c803b1a5d8 100644 --- a/crates/block-producer/src/server/tests.rs +++ b/crates/block-producer/src/server/tests.rs @@ -1,3 +1,4 @@ +use std::num::NonZeroUsize; use std::time::Duration; use miden_air::{ExecutionProof, HashFunction}; @@ -5,6 +6,7 @@ use miden_node_proto::generated::{ self as proto, block_producer::api_client as block_producer_client, }; use miden_node_store::{GenesisState, Store}; +use miden_node_utils::clap::GrpcOptionsInternal; use miden_protocol::{ Digest, account::{AccountId, AccountIdVersion, AccountStorageMode, AccountType}, @@ -55,6 +57,7 @@ async fn 
block_producer_startup_is_robust_to_network_failures() { block_interval: Duration::from_millis(500), max_txs_per_batch: SERVER_MAX_TXS_PER_BATCH, max_batches_per_block: SERVER_MAX_BATCHES_PER_BLOCK, + grpc_options, } .serve() .await @@ -94,7 +97,7 @@ async fn block_producer_startup_is_robust_to_network_failures() { ntx_builder_listener, block_producer_listener, data_directory: dir, - grpc_timeout: std::time::Duration::from_secs(30), + grpc_options: GrpcOptionsInternal::bench(), } .serve() .await @@ -160,7 +163,7 @@ async fn restart_store( ntx_builder_listener, block_producer_listener, data_directory: dir, - grpc_timeout: std::time::Duration::from_secs(30), + grpc_options: GrpcOptionsInternal::bench(), } .serve() .await diff --git a/crates/rpc/src/server/mod.rs b/crates/rpc/src/server/mod.rs index db860bf4d5..63de3f04ad 100644 --- a/crates/rpc/src/server/mod.rs +++ b/crates/rpc/src/server/mod.rs @@ -1,10 +1,10 @@ -use std::time::Duration; - use accept::AcceptHeaderLayer; use anyhow::Context; use miden_node_proto::generated::rpc::api_server; use miden_node_proto_build::rpc_api_descriptor; +use miden_node_utils::clap::GrpcOptionsExternal; use miden_node_utils::cors::cors_for_grpc_web_layer; +use miden_node_utils::grpc; use miden_node_utils::panic::{CatchPanicLayer, catch_panic_layer_fn}; use miden_node_utils::tracing::grpc::grpc_trace_fn; use tokio::net::TcpListener; @@ -32,10 +32,7 @@ pub struct Rpc { pub store_url: Url, pub block_producer_url: Option, pub validator_url: Url, - /// Server-side timeout for an individual gRPC request. - /// - /// If the handler takes longer than this duration, the server cancels the call. 
- pub grpc_timeout: Duration, + pub grpc_options: GrpcOptionsExternal, } impl Rpc { @@ -80,10 +77,14 @@ impl Rpc { tonic::transport::Server::builder() .accept_http1(true) - .timeout(self.grpc_timeout) + .max_connection_age(self.grpc_options.max_connection_age) + .timeout(self.grpc_options.request_timeout) .layer(CatchPanicLayer::custom(catch_panic_layer_fn)) + .layer(grpc::connect_info_layer()) .layer(TraceLayer::new_for_grpc().make_span_with(grpc_trace_fn)) .layer(HealthCheckLayer) + .layer(grpc::rate_limit_concurrent_connections(self.grpc_options)) + .layer(grpc::rate_limit_per_ip(self.grpc_options)?) // Note: must come before the accept layer, as otherwise accept rejections // do _not_ get CORS headers applied, masking the accept error in // web-clients (which would experience CORS rejection). diff --git a/crates/rpc/src/tests.rs b/crates/rpc/src/tests.rs index b35fe8b6dc..bdbd915876 100644 --- a/crates/rpc/src/tests.rs +++ b/crates/rpc/src/tests.rs @@ -1,13 +1,15 @@ -use std::net::SocketAddr; +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; +use std::num::{NonZeroU32, NonZeroU64}; use std::time::Duration; use http::header::{ACCEPT, CONTENT_TYPE}; use http::{HeaderMap, HeaderValue}; -use miden_node_proto::clients::{Builder, RpcClient}; +use miden_node_proto::clients::{Builder, GrpcClient, Interceptor, RpcClient}; use miden_node_proto::generated::rpc::api_client::ApiClient as ProtoClient; use miden_node_proto::generated::{self as proto}; use miden_node_store::Store; use miden_node_store::genesis::config::GenesisConfig; +use miden_node_utils::clap::{GrpcOptionsExternal, GrpcOptionsInternal}; use miden_node_utils::fee::test_fee; use miden_node_utils::limiter::{ QueryParamAccountIdLimit, @@ -40,11 +42,13 @@ use url::Url; use crate::Rpc; +const REQUEST_TIMEOUT: Duration = Duration::from_secs(5); + #[tokio::test] async fn rpc_server_accepts_requests_without_accept_header() { // Start the RPC. 
- let (_, rpc_addr, store_addr) = start_rpc().await; - let (store_runtime, _data_directory, _genesis) = start_store(store_addr).await; + let (_, rpc_addr, store_listener) = start_rpc().await; + let (store_runtime, _data_directory, _genesis) = start_store(store_listener).await; // Override the client so that the ACCEPT header is not set. let mut rpc_client = { @@ -67,11 +71,45 @@ async fn rpc_server_accepts_requests_without_accept_header() { shutdown_store(store_runtime).await; } +#[tokio::test] +async fn rpc_rate_limits_per_ip() { + let grpc_options = GrpcOptionsExternal { + burst_size: NonZeroU32::new(8).unwrap(), + replenish_n_per_second_per_ip: NonZeroU64::new(1).unwrap(), + ..GrpcOptionsExternal::test() + }; + let (_, rpc_addr, store_listener) = start_rpc_with_options(grpc_options).await; + let (store_runtime, data_directory, _genesis) = start_store(store_listener).await; + + let url = rpc_addr.to_string(); + let url = Url::parse(format!("http://{}", &url).as_str()).unwrap(); + let mut rpc_client = connect_rpc(url.clone(), Some(IpAddr::V4(Ipv4Addr::LOCALHOST))).await; + + let mut results = Vec::new(); + let mut last_error = None; + for _ in 0..256 { + let result = send_request(&mut rpc_client).await; + if let Err(err) = &result { + last_error = Some(err.code()); + } + results.push(result); + } + + assert!(results.iter().any(std::result::Result::is_ok)); + assert!( + last_error.is_some_and(|code| code == tonic::Code::ResourceExhausted), + "expected rate limit error but got: {last_error:?}" + ); + + shutdown_store(store_runtime).await; + drop(data_directory); +} + #[tokio::test] async fn rpc_server_accepts_requests_with_accept_header() { // Start the RPC. 
- let (mut rpc_client, _, store_addr) = start_rpc().await; - let (store_runtime, _data_directory, _genesis) = start_store(store_addr).await; + let (mut rpc_client, _, store_listener) = start_rpc().await; + let (store_runtime, _data_directory, _genesis) = start_store(store_listener).await; // Send any request to the RPC. let response = send_request(&mut rpc_client).await; @@ -87,8 +125,8 @@ async fn rpc_server_accepts_requests_with_accept_header() { async fn rpc_server_rejects_requests_with_accept_header_invalid_version() { for version in ["1.9.0", "0.8.1", "0.8.0", "0.999.0", "99.0.0"] { // Start the RPC. - let (_, rpc_addr, store_addr) = start_rpc().await; - let (store_runtime, _data_directory, _genesis) = start_store(store_addr).await; + let (_, rpc_addr, store_listener) = start_rpc().await; + let (store_runtime, _data_directory, _genesis) = start_store(store_listener).await; // Recreate the RPC client with an invalid version. let url = rpc_addr.to_string(); @@ -123,14 +161,15 @@ async fn rpc_startup_is_robust_to_network_failures() { // connect to each other on startup and that they reconnect after the store is restarted. // Start the RPC. - let (mut rpc_client, _, store_addr) = start_rpc().await; + let (mut rpc_client, _, store_listener) = start_rpc().await; + let store_addr = store_listener.local_addr().expect("store listener should expose address"); // Test: requests against RPC api should fail immediately let response = send_request(&mut rpc_client).await; assert!(response.is_err()); // Start the store. 
- let (store_runtime, data_directory, _genesis) = start_store(store_addr).await; + let (store_runtime, data_directory, _genesis) = start_store(store_listener).await; // Test: send request against RPC api and should succeed let response = send_request(&mut rpc_client).await; @@ -153,8 +192,8 @@ async fn rpc_startup_is_robust_to_network_failures() { #[tokio::test] async fn rpc_server_has_web_support() { // Start server - let (_, rpc_addr, store_addr) = start_rpc().await; - let (store_runtime, _data_directory, _genesis) = start_store(store_addr).await; + let (_, rpc_addr, store_listener) = start_rpc().await; + let (store_runtime, _data_directory, _genesis) = start_store(store_listener).await; // Send a status request let client = reqwest::Client::new(); @@ -196,8 +235,8 @@ async fn rpc_server_has_web_support() { #[tokio::test] async fn rpc_server_rejects_proven_transactions_with_invalid_commitment() { // Start the RPC. - let (_, rpc_addr, store_addr) = start_rpc().await; - let (store_runtime, _data_directory, genesis) = start_store(store_addr).await; + let (_, rpc_addr, store_listener) = start_rpc().await; + let (store_runtime, _data_directory, genesis) = start_store(store_listener).await; // Override the client so that the ACCEPT header is not set. let mut rpc_client = @@ -282,8 +321,8 @@ async fn rpc_server_rejects_proven_transactions_with_invalid_commitment() { #[tokio::test] async fn rpc_server_rejects_tx_submissions_without_genesis() { // Start the RPC. - let (_, rpc_addr, store_addr) = start_rpc().await; - let (store_runtime, _data_directory, _genesis) = start_store(store_addr).await; + let (_, rpc_addr, store_listener) = start_rpc().await; + let (store_runtime, _data_directory, _genesis) = start_store(store_listener).await; // Override the client so that the ACCEPT header is not set. 
let mut rpc_client = @@ -363,14 +402,29 @@ async fn send_request( rpc_client.get_block_header_by_number(request).await } +async fn connect_rpc(url: Url, local_address: Option) -> RpcClient { + let mut endpoint = tonic::transport::Endpoint::from_shared(url.to_string()) + .expect("Url type always results in valid endpoint") + .timeout(REQUEST_TIMEOUT); + if let Some(local_address) = local_address { + endpoint = endpoint.local_address(Some(local_address)); + } + let channel = endpoint.connect().await.expect("Failed to build channel"); + let interceptor = Interceptor::default(); + RpcClient::with_interceptor(channel, interceptor) +} + /// Binds a socket on an available port, runs the RPC server on it, and /// returns a client to talk to the server, along with the socket address. -async fn start_rpc() -> (RpcClient, std::net::SocketAddr, std::net::SocketAddr) { - let store_addr = { - let store_listener = - TcpListener::bind("127.0.0.1:0").await.expect("store should bind a port"); - store_listener.local_addr().expect("store should get a local address") - }; +async fn start_rpc() -> (RpcClient, std::net::SocketAddr, TcpListener) { + start_rpc_with_options(GrpcOptionsExternal::test()).await +} + +async fn start_rpc_with_options( + grpc_options: GrpcOptionsExternal, +) -> (RpcClient, std::net::SocketAddr, TcpListener) { + let store_listener = TcpListener::bind("127.0.0.1:0").await.expect("store should bind a port"); + let store_addr = store_listener.local_addr().expect("store should get a local address"); let block_producer_addr = { let block_producer_listener = TcpListener::bind("127.0.0.1:0").await.expect("Failed to bind block-producer"); @@ -394,7 +448,7 @@ async fn start_rpc() -> (RpcClient, std::net::SocketAddr, std::net::SocketAddr) store_url, block_producer_url: Some(block_producer_url), validator_url, - grpc_timeout: Duration::from_secs(30), + grpc_options, } .serve() .await @@ -403,20 +457,12 @@ async fn start_rpc() -> (RpcClient, std::net::SocketAddr, 
std::net::SocketAddr) let url = rpc_addr.to_string(); // SAFETY: The rpc_addr is always valid as it is created from a `SocketAddr`. let url = Url::parse(format!("http://{}", &url).as_str()).unwrap(); - let rpc_client: RpcClient = Builder::new(url) - .without_tls() - .with_timeout(Duration::from_secs(10)) - .without_metadata_version() - .without_metadata_genesis() - .without_otel_context_injection() - .connect::() - .await - .expect("Failed to build client"); + let rpc_client = connect_rpc(url, None).await; - (rpc_client, rpc_addr, store_addr) + (rpc_client, rpc_addr, store_listener) } -async fn start_store(store_addr: SocketAddr) -> (Runtime, TempDir, Word) { +async fn start_store(store_listener: TcpListener) -> (Runtime, TempDir, Word) { // Start the store. let data_directory = tempfile::tempdir().expect("tempdir should be created"); @@ -425,7 +471,7 @@ async fn start_store(store_addr: SocketAddr) -> (Runtime, TempDir, Word) { let (genesis_state, _) = config.into_state(signer).unwrap(); Store::bootstrap(genesis_state.clone(), data_directory.path()).expect("store should bootstrap"); let dir = data_directory.path().to_path_buf(); - let rpc_listener = TcpListener::bind(store_addr).await.expect("store should bind a port"); + let rpc_listener = store_listener; let ntx_builder_listener = TcpListener::bind("127.0.0.1:0") .await .expect("Failed to bind store ntx-builder gRPC endpoint"); @@ -442,7 +488,7 @@ async fn start_store(store_addr: SocketAddr) -> (Runtime, TempDir, Word) { ntx_builder_listener, block_producer_listener, data_directory: dir, - grpc_timeout: Duration::from_secs(30), + grpc_options: GrpcOptionsInternal::test(), } .serve() .await @@ -482,7 +528,7 @@ async fn restart_store(store_addr: SocketAddr, data_directory: &std::path::Path) ntx_builder_listener, block_producer_listener, data_directory: dir, - grpc_timeout: Duration::from_secs(10), + grpc_options: GrpcOptionsInternal::test(), } .serve() .await @@ -494,8 +540,8 @@ async fn restart_store(store_addr: 
SocketAddr, data_directory: &std::path::Path) #[tokio::test] async fn get_limits_endpoint() { // Start the RPC and store - let (mut rpc_client, _rpc_addr, store_addr) = start_rpc().await; - let (store_runtime, _data_directory, _genesis) = start_store(store_addr).await; + let (mut rpc_client, _rpc_addr, store_listener) = start_rpc().await; + let (store_runtime, _data_directory, _genesis) = start_store(store_listener).await; // Call the get_limits endpoint let response = rpc_client.get_limits(()).await.expect("get_limits should succeed"); diff --git a/crates/store/src/server/mod.rs b/crates/store/src/server/mod.rs index b4b5798db9..14e2de140f 100644 --- a/crates/store/src/server/mod.rs +++ b/crates/store/src/server/mod.rs @@ -10,6 +10,7 @@ use miden_node_proto_build::{ store_ntx_builder_api_descriptor, store_rpc_api_descriptor, }; +use miden_node_utils::clap::GrpcOptionsInternal; use miden_node_utils::panic::{CatchPanicLayer, catch_panic_layer_fn}; use miden_node_utils::tracing::grpc::grpc_trace_fn; use miden_protocol::block::BlockSigner; @@ -36,10 +37,7 @@ pub struct Store { pub ntx_builder_listener: TcpListener, pub block_producer_listener: TcpListener, pub data_directory: PathBuf, - /// Server-side timeout for an individual gRPC request. - /// - /// If the handler takes longer than this duration, the server cancels the call. 
- pub grpc_timeout: Duration, + pub grpc_options: GrpcOptionsInternal, } impl Store { @@ -89,7 +87,7 @@ impl Store { let ntx_builder_address = self.ntx_builder_listener.local_addr()?; let block_producer_address = self.block_producer_listener.local_addr()?; info!(target: COMPONENT, rpc_endpoint=?rpc_address, ntx_builder_endpoint=?ntx_builder_address, - block_producer_endpoint=?block_producer_address, ?self.data_directory, ?self.grpc_timeout, + block_producer_endpoint=?block_producer_address, ?self.data_directory, ?self.grpc_options.request_timeout, "Loading database"); let (termination_ask, mut termination_signal) = @@ -147,9 +145,9 @@ impl Store { // Build the gRPC server with the API services and trace layer. join_set.spawn( tonic::transport::Server::builder() + .timeout(self.grpc_options.request_timeout) .layer(CatchPanicLayer::custom(catch_panic_layer_fn)) .layer(TraceLayer::new_for_grpc().make_span_with(grpc_trace_fn)) - .timeout(self.grpc_timeout) .add_service(rpc_service) .add_service(reflection_service.clone()) .add_service(reflection_service_alpha.clone()) @@ -158,9 +156,9 @@ impl Store { join_set.spawn( tonic::transport::Server::builder() + .timeout(self.grpc_options.request_timeout) .layer(CatchPanicLayer::custom(catch_panic_layer_fn)) .layer(TraceLayer::new_for_grpc().make_span_with(grpc_trace_fn)) - .timeout(self.grpc_timeout) .add_service(ntx_builder_service) .add_service(reflection_service.clone()) .add_service(reflection_service_alpha.clone()) @@ -169,9 +167,10 @@ impl Store { join_set.spawn( tonic::transport::Server::builder() + .accept_http1(true) + .timeout(self.grpc_options.request_timeout) .layer(CatchPanicLayer::custom(catch_panic_layer_fn)) .layer(TraceLayer::new_for_grpc().make_span_with(grpc_trace_fn)) - .timeout(self.grpc_timeout) .add_service(block_producer_service) .add_service(reflection_service) .add_service(reflection_service_alpha) diff --git a/crates/utils/Cargo.toml b/crates/utils/Cargo.toml index e61930937e..abd9ea0803 100644 --- 
a/crates/utils/Cargo.toml +++ b/crates/utils/Cargo.toml @@ -21,9 +21,12 @@ testing = ["miden-protocol/testing"] [dependencies] anyhow = { workspace = true } bytes = { version = "1.10" } +clap = { workspace = true } figment = { features = ["env", "toml"], version = "0.10" } +governor = { version = "0.10" } http = { workspace = true } http-body-util = { version = "0.1" } +humantime = { workspace = true } itertools = { workspace = true } lru = { workspace = true } miden-protocol = { workspace = true } @@ -35,7 +38,9 @@ serde = { features = ["derive"], version = "1.0" } thiserror = { workspace = true } tokio = { workspace = true } tonic = { default-features = true, workspace = true } +tower = { features = ["limit"], workspace = true } tower-http = { features = ["catch-panic"], workspace = true } +tower_governor = { version = "0.8" } tracing = { workspace = true } tracing-forest = { features = ["chrono"], optional = true, version = "0.2" } tracing-opentelemetry = { version = "0.32" } diff --git a/crates/utils/src/clap.rs b/crates/utils/src/clap.rs new file mode 100644 index 0000000000..14e2898138 --- /dev/null +++ b/crates/utils/src/clap.rs @@ -0,0 +1,135 @@ +//! Public module for share clap pieces to reduce duplication + +use std::num::{NonZeroU32, NonZeroU64}; +use std::time::Duration; + +const DEFAULT_REQUEST_TIMEOUT: Duration = Duration::from_secs(10); +const TEST_REQUEST_TIMEOUT: Duration = Duration::from_secs(5); +const DEFAULT_MAX_CONNECTION_AGE: Duration = Duration::from_secs(30 * 60); +const DEFAULT_REPLENISH_N_PER_SECOND_PER_IP: NonZeroU64 = NonZeroU64::new(16).unwrap(); +const DEFAULT_BURST_SIZE: NonZeroU32 = NonZeroU32::new(128).unwrap(); +const DEFAULT_MAX_CONCURRENT_CONNECTIONS: u64 = 1_000; +const BENCH_REQUEST_TIMEOUT: Duration = Duration::from_secs(24 * 60 * 60); + +// Formats a Duration into a human-readable string for display in clap help text +// and yields a &'static str by _leaking_ the string deliberately. 
+pub fn duration_to_human_readable_string(duration: Duration) -> &'static str { + Box::new(humantime::format_duration(duration).to_string()).leak() +} + +#[derive(clap::Args, Copy, Clone, Debug, PartialEq, Eq)] +pub struct GrpcOptionsInternal { + /// Maximum duration a gRPC request is allocated before being dropped by the server. + /// + /// This may occur if the server is overloaded or due to an internal bug. + #[arg( + long = "grpc.timeout", + default_value = duration_to_human_readable_string(DEFAULT_REQUEST_TIMEOUT), + value_parser = humantime::parse_duration, + value_name = "DURATION" + )] + pub request_timeout: Duration, +} + +impl Default for GrpcOptionsInternal { + fn default() -> Self { + Self { request_timeout: DEFAULT_REQUEST_TIMEOUT } + } +} + +impl From for GrpcOptionsInternal { + fn from(value: GrpcOptionsExternal) -> Self { + let GrpcOptionsExternal { request_timeout, .. } = value; + Self { request_timeout } + } +} + +impl GrpcOptionsInternal { + pub fn test() -> Self { + GrpcOptionsExternal::test().into() + } + pub fn bench() -> Self { + GrpcOptionsExternal::bench().into() + } +} + +#[derive(clap::Args, Copy, Clone, Debug, PartialEq, Eq)] +pub struct GrpcOptionsExternal { + /// Maximum duration a gRPC request is allocated before being dropped by the server. + /// + /// This may occur if the server is overloaded or due to an internal bug. + #[arg( + long = "grpc.timeout", + default_value = duration_to_human_readable_string(DEFAULT_REQUEST_TIMEOUT), + value_parser = humantime::parse_duration, + value_name = "DURATION" + )] + pub request_timeout: Duration, + + /// Maximum duration of a connection before we drop it on the server side irrespective of + /// activity. 
+ #[arg( + long = "grpc.max_connection_age", + default_value = duration_to_human_readable_string(DEFAULT_MAX_CONNECTION_AGE), + value_parser = humantime::parse_duration, + value_name = "MAX_CONNECTION_AGE" + )] + pub max_connection_age: Duration, + + /// Number of connections to be served before the "API tokens" need to be replenished + /// per IP address. + #[arg( + long = "grpc.burst_size", + default_value_t = DEFAULT_BURST_SIZE, + value_name = "BURST_SIZE" + )] + pub burst_size: NonZeroU32, + + /// Number of request credits replenished per second per IP. + #[arg( + long = "grpc.replenish_n_per_second", + default_value_t = DEFAULT_REPLENISH_N_PER_SECOND_PER_IP, + value_name = "DEFAULT_REPLENISH_N_PER_SECOND" + )] + pub replenish_n_per_second_per_ip: NonZeroU64, + + /// Maximum number of concurrent connections accepted by the server. + #[arg( + long = "grpc.max_concurrent_connections", + default_value_t = DEFAULT_MAX_CONCURRENT_CONNECTIONS, + value_name = "MAX_CONCURRENT_CONNECTIONS" + )] + pub max_concurrent_connections: u64, +} + +impl Default for GrpcOptionsExternal { + fn default() -> Self { + Self { + request_timeout: DEFAULT_REQUEST_TIMEOUT, + max_connection_age: DEFAULT_MAX_CONNECTION_AGE, + burst_size: DEFAULT_BURST_SIZE, + replenish_n_per_second_per_ip: DEFAULT_REPLENISH_N_PER_SECOND_PER_IP, + max_concurrent_connections: DEFAULT_MAX_CONCURRENT_CONNECTIONS, + } + } +} + +impl GrpcOptionsExternal { + pub fn test() -> Self { + Self { + request_timeout: TEST_REQUEST_TIMEOUT, + ..Default::default() + } + } + + /// Return a gRPC config for benchmarking. 
+ pub fn bench() -> Self { + Self { + request_timeout: BENCH_REQUEST_TIMEOUT, + max_connection_age: BENCH_REQUEST_TIMEOUT, + burst_size: NonZeroU32::new(100_000).unwrap(), + replenish_n_per_second_per_ip: NonZeroU64::new(100_000).unwrap(), + max_concurrent_connections: u64::MAX, + } + } +} diff --git a/crates/utils/src/grpc.rs b/crates/utils/src/grpc.rs index 86f84e0999..19e2c90d55 100644 --- a/crates/utils/src/grpc.rs +++ b/crates/utils/src/grpc.rs @@ -21,3 +21,7 @@ mod private { pub trait Sealed {} impl Sealed for url::Url {} } + +pub mod connect_info; +mod layers; +pub use layers::*; diff --git a/crates/utils/src/grpc/connect_info.rs b/crates/utils/src/grpc/connect_info.rs new file mode 100644 index 0000000000..a5b0345d35 --- /dev/null +++ b/crates/utils/src/grpc/connect_info.rs @@ -0,0 +1,18 @@ +use tonic::service::Interceptor; +use tonic::{Request, Status}; + +// Extracts the IP for connection management and rate-limiting requests, called `Governor`. +#[derive(Debug, Clone)] +pub struct ConnectInfoInterceptor; + +impl Interceptor for ConnectInfoInterceptor { + fn call(&mut self, mut request: Request<()>) -> Result, Status> { + let addr = request + .remote_addr() + .ok_or_else(|| Status::failed_precondition("Expected TCP connection"))?; + request + .metadata_mut() + .insert("forwarded", format!("for={addr}").try_into().unwrap()); + Ok(request) + } +} diff --git a/crates/utils/src/grpc/layers.rs b/crates/utils/src/grpc/layers.rs new file mode 100644 index 0000000000..894412f13c --- /dev/null +++ b/crates/utils/src/grpc/layers.rs @@ -0,0 +1,63 @@ +use std::time::Duration; + +use anyhow::{Context, ensure}; +use governor::middleware::StateInformationMiddleware; +use tonic::service::InterceptorLayer; +use tower::limit::GlobalConcurrencyLimitLayer; +use tower_governor::governor::GovernorConfigBuilder; +use tower_governor::key_extractor::SmartIpKeyExtractor; + +use super::connect_info::ConnectInfoInterceptor; +use crate::clap::GrpcOptionsExternal; + +/// Creates the 
gRPC interceptor layer that attaches connection metadata. +pub fn connect_info_layer() -> InterceptorLayer { + InterceptorLayer::new(ConnectInfoInterceptor) +} + +/// Builds a global concurrency limit layer using the configured semaphore. +pub fn rate_limit_concurrent_connections( + grpc_options: GrpcOptionsExternal, +) -> GlobalConcurrencyLimitLayer { + tower::limit::GlobalConcurrencyLimitLayer::new(grpc_options.max_concurrent_connections as usize) +} + +/// Creates a per-IP rate limit layer using the configured governor settings. +pub fn rate_limit_per_ip( + grpc_options: GrpcOptionsExternal, +) -> anyhow::Result< + tower_governor::GovernorLayer< + SmartIpKeyExtractor, + StateInformationMiddleware, + tonic::body::Body, + >, +> { + let nanos_per_replenish = Duration::from_secs(1) + .as_nanos() + .checked_div(u128::from(grpc_options.replenish_n_per_second_per_ip.get())) + .unwrap_or_default(); + ensure!( + nanos_per_replenish > 0, + "grpc.replenish_n_per_second must be less than or equal to 1e9" + ); + let replenish_period = Duration::from_nanos( + u64::try_from(nanos_per_replenish).context("invalid gRPC rate limit configuration")?, + ); + let config = GovernorConfigBuilder::default() + .key_extractor(SmartIpKeyExtractor) + .period(replenish_period) + .burst_size(grpc_options.burst_size.into()) + .use_headers() + .finish() + .context("invalid gRPC rate limit configuration")?; + let limiter = std::sync::Arc::clone(config.limiter()); + tokio::spawn(async move { + let mut interval = tokio::time::interval(Duration::from_secs(60)); + loop { + interval.tick().await; + // avoid a DoS vector + limiter.retain_recent(); + } + }); + Ok(tower_governor::GovernorLayer::new(config)) +} diff --git a/crates/utils/src/lib.rs b/crates/utils/src/lib.rs index 530e971e49..6b01dd348f 100644 --- a/crates/utils/src/lib.rs +++ b/crates/utils/src/lib.rs @@ -1,3 +1,4 @@ +pub mod clap; pub mod config; pub mod cors; pub mod crypto; diff --git a/crates/validator/src/server/mod.rs 
b/crates/validator/src/server/mod.rs index 89d28d25de..27820e1c01 100644 --- a/crates/validator/src/server/mod.rs +++ b/crates/validator/src/server/mod.rs @@ -1,13 +1,13 @@ use std::net::SocketAddr; use std::num::NonZeroUsize; use std::sync::Arc; -use std::time::Duration; use anyhow::Context; use miden_node_proto::generated::validator::api_server; use miden_node_proto::generated::{self as proto}; use miden_node_proto_build::validator_api_descriptor; use miden_node_utils::ErrorReport; +use miden_node_utils::clap::GrpcOptionsInternal; use miden_node_utils::lru_cache::LruCache; use miden_node_utils::panic::catch_panic_layer_fn; use miden_node_utils::tracing::OpenTelemetrySpanExt; @@ -46,10 +46,10 @@ pub type ValidatedTransactions = LruCache; pub struct Validator { /// The address of the validator component. pub address: SocketAddr, - /// Server-side timeout for an individual gRPC request. + /// gRPC server options for internal services (timeouts, connection caps). /// /// If the handler takes longer than this duration, the server cancels the call. - pub grpc_timeout: Duration, + pub grpc_options: GrpcOptionsInternal, /// The signer used to sign blocks. 
pub signer: S, @@ -85,7 +85,7 @@ impl Validator { tonic::transport::Server::builder() .layer(CatchPanicLayer::custom(catch_panic_layer_fn)) .layer(TraceLayer::new_for_grpc().make_span_with(grpc_trace_fn)) - .timeout(self.grpc_timeout) + .timeout(self.grpc_options.request_timeout) .add_service(api_server::ApiServer::new(ValidatorServer::new(self.signer))) .add_service(reflection_service) .add_service(reflection_service_alpha) diff --git a/docs/external/src/operator/usage.md b/docs/external/src/operator/usage.md index fa48617231..b90ac914b0 100644 --- a/docs/external/src/operator/usage.md +++ b/docs/external/src/operator/usage.md @@ -105,6 +105,17 @@ miden-node bundled start \ --rpc.url http://0.0.0.0:57291 ``` +### gRPC server limits and timeouts + +The RPC component enforces per-request timeouts, per-IP rate limits, and global concurrency caps. Configure these +settings for bundled or standalone RPC with the following options: + +- `--grpc.timeout` (default `10s`): Maximum request duration before the server drops the request. +- `--grpc.max_connection_age` (default `30m`): Maximum lifetime of a connection before the server closes it. +- `--grpc.burst_size` (default `128`): Per-IP burst capacity before rate limiting kicks in. +- `--grpc.replenish_per_sec` (default `16`): Per-IP request credits replenished per second. +- `--grpc.max_global_connections` (default `1000`): Maximum concurrent gRPC connections across all clients. + ## Systemd Our [Debian packages](./installation.md#debian-package) install a systemd service which operates the node in `bundled` From 0a188d2a2b915efa951f27b30b93146be7e9c13f Mon Sep 17 00:00:00 2001 From: Bernhard Schuster Date: Wed, 11 Mar 2026 19:50:35 +0100 Subject: [PATCH 6/8] Revert "feat(store): prune in-memory `InnerForest` (#1635)" (#1781) This reverts commit 37335ef2d53be6dc4da0b3855959a156b4708636. 
--- CHANGELOG.md | 1 - Cargo.lock | 9 +- Cargo.toml | 3 +- crates/store/Cargo.toml | 1 - crates/store/src/db/mod.rs | 104 +- .../store/src/db/models/queries/accounts.rs | 17 +- crates/store/src/db/tests.rs | 921 +----------------- crates/store/src/inner_forest/mod.rs | 539 +++++----- crates/store/src/inner_forest/tests.rs | 906 +++++------------ crates/store/src/state/loader.rs | 2 +- crates/store/src/state/mod.rs | 116 +-- 11 files changed, 554 insertions(+), 2065 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e2ace29fbc..fbccbf3d26 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -53,7 +53,6 @@ ### Enhancements -- Added cleanup of old account data from the in-memory forest ([#1175](https://github.com/0xMiden/miden-node/issues/1175)) - Added block validation endpoint to validator and integrated with block producer ([#1382](https://github.com/0xMiden/miden-node/pull/1381)). - Added support for timeouts in the WASM remote prover clients ([#1383](https://github.com/0xMiden/miden-node/pull/1383)). - Added mempool statistics to the block producer status in the `miden-network-monitor` binary ([#1392](https://github.com/0xMiden/miden-node/pull/1392)). 
diff --git a/Cargo.lock b/Cargo.lock index 219e5210ee..24936392a0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2639,9 +2639,9 @@ dependencies = [ [[package]] name = "miden-crypto" -version = "0.19.6" +version = "0.19.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "999926d48cf0929a39e06ce22299084f11d307ca9e765801eb56bf192b07054b" +checksum = "6e28b6e110f339c2edc2760a8cb94863f0a055ee658a49bc90c8560eff2feef4" dependencies = [ "blake3", "cc", @@ -2674,9 +2674,9 @@ dependencies = [ [[package]] name = "miden-crypto-derive" -version = "0.19.6" +version = "0.19.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3550b5656b791fec59c0b6089b4d0368db746a34749ccd47e59afb01aa877e9e" +checksum = "f40e95b9c7c99ed6bbf073d9e02721d812dedd2c195019c0a0e0a3dbb9cbf034" dependencies = [ "quote", "syn 2.0.114", @@ -2975,7 +2975,6 @@ dependencies = [ "rand_chacha 0.9.0", "regex", "serde", - "tempfile", "termtree", "thiserror 2.0.18", "tokio", diff --git a/Cargo.toml b/Cargo.toml index 7ffc9f6319..a1a9387756 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -61,7 +61,7 @@ miden-tx-batch-prover = { version = "0.13" } # Other miden dependencies. These should align with those expected by miden-base. 
miden-air = { features = ["std", "testing"], version = "0.20" } -miden-crypto = { version = "0.19.5" } +miden-crypto = { default-features = false, version = "0.19" } # External dependencies anyhow = { version = "1.0" } @@ -87,7 +87,6 @@ rand = { version = "0.9" } rand_chacha = { version = "0.9" } rstest = { version = "0.26" } serde = { features = ["derive"], version = "1" } -tempfile = { version = "3.12" } thiserror = { default-features = false, version = "2.0" } tokio = { features = ["rt-multi-thread"], version = "1.46" } tokio-stream = { version = "0.1" } diff --git a/crates/store/Cargo.toml b/crates/store/Cargo.toml index b5344e8d99..837f158859 100644 --- a/crates/store/Cargo.toml +++ b/crates/store/Cargo.toml @@ -55,7 +55,6 @@ miden-protocol = { default-features = true, features = ["testing"], works miden-standards = { features = ["testing"], workspace = true } rand = { workspace = true } regex = { version = "1.11" } -tempfile = { workspace = true } termtree = { version = "0.5" } [features] diff --git a/crates/store/src/db/mod.rs b/crates/store/src/db/mod.rs index a03d2c6ad1..045ba9a9a2 100644 --- a/crates/store/src/db/mod.rs +++ b/crates/store/src/db/mod.rs @@ -1,5 +1,4 @@ use std::collections::{BTreeMap, BTreeSet, HashSet}; -use std::mem::size_of; use std::ops::RangeInclusive; use std::path::PathBuf; @@ -14,7 +13,6 @@ use diesel::{ }; use miden_node_proto::domain::account::{AccountInfo, AccountSummary}; use miden_node_proto::generated as proto; -use miden_node_utils::limiter::MAX_RESPONSE_PAYLOAD_BYTES; use miden_node_utils::tracing::OpenTelemetrySpanExt; use miden_protocol::Word; use miden_protocol::account::{AccountHeader, AccountId, AccountStorageHeader}; @@ -43,13 +41,6 @@ use crate::db::models::{Page, deserialize_raw_vec, queries}; use crate::errors::{DatabaseError, DatabaseSetupError, NoteSyncError, StateSyncError}; use crate::genesis::GenesisBlock; -const STORAGE_MAP_VALUE_PER_ROW_BYTES: usize = - 2 * size_of::() + size_of::() + size_of::(); - -fn 
default_storage_map_entries_limit() -> usize { - MAX_RESPONSE_PAYLOAD_BYTES / STORAGE_MAP_VALUE_PER_ROW_BYTES -} - pub(crate) mod manager; mod migrations; @@ -646,106 +637,13 @@ impl Db { &self, account_id: AccountId, block_range: RangeInclusive, - entries_limit: Option, ) -> Result { - let entries_limit = entries_limit.unwrap_or_else(default_storage_map_entries_limit); - self.transact("select storage map sync values", move |conn| { - models::queries::select_account_storage_map_values_paged( - conn, - account_id, - block_range, - entries_limit, - ) + models::queries::select_account_storage_map_values(conn, account_id, block_range) }) .await } - /// Reconstructs storage map details from the database for a specific slot at a block. - /// - /// Used as fallback when `InnerForest` cache misses (historical or evicted queries). - /// Rebuilds all entries by querying the DB and filtering to the specific slot. - /// - /// Returns: - /// - `::LimitExceeded` when too many entries are present - /// - `::AllEntries` if the size is less than or equal given `entries_limit`, if any - pub(crate) async fn reconstruct_storage_map_from_db( - &self, - account_id: AccountId, - slot_name: miden_protocol::account::StorageSlotName, - block_num: BlockNumber, - entries_limit: Option, - ) -> Result { - use miden_node_proto::domain::account::{AccountStorageMapDetails, StorageMapEntries}; - use miden_protocol::EMPTY_WORD; - - // TODO this remains expensive with a large history until we implement pruning for DB - // columns - let mut values = Vec::new(); - let mut block_range_start = BlockNumber::GENESIS; - let entries_limit = entries_limit.unwrap_or_else(default_storage_map_entries_limit); - - let mut page = self - .select_storage_map_sync_values( - account_id, - block_range_start..=block_num, - Some(entries_limit), - ) - .await?; - - values.extend(page.values); - let mut last_block_included = page.last_block_included; - - loop { - if page.last_block_included == block_num || 
page.last_block_included < block_range_start - { - break; - } - - block_range_start = page.last_block_included.child(); - page = self - .select_storage_map_sync_values( - account_id, - block_range_start..=block_num, - Some(entries_limit), - ) - .await?; - - if page.last_block_included <= last_block_included { - return Ok(AccountStorageMapDetails::limit_exceeded(slot_name)); - } - - last_block_included = page.last_block_included; - values.extend(page.values); - } - - if page.last_block_included != block_num { - return Ok(AccountStorageMapDetails::limit_exceeded(slot_name)); - } - - // Filter to the specific slot and collect latest values per key - let mut latest_values = BTreeMap::::new(); - for value in values { - if value.slot_name == slot_name { - let raw_key = value.key; - latest_values.insert(raw_key, value.value); - } - } - - // Remove EMPTY_WORD entries (deletions) - latest_values.retain(|_, v| *v != EMPTY_WORD); - - if latest_values.len() > AccountStorageMapDetails::MAX_RETURN_ENTRIES { - return Ok(AccountStorageMapDetails::limit_exceeded(slot_name)); - } - - let entries = Vec::from_iter(latest_values.into_iter()); - Ok(AccountStorageMapDetails { - slot_name, - entries: StorageMapEntries::AllEntries(entries), - }) - } - /// Emits size metrics for each table in the database, and the entire database. 
#[instrument(target = COMPONENT, skip_all, err)] pub async fn analyze_table_sizes(&self) -> Result<(), DatabaseError> { diff --git a/crates/store/src/db/models/queries/accounts.rs b/crates/store/src/db/models/queries/accounts.rs index c117e8ab94..6f8e3834f6 100644 --- a/crates/store/src/db/models/queries/accounts.rs +++ b/crates/store/src/db/models/queries/accounts.rs @@ -653,14 +653,19 @@ impl StorageMapValue { /// /// * Response payload size: 0 <= size <= 2MB /// * Storage map values per response: 0 <= count <= (2MB / (2*Word + u32 + u8)) + 1 -pub(crate) fn select_account_storage_map_values_paged( +pub(crate) fn select_account_storage_map_values( conn: &mut SqliteConnection, account_id: AccountId, block_range: RangeInclusive, - limit: usize, ) -> Result { use schema::account_storage_map_values as t; + // TODO: These limits should be given by the protocol. + // See miden-base/issues/1770 for more details + pub const ROW_OVERHEAD_BYTES: usize = + 2 * size_of::() + size_of::() + size_of::(); // key + value + block_num + slot_idx + pub const MAX_ROWS: usize = MAX_RESPONSE_PAYLOAD_BYTES / ROW_OVERHEAD_BYTES; + if !account_id.has_public_state() { return Err(DatabaseError::AccountNotPublic(account_id)); } @@ -681,13 +686,13 @@ pub(crate) fn select_account_storage_map_values_paged( .and(t::block_num.le(block_range.end().to_raw_sql())), ) .order(t::block_num.asc()) - .limit(i64::try_from(limit + 1).expect("limit fits within i64")) + .limit(i64::try_from(MAX_ROWS + 1).expect("limit fits within i64")) .load(conn)?; // Discard the last block in the response (assumes more than one block may be present) let (last_block_included, values) = if let Some(&(last_block_num, ..)) = raw.last() - && raw.len() > limit + && raw.len() > MAX_ROWS { // NOTE: If the query contains at least one more row than the amount of storage map updates // allowed in a single block for an account, then the response is guaranteed to have at @@ -703,9 +708,7 @@ pub(crate) fn 
select_account_storage_map_values_paged( } else { ( *block_range.end(), - raw.into_iter() - .map(StorageMapValue::from_raw_row) - .collect::, _>>()?, + raw.into_iter().map(StorageMapValue::from_raw_row).collect::>()?, ) }; diff --git a/crates/store/src/db/tests.rs b/crates/store/src/db/tests.rs index 5b5cbd19c5..44b11c9b43 100644 --- a/crates/store/src/db/tests.rs +++ b/crates/store/src/db/tests.rs @@ -4,9 +4,8 @@ use std::num::NonZeroUsize; use std::sync::{Arc, Mutex}; -use assert_matches::assert_matches; use diesel::{Connection, SqliteConnection}; -use miden_node_proto::domain::account::{AccountSummary, StorageMapEntries}; +use miden_node_proto::domain::account::AccountSummary; use miden_node_utils::fee::{test_fee, test_fee_params}; use miden_protocol::account::auth::PublicKeyCommitment; use miden_protocol::account::delta::AccountUpdateDetails; @@ -37,7 +36,6 @@ use miden_protocol::block::{ }; use miden_protocol::crypto::dsa::ecdsa_k256_keccak::SecretKey; use miden_protocol::crypto::merkle::SparseMerklePath; -use miden_protocol::crypto::merkle::smt::SmtProof; use miden_protocol::crypto::rand::RpoRandomCoin; use miden_protocol::note::{ Note, @@ -73,7 +71,6 @@ use miden_standards::code_builder::CodeBuilder; use miden_standards::note::{NetworkAccountTarget, create_p2id_note}; use pretty_assertions::assert_eq; use rand::Rng; -use tempfile::tempdir; use super::{AccountInfo, NoteRecord, NullifierInfo}; use crate::db::TransactionSummary; @@ -81,7 +78,6 @@ use crate::db::migrations::apply_migrations; use crate::db::models::queries::{StorageMapValue, insert_account_storage_map_value}; use crate::db::models::{Page, queries, utils}; use crate::errors::DatabaseError; -use crate::inner_forest::HISTORICAL_BLOCK_RETENTION; fn create_db() -> SqliteConnection { let mut conn = SqliteConnection::establish(":memory:").expect("In memory sqlite always works"); @@ -1073,13 +1069,9 @@ fn sql_account_storage_map_values_insertion() { AccountDelta::new(account_id, storage1, 
AccountVaultDelta::default(), Felt::ONE).unwrap(); insert_account_delta(conn, account_id, block1, &delta1); - let storage_map_page = queries::select_account_storage_map_values_paged( - conn, - account_id, - BlockNumber::GENESIS..=block1, - 1024, - ) - .unwrap(); + let storage_map_page = + queries::select_account_storage_map_values(conn, account_id, BlockNumber::GENESIS..=block1) + .unwrap(); assert_eq!(storage_map_page.values.len(), 2, "expect 2 initial rows"); // Update key1 at block 2 @@ -1092,13 +1084,9 @@ fn sql_account_storage_map_values_insertion() { .unwrap(); insert_account_delta(conn, account_id, block2, &delta2); - let storage_map_values = queries::select_account_storage_map_values_paged( - conn, - account_id, - BlockNumber::GENESIS..=block2, - 1024, - ) - .unwrap(); + let storage_map_values = + queries::select_account_storage_map_values(conn, account_id, BlockNumber::GENESIS..=block2) + .unwrap(); assert_eq!(storage_map_values.values.len(), 3, "three rows (with duplicate key)"); // key1 should now be value3 at block2; key2 remains value2 at block1 @@ -1192,11 +1180,10 @@ fn select_storage_map_sync_values() { ) .unwrap(); - let page = queries::select_account_storage_map_values_paged( + let page = queries::select_account_storage_map_values( &mut conn, account_id, BlockNumber::from(2)..=BlockNumber::from(3), - 1024, ) .unwrap(); @@ -1249,11 +1236,10 @@ fn select_storage_map_sync_values_for_network_account() { ) .unwrap(); - let page = queries::select_account_storage_map_values_paged( + let page = queries::select_account_storage_map_values( &mut conn, account_id, BlockNumber::GENESIS..=block_num, - 1024, ) .unwrap(); @@ -1264,133 +1250,6 @@ fn select_storage_map_sync_values_for_network_account() { ); } -#[test] -fn select_storage_map_sync_values_paginates_until_last_block() { - let mut conn = create_db(); - let account_id = AccountId::try_from(ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE).unwrap(); - let slot_name = StorageSlotName::mock(7); - - let 
block1 = BlockNumber::from(1); - let block2 = BlockNumber::from(2); - let block3 = BlockNumber::from(3); - - create_block(&mut conn, block1); - create_block(&mut conn, block2); - create_block(&mut conn, block3); - - queries::upsert_accounts(&mut conn, &[mock_block_account_update(account_id, 0)], block1) - .unwrap(); - queries::upsert_accounts(&mut conn, &[mock_block_account_update(account_id, 1)], block2) - .unwrap(); - queries::upsert_accounts(&mut conn, &[mock_block_account_update(account_id, 2)], block3) - .unwrap(); - - queries::insert_account_storage_map_value( - &mut conn, - account_id, - block1, - slot_name.clone(), - num_to_word(1), - num_to_word(11), - ) - .unwrap(); - queries::insert_account_storage_map_value( - &mut conn, - account_id, - block2, - slot_name.clone(), - num_to_word(2), - num_to_word(22), - ) - .unwrap(); - queries::insert_account_storage_map_value( - &mut conn, - account_id, - block3, - slot_name.clone(), - num_to_word(3), - num_to_word(33), - ) - .unwrap(); - - let page = queries::select_account_storage_map_values_paged( - &mut conn, - account_id, - BlockNumber::GENESIS..=block3, - 1, - ) - .unwrap(); - - assert_eq!(page.last_block_included, block1, "should truncate at block 1"); - assert_eq!(page.values.len(), 1, "should include block 1 only"); -} - -#[tokio::test] -#[miden_node_test_macro::enable_logging] -async fn reconstruct_storage_map_from_db_pages_until_latest() { - let temp_dir = tempdir().unwrap(); - let db_path = temp_dir.path().join("store.sqlite"); - - let account_id = AccountId::try_from(ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE).unwrap(); - let slot_name = StorageSlotName::mock(9); - - let block1 = BlockNumber::from(1); - let block2 = BlockNumber::from(2); - let block3 = BlockNumber::from(3); - - let db = crate::db::Db::load(db_path).await.unwrap(); - let slot_name_for_db = slot_name.clone(); - db.query("insert paged values", move |db_conn| { - db_conn.transaction(|db_conn| { - apply_migrations(db_conn)?; - 
create_block(db_conn, block1); - create_block(db_conn, block2); - create_block(db_conn, block3); - - queries::upsert_accounts(db_conn, &[mock_block_account_update(account_id, 0)], block1)?; - queries::upsert_accounts(db_conn, &[mock_block_account_update(account_id, 1)], block2)?; - queries::upsert_accounts(db_conn, &[mock_block_account_update(account_id, 2)], block3)?; - - queries::insert_account_storage_map_value( - db_conn, - account_id, - block1, - slot_name_for_db.clone(), - num_to_word(1), - num_to_word(10), - )?; - queries::insert_account_storage_map_value( - db_conn, - account_id, - block2, - slot_name_for_db.clone(), - num_to_word(2), - num_to_word(20), - )?; - queries::insert_account_storage_map_value( - db_conn, - account_id, - block3, - slot_name_for_db.clone(), - num_to_word(3), - num_to_word(30), - )?; - Ok::<_, DatabaseError>(()) - }) - }) - .await - .unwrap(); - - let details = db - .reconstruct_storage_map_from_db(account_id, slot_name.clone(), block3, Some(1)) - .await - .unwrap(); - - assert_matches!(details.entries, StorageMapEntries::AllEntries(entries) => { - assert_eq!(entries.len(), 3); - }); -} - // UTILITIES // ------------------------------------------------------------------------------------------- fn num_to_word(n: u64) -> Word { @@ -2294,11 +2153,10 @@ fn db_roundtrip_storage_map_values() { .unwrap(); // Retrieve - let page = queries::select_account_storage_map_values_paged( + let page = queries::select_account_storage_map_values( &mut conn, account_id, BlockNumber::GENESIS..=block_num, - 1024, ) .unwrap(); @@ -2422,7 +2280,7 @@ fn db_roundtrip_account_storage_with_maps() { #[test] #[miden_node_test_macro::enable_logging] -fn db_roundtrip_note_metadata_attachment() { +fn test_note_metadata_with_attachment_roundtrip() { let mut conn = create_db(); let block_num = BlockNumber::from(1); create_block(&mut conn, block_num); @@ -2473,760 +2331,3 @@ fn db_roundtrip_note_metadata_attachment() { "NetworkAccountTarget should have the correct 
target account ID" ); } - -#[test] -#[miden_node_test_macro::enable_logging] -fn inner_forest_matches_db_storage_map_roots_across_updates() { - use std::collections::BTreeMap; - - use miden_protocol::account::delta::{StorageMapDelta, StorageSlotDelta}; - use miden_protocol::crypto::merkle::smt::Smt; - - use crate::inner_forest::InnerForest; - - /// Reconstructs storage map root from DB entries at a specific block. - fn reconstruct_storage_map_root_from_db( - conn: &mut SqliteConnection, - account_id: AccountId, - slot_name: &StorageSlotName, - block_num: BlockNumber, - ) -> Option { - let storage_values = queries::select_account_storage_map_values_paged( - conn, - account_id, - BlockNumber::GENESIS..=block_num, - 1024, - ) - .unwrap(); - - // Filter to the specific slot and get most recent value for each key - let mut latest_values: BTreeMap = BTreeMap::new(); - for value in storage_values.values { - if value.slot_name == *slot_name { - latest_values.insert(value.key, value.value); - } - } - - if latest_values.is_empty() { - return None; - } - - // Build SMT from entries - let entries: Vec<(Word, Word)> = latest_values - .into_iter() - .filter_map(|(key, value)| { - if value == EMPTY_WORD { - None - } else { - // Keys are stored unhashed in DB, match InnerForest behavior - Some((key, value)) - } - }) - .collect(); - - if entries.is_empty() { - use miden_protocol::crypto::merkle::EmptySubtreeRoots; - use miden_protocol::crypto::merkle::smt::SMT_DEPTH; - return Some(*EmptySubtreeRoots::entry(SMT_DEPTH, 0)); - } - - let mut smt = Smt::default(); - for (key, value) in entries { - smt.insert(miden_protocol::account::StorageMap::hash_key(key), value).unwrap(); - } - - Some(smt.root()) - } - - let mut conn = create_db(); - let mut forest = InnerForest::new(); - let account_id = AccountId::try_from(ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE).unwrap(); - - let block1 = BlockNumber::from(1); - let block2 = BlockNumber::from(2); - let block3 = BlockNumber::from(3); - - 
create_block(&mut conn, block1); - create_block(&mut conn, block2); - create_block(&mut conn, block3); - - queries::upsert_accounts(&mut conn, &[mock_block_account_update(account_id, 0)], block1) - .unwrap(); - queries::upsert_accounts(&mut conn, &[mock_block_account_update(account_id, 1)], block2) - .unwrap(); - queries::upsert_accounts(&mut conn, &[mock_block_account_update(account_id, 2)], block3) - .unwrap(); - - let slot_map = StorageSlotName::mock(1); - let slot_value = StorageSlotName::mock(2); - - let key1 = num_to_word(100); - let key2 = num_to_word(200); - let value1 = num_to_word(1000); - let value2 = num_to_word(2000); - let value3 = num_to_word(3000); - - // Block 1: Add storage map entries and a storage value - let mut map_delta_1 = StorageMapDelta::default(); - map_delta_1.insert(key1, value1); - map_delta_1.insert(key2, value2); - - let raw_1 = BTreeMap::from_iter([ - (slot_map.clone(), StorageSlotDelta::Map(map_delta_1)), - (slot_value.clone(), StorageSlotDelta::Value(value1)), - ]); - let storage_1 = AccountStorageDelta::from_raw(raw_1); - let delta_1 = - AccountDelta::new(account_id, storage_1.clone(), AccountVaultDelta::default(), Felt::ONE) - .unwrap(); - - insert_account_delta(&mut conn, account_id, block1, &delta_1); - forest.update_account(block1, &delta_1).unwrap(); - - // Verify forest matches DB for block 1 - let forest_root_1 = forest.get_storage_map_root(account_id, &slot_map, block1).unwrap(); - let db_root_1 = reconstruct_storage_map_root_from_db(&mut conn, account_id, &slot_map, block1) - .expect("DB should have storage map root"); - - assert_eq!( - forest_root_1, db_root_1, - "Storage map root at block 1 should match between InnerForest and DB" - ); - - // Block 2: Delete storage map entry (set to EMPTY_WORD) and delete storage value - let mut map_delta_2 = StorageMapDelta::default(); - map_delta_2.insert(key1, EMPTY_WORD); - - let raw_2 = BTreeMap::from_iter([ - (slot_map.clone(), StorageSlotDelta::Map(map_delta_2)), - 
(slot_value.clone(), StorageSlotDelta::Value(EMPTY_WORD)), - ]); - let storage_2 = AccountStorageDelta::from_raw(raw_2); - let delta_2 = AccountDelta::new( - account_id, - storage_2.clone(), - AccountVaultDelta::default(), - Felt::new(2), - ) - .unwrap(); - - insert_account_delta(&mut conn, account_id, block2, &delta_2); - forest.update_account(block2, &delta_2).unwrap(); - - // Verify forest matches DB for block 2 - let forest_root_2 = forest.get_storage_map_root(account_id, &slot_map, block2).unwrap(); - let db_root_2 = reconstruct_storage_map_root_from_db(&mut conn, account_id, &slot_map, block2) - .expect("DB should have storage map root"); - - assert_eq!( - forest_root_2, db_root_2, - "Storage map root at block 2 should match between InnerForest and DB" - ); - - // Block 3: Re-add same value as block 1 and add different map entry - let mut map_delta_3 = StorageMapDelta::default(); - map_delta_3.insert(key2, value3); // Update existing key - - let raw_3 = BTreeMap::from_iter([ - (slot_map.clone(), StorageSlotDelta::Map(map_delta_3)), - (slot_value.clone(), StorageSlotDelta::Value(value1)), // Same as block 1 - ]); - let storage_3 = AccountStorageDelta::from_raw(raw_3); - let delta_3 = AccountDelta::new( - account_id, - storage_3.clone(), - AccountVaultDelta::default(), - Felt::new(3), - ) - .unwrap(); - - insert_account_delta(&mut conn, account_id, block3, &delta_3); - forest.update_account(block3, &delta_3).unwrap(); - - // Verify forest matches DB for block 3 - let forest_root_3 = forest.get_storage_map_root(account_id, &slot_map, block3).unwrap(); - let db_root_3 = reconstruct_storage_map_root_from_db(&mut conn, account_id, &slot_map, block3) - .expect("DB should have storage map root"); - - assert_eq!( - forest_root_3, db_root_3, - "Storage map root at block 3 should match between InnerForest and DB" - ); - - // Verify we can query historical roots - let forest_root_1_check = forest.get_storage_map_root(account_id, &slot_map, block1).unwrap(); - let 
db_root_1_check = - reconstruct_storage_map_root_from_db(&mut conn, account_id, &slot_map, block1) - .expect("DB should have storage map root"); - assert_eq!( - forest_root_1_check, db_root_1_check, - "Historical query for block 1 should match" - ); - - // Verify roots are different across blocks (since we modified the map) - assert_ne!(forest_root_1, forest_root_2, "Roots should differ after deletion"); - assert_ne!(forest_root_2, forest_root_3, "Roots should differ after modification"); -} - -#[test] -#[miden_node_test_macro::enable_logging] -fn inner_forest_shared_roots_not_deleted_prematurely() { - use std::collections::BTreeMap; - - use miden_protocol::account::delta::{StorageMapDelta, StorageSlotDelta}; - use miden_protocol::testing::account_id::{ - ACCOUNT_ID_REGULAR_PRIVATE_ACCOUNT_UPDATABLE_CODE, - ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE_2, - }; - - use crate::inner_forest::InnerForest; - - let mut forest = InnerForest::new(); - let account1 = AccountId::try_from(ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE).unwrap(); - let account2 = AccountId::try_from(ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE_2).unwrap(); - let account3 = AccountId::try_from(ACCOUNT_ID_REGULAR_PRIVATE_ACCOUNT_UPDATABLE_CODE).unwrap(); - - let block01 = BlockNumber::from(1); - let block02 = BlockNumber::from(2); - let block50 = BlockNumber::from(HISTORICAL_BLOCK_RETENTION); - let block51 = BlockNumber::from(HISTORICAL_BLOCK_RETENTION + 1); - let block52 = BlockNumber::from(HISTORICAL_BLOCK_RETENTION + 2); - let block53 = BlockNumber::from(HISTORICAL_BLOCK_RETENTION + 3); - let slot_name = StorageSlotName::mock(1); - - let key1 = num_to_word(100); - let key2 = num_to_word(200); - let value1 = num_to_word(1000); - let value2 = num_to_word(2000); - - // All three accounts add identical storage maps at block 1 - let mut map_delta = StorageMapDelta::default(); - map_delta.insert(key1, value1); - map_delta.insert(key2, value2); - - // Setups a single slot with a map and two 
key-value-pairs - let raw = BTreeMap::from_iter([(slot_name.clone(), StorageSlotDelta::Map(map_delta.clone()))]); - let storage = AccountStorageDelta::from_raw(raw); - - // Account 1 - let delta1 = - AccountDelta::new(account1, storage.clone(), AccountVaultDelta::default(), Felt::ONE) - .unwrap(); - forest.update_account(block01, &delta1).unwrap(); - - // Account 2 (same storage) - let delta2 = - AccountDelta::new(account2, storage.clone(), AccountVaultDelta::default(), Felt::ONE) - .unwrap(); - forest.update_account(block02, &delta2).unwrap(); - - // Account 3 (same storage) - let delta3 = - AccountDelta::new(account3, storage.clone(), AccountVaultDelta::default(), Felt::ONE) - .unwrap(); - forest.update_account(block02, &delta3).unwrap(); - - // All three accounts should have the same root (structural sharing in SmtForest) - let root1 = forest.get_storage_map_root(account1, &slot_name, block01).unwrap(); - let root2 = forest.get_storage_map_root(account2, &slot_name, block02).unwrap(); - let root3 = forest.get_storage_map_root(account3, &slot_name, block02).unwrap(); - - // identical maps means identical roots - assert_eq!(root1, root2); - assert_eq!(root2, root3); - - // Verify we can get witnesses for all three accounts and verify them against roots - let witness1 = forest - .get_storage_map_witness(account1, &slot_name, block01, key1) - .expect("Account1 should have accessible storage map"); - let witness2 = forest - .get_storage_map_witness(account2, &slot_name, block02, key1) - .expect("Account2 should have accessible storage map"); - let witness3 = forest - .get_storage_map_witness(account3, &slot_name, block02, key1) - .expect("Account3 should have accessible storage map"); - - // Verify witnesses against storage map roots using SmtProof::compute_root - let proof1: SmtProof = witness1.into(); - assert_eq!(proof1.compute_root(), root1, "Witness1 must verify against root1"); - - let proof2: SmtProof = witness2.into(); - assert_eq!(proof2.compute_root(), 
root2, "Witness2 must verify against root2"); - - let proof3: SmtProof = witness3.into(); - assert_eq!(proof3.compute_root(), root3, "Witness3 must verify against root3"); - - let total_roots_removed = forest.prune(block50); - assert_eq!(total_roots_removed, 0); - - // Update accounts 1,2,3 - let mut map_delta_update = StorageMapDelta::default(); - map_delta_update.insert(key1, num_to_word(1001)); // Slight change - let raw_update = - BTreeMap::from_iter([(slot_name.clone(), StorageSlotDelta::Map(map_delta_update))]); - let storage_update = AccountStorageDelta::from_raw(raw_update); - let delta2_update = AccountDelta::new( - account2, - storage_update.clone(), - AccountVaultDelta::default(), - Felt::new(2), - ) - .unwrap(); - forest.update_account(block51, &delta2_update).unwrap(); - - let delta3_update = AccountDelta::new( - account3, - storage_update.clone(), - AccountVaultDelta::default(), - Felt::new(2), - ) - .unwrap(); - forest.update_account(block52, &delta3_update).unwrap(); - - // Prune at block 52 - let total_roots_removed = forest.prune(block52); - assert_eq!(total_roots_removed, 0); - - // ensure the root is still accessible - let account1_root_after_prune = forest.get_storage_map_root(account1, &slot_name, block01); - assert!(account1_root_after_prune.is_some()); - - let delta1_update = - AccountDelta::new(account1, storage_update, AccountVaultDelta::default(), Felt::new(2)) - .unwrap(); - forest.update_account(block53, &delta1_update).unwrap(); - - // Prune at block 53 - let total_roots_removed = forest.prune(block53); - assert_eq!(total_roots_removed, 0); - - // Account2 and Account3 should still be accessible at their recent blocks - let account1_root = forest.get_storage_map_root(account1, &slot_name, block53).unwrap(); - let account2_root = forest.get_storage_map_root(account2, &slot_name, block51).unwrap(); - let account3_root = forest.get_storage_map_root(account3, &slot_name, block52).unwrap(); - - // Verify we can still get witnesses for 
account2 and account3 and verify against roots - let witness1_after = forest - .get_storage_map_witness(account2, &slot_name, block51, key1) - .expect("Account2 should still have accessible storage map after pruning account1"); - let witness2_after = forest - .get_storage_map_witness(account3, &slot_name, block52, key1) - .expect("Account3 should still have accessible storage map after pruning account1"); - - // Verify witnesses against storage map roots - let proof1: SmtProof = witness1_after.into(); - assert_eq!(proof1.compute_root(), account2_root,); - let proof2: SmtProof = witness2_after.into(); - assert_eq!(proof2.compute_root(), account3_root,); - let account1_witness = forest - .get_storage_map_witness(account1, &slot_name, block53, key1) - .expect("Account1 should still have accessible storage map after pruning"); - let account1_proof: SmtProof = account1_witness.into(); - assert_eq!(account1_proof.compute_root(), account1_root,); -} - -#[test] -#[miden_node_test_macro::enable_logging] -fn inner_forest_retains_latest_after_100_blocks_and_pruning() { - use std::collections::BTreeMap; - - use miden_protocol::account::delta::{StorageMapDelta, StorageSlotDelta}; - - use crate::inner_forest::{HISTORICAL_BLOCK_RETENTION, InnerForest}; - - let mut forest = InnerForest::new(); - let account_id = AccountId::try_from(ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE).unwrap(); - let faucet_id = AccountId::try_from(ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET).unwrap(); - - let slot_map = StorageSlotName::mock(1); - - let key1 = num_to_word(100); - let key2 = num_to_word(200); - let value1 = num_to_word(1000); - let value2 = num_to_word(2000); - - // Block 1: Apply initial update with vault and storage - let block_1 = BlockNumber::from(1); - - // Create storage map with two entries - let mut map_delta = StorageMapDelta::default(); - map_delta.insert(key1, value1); - map_delta.insert(key2, value2); - - let raw = BTreeMap::from_iter([(slot_map.clone(), 
StorageSlotDelta::Map(map_delta))]); - let storage_delta = AccountStorageDelta::from_raw(raw); - - // Create vault with one asset - let asset = FungibleAsset::new(faucet_id, 100).unwrap(); - let mut vault_delta = AccountVaultDelta::default(); - vault_delta.add_asset(asset.into()).unwrap(); - - let delta_1 = AccountDelta::new(account_id, storage_delta, vault_delta, Felt::ONE).unwrap(); - - forest.update_account(block_1, &delta_1).unwrap(); - - // Capture the roots from block 1 - let initial_vault_root = forest.get_vault_root(account_id, block_1).unwrap(); - let initial_storage_map_root = - forest.get_storage_map_root(account_id, &slot_map, block_1).unwrap(); - - // Blocks 2-100: Do nothing (no updates to this account) - // Simulate other activity by just advancing to block 100 - - let block_100 = BlockNumber::from(100); - - assert!(forest.get_vault_root(account_id, block_100).is_some()); - assert_matches!( - forest.get_storage_map_root(account_id, &slot_map, block_100), - Some(root) if root == initial_storage_map_root - ); - - let total_roots_removed = forest.prune(block_100); - - let cutoff_block = 100 - HISTORICAL_BLOCK_RETENTION; - assert_eq!(cutoff_block, 50, "Cutoff should be block 50 (100 - HISTORICAL_BLOCK_RETENTION)"); - assert_eq!(total_roots_removed, 0); - - assert!(forest.get_vault_root(account_id, block_100).is_some()); - assert_matches!( - forest.get_storage_map_root(account_id, &slot_map, block_100), - Some(root) if root == initial_storage_map_root - ); - - let witness = forest.get_storage_map_witness(account_id, &slot_map, block_100, key1); - assert!(witness.is_ok()); - - // Now add an update at block 51 (within retention window) to test that old entries - // get pruned when newer entries exist - let block_51 = BlockNumber::from(51); - - // Update with new values - let value1_new = num_to_word(3000); - let mut map_delta_51 = StorageMapDelta::default(); - map_delta_51.insert(key1, value1_new); - - let raw_51 = BTreeMap::from_iter([(slot_map.clone(), 
StorageSlotDelta::Map(map_delta_51))]); - let storage_delta_51 = AccountStorageDelta::from_raw(raw_51); - - let asset_51 = FungibleAsset::new(faucet_id, 200).unwrap(); - let mut vault_delta_51 = AccountVaultDelta::default(); - vault_delta_51.add_asset(asset_51.into()).unwrap(); - - let delta_51 = - AccountDelta::new(account_id, storage_delta_51, vault_delta_51, Felt::new(51)).unwrap(); - - forest.update_account(block_51, &delta_51).unwrap(); - - // Prune again at block 100 - let total_roots_removed = forest.prune(block_100); - - assert_eq!(total_roots_removed, 0); - - let vault_root_at_51 = forest - .get_vault_root(account_id, block_51) - .expect("Should have vault root at block 51"); - let storage_root_at_51 = forest - .get_storage_map_root(account_id, &slot_map, block_51) - .expect("Should have storage root at block 51"); - - assert_ne!(vault_root_at_51, initial_vault_root); - - let witness = forest - .get_storage_map_witness(account_id, &slot_map, block_51, key1) - .expect("Should be able to get witness for key1"); - - let proof: SmtProof = witness.into(); - assert_eq!( - proof.compute_root(), - storage_root_at_51, - "Witness must verify against storage root" - ); - - let vault_root_at_1 = forest.get_vault_root(account_id, block_1); - assert!(vault_root_at_1.is_some()); -} - -#[test] -#[miden_node_test_macro::enable_logging] -fn inner_forest_preserves_most_recent_vault_only() { - use crate::inner_forest::InnerForest; - - let mut forest = InnerForest::new(); - let account_id = AccountId::try_from(ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE).unwrap(); - let faucet_id = AccountId::try_from(ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET).unwrap(); - - // Block 1: Create vault with asset - let block_1 = BlockNumber::from(1); - let asset = FungibleAsset::new(faucet_id, 500).unwrap(); - let mut vault_delta = AccountVaultDelta::default(); - vault_delta.add_asset(asset.into()).unwrap(); - - let delta_1 = - AccountDelta::new(account_id, AccountStorageDelta::default(), 
vault_delta, Felt::ONE) - .unwrap(); - - forest.update_account(block_1, &delta_1).unwrap(); - - let initial_vault_root = forest.get_vault_root(account_id, block_1).unwrap(); - - // Advance 100 blocks without any updates - let block_100 = BlockNumber::from(100); - - // Prune at block 100 - let total_roots_removed = forest.prune(block_100); - - // Vault from block 1 should NOT be pruned (it's the most recent) - assert_eq!( - total_roots_removed, 0, - "Should NOT prune vault root (it's the most recent for this account)" - ); - - // Verify vault is still accessible at block 1 - let vault_root_at_1 = forest - .get_vault_root(account_id, block_1) - .expect("Should still have vault root at block 1"); - assert_eq!(vault_root_at_1, initial_vault_root, "Vault root should be preserved"); - - // Verify we can get witnesses for the vault and verify against vault root - let witnesses = forest - .get_vault_asset_witnesses( - account_id, - block_1, - [AssetVaultKey::new_unchecked(asset.vault_key().into())].into(), - ) - .expect("Should be able to get vault witness after pruning"); - - assert_eq!(witnesses.len(), 1, "Should have one witness"); - let witness = &witnesses[0]; - let proof: SmtProof = witness.clone().into(); - assert_eq!( - proof.compute_root(), - vault_root_at_1, - "Vault witness must verify against vault root" - ); -} - -#[test] -#[miden_node_test_macro::enable_logging] -fn inner_forest_preserves_most_recent_storage_map_only() { - use std::collections::BTreeMap; - - use miden_protocol::account::delta::{StorageMapDelta, StorageSlotDelta}; - - use crate::inner_forest::InnerForest; - - let mut forest = InnerForest::new(); - let account_id = AccountId::try_from(ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE).unwrap(); - - let slot_map = StorageSlotName::mock(1); - let key1 = num_to_word(100); - let value1 = num_to_word(1000); - - // Block 1: Create storage map - let block_1 = BlockNumber::from(1); - let mut map_delta = StorageMapDelta::default(); - 
map_delta.insert(key1, value1); - - let raw = BTreeMap::from_iter([(slot_map.clone(), StorageSlotDelta::Map(map_delta))]); - let storage_delta = AccountStorageDelta::from_raw(raw); - - let delta_1 = - AccountDelta::new(account_id, storage_delta, AccountVaultDelta::default(), Felt::ONE) - .unwrap(); - - forest.update_account(block_1, &delta_1).unwrap(); - - let initial_storage_root = forest.get_storage_map_root(account_id, &slot_map, block_1).unwrap(); - - // Advance 100 blocks without any updates - let block_100 = BlockNumber::from(100); - - // Prune at block 100 - let total_roots_removed = forest.prune(block_100); - - // Storage map from block 1 should NOT be pruned (it's the most recent) - assert_eq!(total_roots_removed, 0, "No vault roots to prune"); - - // Verify storage map is still accessible at block 1 - let storage_root_at_1 = forest - .get_storage_map_root(account_id, &slot_map, block_1) - .expect("Should still have storage root at block 1"); - assert_eq!(storage_root_at_1, initial_storage_root, "Storage root should be preserved"); - - // Verify we can get witnesses for the storage map and verify against storage root - let witness = forest - .get_storage_map_witness(account_id, &slot_map, block_1, key1) - .expect("Should be able to get storage witness after pruning"); - - let proof: SmtProof = witness.into(); - assert_eq!( - proof.compute_root(), - storage_root_at_1, - "Storage witness must verify against storage root" - ); - - // Verify we can get all entries -} - -#[test] -#[miden_node_test_macro::enable_logging] -fn inner_forest_preserves_most_recent_storage_value_slot() { - use std::collections::BTreeMap; - - use miden_protocol::account::delta::StorageSlotDelta; - - use crate::inner_forest::InnerForest; - - let mut forest = InnerForest::new(); - let account_id = AccountId::try_from(ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE).unwrap(); - - let slot_value = StorageSlotName::mock(1); - let value1 = num_to_word(5000); - - // Block 1: Create storage 
value slot - let block_1 = BlockNumber::from(1); - - let raw = BTreeMap::from_iter([(slot_value.clone(), StorageSlotDelta::Value(value1))]); - let storage_delta = AccountStorageDelta::from_raw(raw); - - let delta_1 = - AccountDelta::new(account_id, storage_delta, AccountVaultDelta::default(), Felt::ONE) - .unwrap(); - - forest.update_account(block_1, &delta_1).unwrap(); - - // Note: Value slots don't have roots in InnerForest - they're just part of the - // account storage header. The InnerForest only tracks map slots. - // So there's nothing to verify for value slots in the forest. - - // This test documents that value slots are NOT tracked in InnerForest - // (they don't need to be, since their digest is 1:1 with the value) - - // Advance 100 blocks without any updates - let block_100 = BlockNumber::from(100); - - // Prune at block 100 - let total_roots_removed = forest.prune(block_100); - - // No roots should be pruned because there are no map slots - assert_eq!(total_roots_removed, 0, "No vault roots in this test"); - - // Verify no storage map roots exist for this account - let storage_root = forest.get_storage_map_root(account_id, &slot_value, block_1); - assert!( - storage_root.is_none(), - "Value slots don't have storage map roots in InnerForest" - ); -} - -#[test] -#[miden_node_test_macro::enable_logging] -fn inner_forest_preserves_mixed_slots_independently() { - use std::collections::BTreeMap; - - use miden_protocol::account::delta::{StorageMapDelta, StorageSlotDelta}; - - use crate::inner_forest::InnerForest; - - let mut forest = InnerForest::new(); - let account_id = AccountId::try_from(ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE).unwrap(); - let faucet_id = AccountId::try_from(ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET).unwrap(); - - let slot_map_a = StorageSlotName::mock(1); - let slot_map_b = StorageSlotName::mock(2); - let slot_value = StorageSlotName::mock(3); - - let key1 = num_to_word(100); - let value1 = num_to_word(1000); - let value_slot_data = 
num_to_word(5000); - - // Block 1: Create vault + two map slots + one value slot - let block_1 = BlockNumber::from(1); - - let asset = FungibleAsset::new(faucet_id, 100).unwrap(); - let mut vault_delta = AccountVaultDelta::default(); - vault_delta.add_asset(asset.into()).unwrap(); - - let mut map_delta_a = StorageMapDelta::default(); - map_delta_a.insert(key1, value1); - - let mut map_delta_b = StorageMapDelta::default(); - map_delta_b.insert(key1, value1); - - let raw = BTreeMap::from_iter([ - (slot_map_a.clone(), StorageSlotDelta::Map(map_delta_a)), - (slot_map_b.clone(), StorageSlotDelta::Map(map_delta_b)), - (slot_value.clone(), StorageSlotDelta::Value(value_slot_data)), - ]); - let storage_delta = AccountStorageDelta::from_raw(raw); - - let delta_1 = AccountDelta::new(account_id, storage_delta, vault_delta, Felt::ONE).unwrap(); - - forest.update_account(block_1, &delta_1).unwrap(); - - let initial_vault_root = forest.get_vault_root(account_id, block_1).unwrap(); - let initial_map_a_root = forest.get_storage_map_root(account_id, &slot_map_a, block_1).unwrap(); - let initial_map_b_root = forest.get_storage_map_root(account_id, &slot_map_b, block_1).unwrap(); - - // Block 51: Update only map_a (within retention window) - let block_51 = BlockNumber::from(51); - let value2 = num_to_word(2000); - - let mut map_delta_a_update = StorageMapDelta::default(); - map_delta_a_update.insert(key1, value2); - - let raw_51 = - BTreeMap::from_iter([(slot_map_a.clone(), StorageSlotDelta::Map(map_delta_a_update))]); - let storage_delta_51 = AccountStorageDelta::from_raw(raw_51); - - let delta_51 = AccountDelta::new( - account_id, - storage_delta_51, - AccountVaultDelta::default(), - Felt::new(51), - ) - .unwrap(); - - forest.update_account(block_51, &delta_51).unwrap(); - - // Advance to block 100 - let block_100 = BlockNumber::from(100); - - // Prune at block 100 - let total_roots_removed = forest.prune(block_100); - - // Vault: block 1 is most recent, should NOT be pruned - // 
Map A: block 1 is old (block 51 is newer), SHOULD be pruned - // Map B: block 1 is most recent, should NOT be pruned - assert_eq!( - total_roots_removed, 0, - "Vault root from block 1 should NOT be pruned (most recent)" - ); - - // Verify vault is still accessible - let vault_root_at_1 = - forest.get_vault_root(account_id, block_1).expect("Vault should be accessible"); - assert_eq!(vault_root_at_1, initial_vault_root, "Vault should be from block 1"); - - // Verify map_a is accessible (from block 51) - let map_a_root_at_51 = forest - .get_storage_map_root(account_id, &slot_map_a, block_51) - .expect("Map A should be accessible"); - assert_ne!( - map_a_root_at_51, initial_map_a_root, - "Map A should be from block 51, not block 1" - ); - - // Verify map_b is still accessible (from block 1) - let map_b_root_at_1 = forest - .get_storage_map_root(account_id, &slot_map_b, block_1) - .expect("Map B should be accessible"); - assert_eq!( - map_b_root_at_1, initial_map_b_root, - "Map B should still be from block 1 (most recent)" - ); - - // Verify map_a block 1 is no longer accessible - let map_a_root_at_1 = forest.get_storage_map_root(account_id, &slot_map_a, block_1); - assert!(map_a_root_at_1.is_some(), "Map A block 1 should be pruned"); -} diff --git a/crates/store/src/inner_forest/mod.rs b/crates/store/src/inner_forest/mod.rs index 4b2376ae69..0429864067 100644 --- a/crates/store/src/inner_forest/mod.rs +++ b/crates/store/src/inner_forest/mod.rs @@ -1,20 +1,6 @@ -use std::collections::BTreeSet; - -use miden_crypto::hash::rpo::Rpo256; -use miden_crypto::merkle::smt::{ - ForestInMemoryBackend, - ForestOperation, - LargeSmtForest, - LargeSmtForestError, - LineageId, - RootInfo, - SMT_DEPTH, - SmtUpdateBatch, - TreeId, -}; -use miden_crypto::merkle::{EmptySubtreeRoots, MerkleError}; -use miden_node_proto::domain::account::AccountStorageMapDetails; -use miden_node_utils::ErrorReport; +use std::collections::{BTreeMap, BTreeSet}; + +use 
miden_node_proto::domain::account::{AccountStorageMapDetails, StorageMapEntries}; use miden_protocol::account::delta::{AccountDelta, AccountStorageDelta, AccountVaultDelta}; use miden_protocol::account::{ AccountId, @@ -25,33 +11,30 @@ use miden_protocol::account::{ }; use miden_protocol::asset::{Asset, AssetVaultKey, AssetWitness, FungibleAsset}; use miden_protocol::block::BlockNumber; +use miden_protocol::crypto::merkle::smt::{SMT_DEPTH, SmtForest}; +use miden_protocol::crypto::merkle::{EmptySubtreeRoots, MerkleError}; use miden_protocol::errors::{AssetError, StorageMapError}; -use miden_protocol::utils::Serializable; use miden_protocol::{EMPTY_WORD, Word}; use thiserror::Error; -use tracing::instrument; - -use crate::COMPONENT; #[cfg(test)] mod tests; -// CONSTANTS -// ================================================================================================ - -/// Number of historical blocks to retain in the in-memory forest. -/// Entries older than `chain_tip - HISTORICAL_BLOCK_RETENTION` will be pruned. -pub const HISTORICAL_BLOCK_RETENTION: u32 = 50; - // ERRORS // ================================================================================================ #[derive(Debug, Error)] pub enum InnerForestError { - #[error(transparent)] - Asset(#[from] AssetError), - #[error(transparent)] - Forest(#[from] LargeSmtForestError), + #[error( + "balance underflow: account {account_id}, faucet {faucet_id}, \ + previous balance {prev_balance}, delta {delta}" + )] + BalanceUnderflow { + account_id: AccountId, + faucet_id: AccountId, + prev_balance: u64, + delta: i64, + }, } #[derive(Debug, Error)] @@ -71,19 +54,31 @@ pub enum WitnessError { /// Container for forest-related state that needs to be updated atomically. pub(crate) struct InnerForest { - /// `LargeSmtForest` for efficient account storage reconstruction. + /// `SmtForest` for efficient account storage reconstruction. /// Populated during block import with storage and vault SMTs. 
- forest: LargeSmtForest, + forest: SmtForest, + + /// Maps (`account_id`, `slot_name`, `block_num`) to SMT root. + /// Populated during block import for all storage map slots. + storage_map_roots: BTreeMap<(AccountId, StorageSlotName, BlockNumber), Word>, + + /// Maps (`account_id`, `slot_name`, `block_num`) to all key-value entries in that storage map. + /// Accumulated from deltas - each block's entries include all entries up to that point. + storage_entries: BTreeMap<(AccountId, StorageSlotName, BlockNumber), BTreeMap>, + + /// Maps (`account_id`, `block_num`) to vault SMT root. + /// Tracks asset vault versions across all blocks with structural sharing. + vault_roots: BTreeMap<(AccountId, BlockNumber), Word>, } impl InnerForest { pub(crate) fn new() -> Self { - Self { forest: Self::create_forest() } - } - - fn create_forest() -> LargeSmtForest { - let backend = ForestInMemoryBackend::new(); - LargeSmtForest::new(backend).expect("in-memory backend should initialize") + Self { + forest: SmtForest::new(), + storage_map_roots: BTreeMap::new(), + storage_entries: BTreeMap::new(), + vault_roots: BTreeMap::new(), + } } // HELPERS @@ -94,147 +89,38 @@ impl InnerForest { *EmptySubtreeRoots::entry(SMT_DEPTH, 0) } - #[cfg(test)] - fn tree_id_for_root( - &self, - account_id: AccountId, - slot_name: &StorageSlotName, - block_num: BlockNumber, - ) -> TreeId { - let lineage = Self::storage_lineage_id(account_id, slot_name); - self.lookup_tree_id(lineage, block_num) - } - - #[cfg(test)] - fn tree_id_for_vault_root(&self, account_id: AccountId, block_num: BlockNumber) -> TreeId { - let lineage = Self::vault_lineage_id(account_id); - self.lookup_tree_id(lineage, block_num) - } - - #[expect(clippy::unused_self)] - fn lookup_tree_id(&self, lineage: LineageId, block_num: BlockNumber) -> TreeId { - TreeId::new(lineage, block_num.as_u64()) - } - - fn storage_lineage_id(account_id: AccountId, slot_name: &StorageSlotName) -> LineageId { - let mut bytes = Vec::new(); - 
bytes.extend_from_slice(&account_id.to_bytes()); - bytes.extend_from_slice(slot_name.as_str().as_bytes()); - LineageId::new(Rpo256::hash(&bytes).as_bytes()) - } - - fn vault_lineage_id(account_id: AccountId) -> LineageId { - LineageId::new(Rpo256::hash(&account_id.to_bytes()).as_bytes()) - } - - fn build_forest_operations( - entries: impl IntoIterator, - ) -> Vec { - entries - .into_iter() - .map(|(key, value)| { - if value == EMPTY_WORD { - ForestOperation::remove(key) - } else { - ForestOperation::insert(key, value) - } - }) - .collect() - } - - fn apply_forest_updates( - &mut self, - lineage: LineageId, - block_num: BlockNumber, - operations: Vec, - ) -> Word { - let updates = if operations.is_empty() { - SmtUpdateBatch::empty() - } else { - SmtUpdateBatch::new(operations.into_iter()) - }; - let version = block_num.as_u64(); - let tree = if self.forest.latest_version(lineage).is_some() { - self.forest - .update_tree(lineage, version, updates) - .expect("forest update should succeed") - } else { - self.forest - .add_lineage(lineage, version, updates) - .expect("forest update should succeed") - }; - tree.root() - } - - fn map_forest_error(error: LargeSmtForestError) -> MerkleError { - match error { - LargeSmtForestError::Merkle(merkle) => merkle, - other => MerkleError::InternalError(other.as_report()), - } - } - - fn map_forest_error_to_witness(error: LargeSmtForestError) -> WitnessError { - match error { - LargeSmtForestError::Merkle(merkle) => WitnessError::MerkleError(merkle), - other => WitnessError::MerkleError(MerkleError::InternalError(other.as_report())), - } - } - - // ACCESSORS - // -------------------------------------------------------------------------------------------- - - fn get_tree_id(&self, lineage: LineageId, block_num: BlockNumber) -> Option { - let tree = self.lookup_tree_id(lineage, block_num); - match self.forest.root_info(tree) { - RootInfo::LatestVersion(_) | RootInfo::HistoricalVersion(_) => Some(tree), - RootInfo::Missing => { - let 
latest_version = self.forest.latest_version(lineage)?; - if latest_version <= block_num.as_u64() { - Some(TreeId::new(lineage, latest_version)) - } else { - None - } - }, - } - } - - #[cfg(test)] - fn get_tree_root(&self, lineage: LineageId, block_num: BlockNumber) -> Option { - let tree = self.get_tree_id(lineage, block_num)?; - match self.forest.root_info(tree) { - RootInfo::LatestVersion(root) | RootInfo::HistoricalVersion(root) => Some(root), - RootInfo::Missing => None, - } - } - - /// Retrieves a vault root for the specified account and block. - #[cfg(test)] + /// Retrieves a vault root for the specified account at or before the specified block. pub(crate) fn get_vault_root( &self, account_id: AccountId, block_num: BlockNumber, ) -> Option { - let lineage = Self::vault_lineage_id(account_id); - self.get_tree_root(lineage, block_num) + self.vault_roots + .range((account_id, BlockNumber::GENESIS)..=(account_id, block_num)) + .next_back() + .map(|(_, root)| *root) } - /// Retrieves the storage map root for an account slot at the specified block. - #[cfg(test)] + /// Retrieves the storage map root for an account slot at or before the specified block. pub(crate) fn get_storage_map_root( &self, account_id: AccountId, slot_name: &StorageSlotName, block_num: BlockNumber, ) -> Option { - let lineage = Self::storage_lineage_id(account_id, slot_name); - self.get_tree_root(lineage, block_num) + self.storage_map_roots + .range( + (account_id, slot_name.clone(), BlockNumber::GENESIS) + ..=(account_id, slot_name.clone(), block_num), + ) + .next_back() + .map(|(_, root)| *root) } - // WITNESSES and PROOFS - // -------------------------------------------------------------------------------------------- - /// Retrieves a storage map witness for the specified account and storage slot. /// + /// Finds the most recent witness at or before the specified block number. 
+ /// /// Note that the `raw_key` is the raw, user-provided key that needs to be hashed in order to /// get the actual key into the storage map. pub(crate) fn get_storage_map_witness( @@ -244,10 +130,11 @@ impl InnerForest { block_num: BlockNumber, raw_key: Word, ) -> Result { - let lineage = Self::storage_lineage_id(account_id, slot_name); - let tree = self.get_tree_id(lineage, block_num).ok_or(WitnessError::RootNotFound)?; let key = StorageMap::hash_key(raw_key); - let proof = self.forest.open(tree, key).map_err(Self::map_forest_error_to_witness)?; + let root = self + .get_storage_map_root(account_id, slot_name, block_num) + .ok_or(WitnessError::RootNotFound)?; + let proof = self.forest.open(root, key)?; Ok(StorageMapWitness::new(proof, vec![raw_key])?) } @@ -260,42 +147,72 @@ impl InnerForest { block_num: BlockNumber, asset_keys: BTreeSet, ) -> Result, WitnessError> { - let lineage = Self::vault_lineage_id(account_id); - let tree = self.get_tree_id(lineage, block_num).ok_or(WitnessError::RootNotFound)?; - let witnessees: Result, WitnessError> = - Result::from_iter(asset_keys.into_iter().map(|key| { - let proof = self - .forest - .open(tree, key.into()) - .map_err(Self::map_forest_error_to_witness)?; + let root = self.get_vault_root(account_id, block_num).ok_or(WitnessError::RootNotFound)?; + let witnessees = asset_keys + .into_iter() + .map(|key| { + let proof = self.forest.open(root, key.into())?; let asset = AssetWitness::new(proof)?; Ok(asset) - })); - witnessees + }) + .collect::, WitnessError>>()?; + Ok(witnessees) } /// Opens a storage map and returns storage map details with SMT proofs for the given keys. /// /// Returns `None` if no storage root is tracked for this account/slot/block combination. /// Returns a `MerkleError` if the forest doesn't contain sufficient data for the proofs. 
- pub(crate) fn get_storage_map_details_for_keys( + pub(crate) fn open_storage_map( &self, account_id: AccountId, slot_name: StorageSlotName, block_num: BlockNumber, raw_keys: &[Word], ) -> Option> { - let lineage = Self::storage_lineage_id(account_id, &slot_name); - let tree = self.get_tree_id(lineage, block_num)?; + let root = self.get_storage_map_root(account_id, &slot_name, block_num)?; + // Collect SMT proofs for each key let proofs = Result::from_iter(raw_keys.iter().map(|raw_key| { let key = StorageMap::hash_key(*raw_key); - self.forest.open(tree, key).map_err(Self::map_forest_error) + self.forest.open(root, key) })); Some(proofs.map(|proofs| AccountStorageMapDetails::from_proofs(slot_name, proofs))) } + /// Returns all key-value entries for a specific account storage slot at or before a block. + /// + /// Uses range query semantics: finds the most recent entries at or before `block_num`. + /// Returns `None` if no entries exist for this account/slot up to the given block. + /// Returns `LimitExceeded` if there are too many entries to return. 
+ pub(crate) fn storage_map_entries( + &self, + account_id: AccountId, + slot_name: StorageSlotName, + block_num: BlockNumber, + ) -> Option { + // Find the most recent entries at or before block_num + let entries = self + .storage_entries + .range( + (account_id, slot_name.clone(), BlockNumber::GENESIS) + ..=(account_id, slot_name.clone(), block_num), + ) + .next_back() + .map(|(_, entries)| entries)?; + + if entries.len() > AccountStorageMapDetails::MAX_RETURN_ENTRIES { + return Some(AccountStorageMapDetails { + slot_name, + entries: StorageMapEntries::LimitExceeded, + }); + } + let entries = Vec::from_iter(entries.iter().map(|(k, v)| (*k, *v))); + + Some(AccountStorageMapDetails::from_forest_entries(slot_name, entries)) + } + // PUBLIC INTERFACE // -------------------------------------------------------------------------------------------- @@ -312,7 +229,6 @@ impl InnerForest { /// # Errors /// /// Returns an error if applying a vault delta results in a negative balance. - #[instrument(target = COMPONENT, skip_all, fields(block.number = %block_num))] pub(crate) fn apply_block_updates( &mut self, block_num: BlockNumber, @@ -329,9 +245,6 @@ impl InnerForest { "Updated forest with account delta" ); } - - self.prune(block_num); - Ok(()) } @@ -376,8 +289,10 @@ impl InnerForest { /// Retrieves the most recent vault SMT root for an account. If no vault root is found for the /// account, returns an empty SMT root. fn get_latest_vault_root(&self, account_id: AccountId) -> Word { - let lineage = Self::vault_lineage_id(account_id); - self.forest.latest_root(lineage).unwrap_or_else(Self::empty_smt_root) + self.vault_roots + .range((account_id, BlockNumber::GENESIS)..=(account_id, BlockNumber::from(u32::MAX))) + .next_back() + .map_or_else(Self::empty_smt_root, |(_, root)| *root) } /// Inserts asset vault data into the forest for the specified account. 
Assumes that asset @@ -390,25 +305,13 @@ impl InnerForest { ) { // get the current vault root for the account, and make sure it is empty let prev_root = self.get_latest_vault_root(account_id); - let lineage = Self::vault_lineage_id(account_id); assert_eq!(prev_root, Self::empty_smt_root(), "account should not be in the forest"); - assert!( - self.forest.latest_version(lineage).is_none(), - "account should not be in the forest" - ); + // if there are no assets in the vault, add a root of an empty SMT to the vault roots map + // so that the map has entries for all accounts, and then return (i.e., no need to insert + // anything into the forest) if delta.is_empty() { - let lineage = Self::vault_lineage_id(account_id); - let new_root = self.apply_forest_updates(lineage, block_num, Vec::new()); - - tracing::debug!( - target: crate::COMPONENT, - %account_id, - %block_num, - %new_root, - vault_entries = 0, - "Inserted vault into forest" - ); + self.vault_roots.insert((account_id, block_num), prev_root); return; } @@ -423,26 +326,25 @@ impl InnerForest { } // process non-fungible assets - for (&asset, action) in delta.non_fungible().iter() { - let asset_vault_key = asset.vault_key().into(); - match action { - NonFungibleDeltaAction::Add => entries.push((asset_vault_key, asset.into())), - NonFungibleDeltaAction::Remove => entries.push((asset_vault_key, EMPTY_WORD)), - } + for (&asset, _action) in delta.non_fungible().iter() { + // TODO: assert that action is addition + entries.push((asset.vault_key().into(), asset.into())); } assert!(!entries.is_empty(), "non-empty delta should contain entries"); let num_entries = entries.len(); - let lineage = Self::vault_lineage_id(account_id); - let operations = Self::build_forest_operations(entries); - let new_root = self.apply_forest_updates(lineage, block_num, operations); + let new_root = self + .forest + .batch_insert(prev_root, entries) + .expect("forest insertion should succeed"); + + self.vault_roots.insert((account_id, 
block_num), new_root); tracing::debug!( target: crate::COMPONENT, %account_id, %block_num, - %new_root, vault_entries = num_entries, "Inserted vault into forest" ); @@ -466,39 +368,41 @@ impl InnerForest { assert!(!delta.is_empty(), "expected the delta not to be empty"); // get the previous vault root; the root could be for an empty or non-empty SMT - let lineage = Self::vault_lineage_id(account_id); - let prev_tree = - self.forest.latest_version(lineage).map(|version| TreeId::new(lineage, version)); + let prev_root = self.get_latest_vault_root(account_id); let mut entries: Vec<(Word, Word)> = Vec::new(); // Process fungible assets for (faucet_id, amount_delta) in delta.fungible().iter() { - let delta_abs = amount_delta.unsigned_abs(); - let delta = FungibleAsset::new(*faucet_id, delta_abs)?; - let key = Word::from(delta.vault_key()); - - let empty = FungibleAsset::new(*faucet_id, 0)?; - let asset = if let Some(tree) = prev_tree { - self.forest - .get(tree, key)? - .map(FungibleAsset::try_from) - .transpose()? - .unwrap_or(empty) - } else { - empty - }; - - let updated = if *amount_delta < 0 { - asset.sub(delta)? - } else { - asset.add(delta)? + let key: Word = + FungibleAsset::new(*faucet_id, 0).expect("valid faucet id").vault_key().into(); + + let new_amount = { + // amount delta is a change that must be applied to previous balance. + // + // TODO: SmtForest only exposes `fn open()` which computes a full Merkle proof. We + // only need the leaf, so a direct `fn get()` method would be faster. + let prev_amount = self + .forest + .open(prev_root, key) + .ok() + .and_then(|proof| proof.get(&key)) + .and_then(|word| FungibleAsset::try_from(word).ok()) + .map_or(0, |asset| asset.amount()); + + let new_balance = i128::from(prev_amount) + i128::from(*amount_delta); + u64::try_from(new_balance).map_err(|_| InnerForestError::BalanceUnderflow { + account_id, + faucet_id: *faucet_id, + prev_balance: prev_amount, + delta: *amount_delta, + })? 
}; - let value = if updated.amount() == 0 { + let value = if new_amount == 0 { EMPTY_WORD } else { - Word::from(updated) + FungibleAsset::new(*faucet_id, new_amount).expect("valid fungible asset").into() }; entries.push((key, value)); } @@ -512,18 +416,21 @@ impl InnerForest { entries.push((asset.vault_key().into(), value)); } - let vault_entries = entries.len(); + assert!(!entries.is_empty(), "non-empty delta should contain entries"); + let num_entries = entries.len(); - let lineage = Self::vault_lineage_id(account_id); - let operations = Self::build_forest_operations(entries); - let new_root = self.apply_forest_updates(lineage, block_num, operations); + let new_root = self + .forest + .batch_insert(prev_root, entries) + .expect("forest insertion should succeed"); + + self.vault_roots.insert((account_id, block_num), new_root); tracing::debug!( target: crate::COMPONENT, %account_id, %block_num, - %new_root, - %vault_entries, + vault_entries = num_entries, "Updated vault in forest" ); Ok(()) @@ -539,8 +446,30 @@ impl InnerForest { account_id: AccountId, slot_name: &StorageSlotName, ) -> Word { - let lineage = Self::storage_lineage_id(account_id, slot_name); - self.forest.latest_root(lineage).map_or_else(Self::empty_smt_root, |root| root) + self.storage_map_roots + .range( + (account_id, slot_name.clone(), BlockNumber::GENESIS) + ..=(account_id, slot_name.clone(), BlockNumber::from(u32::MAX)), + ) + .next_back() + .map_or_else(Self::empty_smt_root, |(_, root)| *root) + } + + /// Retrieves the most recent entries in the specified storage map. If no storage map exists + /// returns an empty map. 
+ fn get_latest_storage_map_entries( + &self, + account_id: AccountId, + slot_name: &StorageSlotName, + ) -> BTreeMap { + self.storage_entries + .range( + (account_id, slot_name.clone(), BlockNumber::GENESIS) + ..(account_id, slot_name.clone(), BlockNumber::from(u32::MAX)), + ) + .next_back() + .map(|(_, entries)| entries.clone()) + .unwrap_or_default() } /// Inserts all storage maps from the provided storage delta into the forest. @@ -560,44 +489,60 @@ impl InnerForest { // build a vector of raw entries and filter out any empty values; such values // shouldn't be present in full-state deltas, but it is good to exclude them // explicitly - let raw_map_entries: Vec<(Word, Word)> = - Vec::from_iter(map_delta.entries().iter().filter_map(|(&key, &value)| { + let raw_map_entries: Vec<(Word, Word)> = map_delta + .entries() + .iter() + .filter_map(|(&key, &value)| { if value == EMPTY_WORD { None } else { Some((Word::from(key), value)) } - })); + }) + .collect(); + // if the delta is empty, make sure we create an entry in the storage map roots map + // and storage entries map (so storage_map_entries() queries work) if raw_map_entries.is_empty() { - let lineage = Self::storage_lineage_id(account_id, slot_name); - let _new_root = self.apply_forest_updates(lineage, block_num, Vec::new()); + self.storage_map_roots + .insert((account_id, slot_name.clone(), block_num), prev_root); + self.storage_entries + .insert((account_id, slot_name.clone(), block_num), BTreeMap::new()); continue; } - let hashed_entries = Vec::from_iter( - raw_map_entries - .iter() - .map(|(raw_key, value)| (StorageMap::hash_key(*raw_key), *value)), - ); + // hash the keys before inserting into the forest, matching how `StorageMap` + // hashes keys before inserting into the SMT. 
+ let hashed_entries: Vec<(Word, Word)> = raw_map_entries + .iter() + .map(|(key, value)| (StorageMap::hash_key(*key), *value)) + .collect(); - let lineage = Self::storage_lineage_id(account_id, slot_name); - assert!( - self.forest.latest_version(lineage).is_none(), - "account should not be in the forest" - ); - let operations = Self::build_forest_operations(hashed_entries); - let new_root = self.apply_forest_updates(lineage, block_num, operations); + // insert the updates into the forest and update storage map roots map + let new_root = self + .forest + .batch_insert(prev_root, hashed_entries.iter().copied()) + .expect("forest insertion should succeed"); + self.storage_map_roots + .insert((account_id, slot_name.clone(), block_num), new_root); + + assert!(!raw_map_entries.is_empty(), "a non-empty delta should have entries"); let num_entries = raw_map_entries.len(); + // keep track of the state of storage map entries (using raw keys for delta merging) + // TODO: this is a temporary solution until the LargeSmtForest is implemented as + // tracking multiple versions of all storage maps will be prohibitively expensive + let map_entries = BTreeMap::from_iter(raw_map_entries); + self.storage_entries + .insert((account_id, slot_name.clone(), block_num), map_entries); + tracing::debug!( target: crate::COMPONENT, %account_id, %block_num, ?slot_name, - %new_root, delta_entries = num_entries, "Inserted storage map into forest" ); @@ -614,6 +559,8 @@ impl InnerForest { account_id: AccountId, delta: &AccountStorageDelta, ) { + assert!(!delta.is_empty(), "expected the delta not to be empty"); + for (slot_name, map_delta) in delta.maps() { // map delta shouldn't be empty, but if it is for some reason, there is nothing to do if map_delta.is_empty() { @@ -621,50 +568,48 @@ impl InnerForest { } // update the storage map tree in the forest and add an entry to the storage map roots - let lineage = Self::storage_lineage_id(account_id, slot_name); - let delta_entries: Vec<(Word, Word)> 
= Vec::from_iter( - map_delta.entries().iter().map(|(key, value)| ((*key).into(), *value)), - ); - - let hashed_entries = Vec::from_iter( - delta_entries - .iter() - .map(|(raw_key, value)| (StorageMap::hash_key(*raw_key), *value)), - ); + let prev_root = self.get_latest_storage_map_root(account_id, slot_name); + let delta_entries: Vec<(Word, Word)> = + map_delta.entries().iter().map(|(key, value)| ((*key).into(), *value)).collect(); + + // Hash the keys before inserting into the forest, matching how StorageMap + // hashes keys before inserting into the SMT. + let hashed_entries: Vec<(Word, Word)> = delta_entries + .iter() + .map(|(key, value)| (StorageMap::hash_key(*key), *value)) + .collect(); + + let new_root = self + .forest + .batch_insert(prev_root, hashed_entries.iter().copied()) + .expect("forest insertion should succeed"); + + self.storage_map_roots + .insert((account_id, slot_name.clone(), block_num), new_root); + + // merge the delta with the latest entries in the map (using raw keys) + // TODO: this is a temporary solution until the LargeSmtForest is implemented as + // tracking multiple versions of all storage maps will be prohibitively expensive + let mut latest_entries = self.get_latest_storage_map_entries(account_id, slot_name); + for (key, value) in &delta_entries { + if *value == EMPTY_WORD { + latest_entries.remove(key); + } else { + latest_entries.insert(*key, *value); + } + } - let operations = Self::build_forest_operations(hashed_entries); - let new_root = self.apply_forest_updates(lineage, block_num, operations); + self.storage_entries + .insert((account_id, slot_name.clone(), block_num), latest_entries); tracing::debug!( target: crate::COMPONENT, %account_id, %block_num, ?slot_name, - %new_root, delta_entries = delta_entries.len(), "Updated storage map in forest" ); } } - - // PRUNING - // -------------------------------------------------------------------------------------------- - - /// Prunes old entries from the in-memory forest data 
structures. - /// - /// The `LargeSmtForest` itself is truncated to drop historical versions beyond the cutoff. - /// - /// Returns the number of pruned roots for observability. - #[instrument(target = COMPONENT, skip_all, ret, fields(block.number = %chain_tip))] - pub(crate) fn prune(&mut self, chain_tip: BlockNumber) -> usize { - let cutoff_block = chain_tip - .checked_sub(HISTORICAL_BLOCK_RETENTION) - .unwrap_or(BlockNumber::GENESIS); - let before = self.forest.roots().count(); - - self.forest.truncate(cutoff_block.as_u64()); - - let after = self.forest.roots().count(); - before.saturating_sub(after) - } } diff --git a/crates/store/src/inner_forest/tests.rs b/crates/store/src/inner_forest/tests.rs index 76273404d1..5fc0cc6c0c 100644 --- a/crates/store/src/inner_forest/tests.rs +++ b/crates/store/src/inner_forest/tests.rs @@ -1,12 +1,8 @@ -use assert_matches::assert_matches; -use miden_node_proto::domain::account::StorageMapEntries; use miden_protocol::account::AccountCode; -use miden_protocol::asset::{Asset, AssetVault, AssetVaultKey, FungibleAsset}; -use miden_protocol::crypto::merkle::smt::SmtProof; +use miden_protocol::asset::{Asset, AssetVault, FungibleAsset}; use miden_protocol::testing::account_id::{ ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET, ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE, - ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE_2, }; use miden_protocol::{Felt, FieldElement}; @@ -30,6 +26,7 @@ fn dummy_partial_delta( vault_delta: AccountVaultDelta, storage_delta: AccountStorageDelta, ) -> AccountDelta { + // For partial deltas, nonce_delta must be > 0 if there are changes let nonce_delta = if vault_delta.is_empty() && storage_delta.is_empty() { Felt::ZERO } else { @@ -42,36 +39,43 @@ fn dummy_partial_delta( fn dummy_full_state_delta(account_id: AccountId, assets: &[Asset]) -> AccountDelta { use miden_protocol::account::{Account, AccountStorage}; + // Create a minimal account with the given assets let vault = AssetVault::new(assets).unwrap(); let 
storage = AccountStorage::new(vec![]).unwrap(); let code = AccountCode::mock(); let nonce = Felt::ONE; let account = Account::new(account_id, vault, storage, code, nonce, None).unwrap(); + + // Convert to delta - this will be a full-state delta because it has code AccountDelta::try_from(account).unwrap() } -// INITIALIZATION & BASIC OPERATIONS -// ================================================================================================ - #[test] -fn empty_smt_root_is_recognized() { - use miden_crypto::merkle::smt::Smt; +fn test_empty_smt_root_is_recognized() { + use miden_protocol::crypto::merkle::smt::Smt; let empty_root = InnerForest::empty_smt_root(); + // Verify an empty SMT has the expected root assert_eq!(Smt::default().root(), empty_root); + + // Test that SmtForest accepts this root in batch_insert + let mut forest = SmtForest::new(); + let entries = vec![(Word::from([1u32, 2, 3, 4]), Word::from([5u32, 6, 7, 8]))]; + + assert!(forest.batch_insert(empty_root, entries).is_ok()); } #[test] -fn inner_forest_basic_initialization() { +fn test_inner_forest_basic_initialization() { let forest = InnerForest::new(); - assert_eq!(forest.forest.lineage_count(), 0); - assert_eq!(forest.forest.tree_count(), 0); + assert!(forest.storage_map_roots.is_empty()); + assert!(forest.vault_roots.is_empty()); } #[test] -fn update_account_with_empty_deltas() { +fn test_update_account_with_empty_deltas() { let mut forest = InnerForest::new(); let account_id = dummy_account(); let block_num = BlockNumber::GENESIS.child(); @@ -84,21 +88,37 @@ fn update_account_with_empty_deltas() { forest.update_account(block_num, &delta).unwrap(); - assert!(forest.get_vault_root(account_id, block_num).is_none()); - assert_eq!(forest.forest.lineage_count(), 0); + // Empty deltas should not create entries + assert!(!forest.vault_roots.contains_key(&(account_id, block_num))); + assert!(forest.storage_map_roots.is_empty()); } -// VAULT TESTS -// 
================================================================================================ +#[test] +fn test_update_vault_with_fungible_asset() { + let mut forest = InnerForest::new(); + let account_id = dummy_account(); + let faucet_id = dummy_faucet(); + let block_num = BlockNumber::GENESIS.child(); + + let asset = dummy_fungible_asset(faucet_id, 100); + let mut vault_delta = AccountVaultDelta::default(); + vault_delta.add_asset(asset).unwrap(); + + let delta = dummy_partial_delta(account_id, vault_delta, AccountStorageDelta::default()); + forest.update_account(block_num, &delta).unwrap(); + + let vault_root = forest.vault_roots[&(account_id, block_num)]; + assert_ne!(vault_root, EMPTY_WORD); +} #[test] -fn vault_partial_vs_full_state_produces_same_root() { +fn test_compare_partial_vs_full_state_delta_vault() { let account_id = dummy_account(); let faucet_id = dummy_faucet(); let block_num = BlockNumber::GENESIS.child(); let asset = dummy_fungible_asset(faucet_id, 100); - // Partial delta (block application) + // Approach 1: Partial delta (simulates block application) let mut forest_partial = InnerForest::new(); let mut vault_delta = AccountVaultDelta::default(); vault_delta.add_asset(asset).unwrap(); @@ -106,193 +126,239 @@ fn vault_partial_vs_full_state_produces_same_root() { dummy_partial_delta(account_id, vault_delta, AccountStorageDelta::default()); forest_partial.update_account(block_num, &partial_delta).unwrap(); - // Full-state delta (DB reconstruction) + // Approach 2: Full-state delta (simulates DB reconstruction) let mut forest_full = InnerForest::new(); let full_delta = dummy_full_state_delta(account_id, &[asset]); forest_full.update_account(block_num, &full_delta).unwrap(); - let root_partial = forest_partial.get_vault_root(account_id, block_num).unwrap(); - let root_full = forest_full.get_vault_root(account_id, block_num).unwrap(); + // Both approaches must produce identical vault roots + let root_partial = 
forest_partial.vault_roots.get(&(account_id, block_num)).unwrap(); + let root_full = forest_full.vault_roots.get(&(account_id, block_num)).unwrap(); assert_eq!(root_partial, root_full); - assert_ne!(root_partial, EMPTY_WORD); + assert_ne!(*root_partial, EMPTY_WORD); } #[test] -fn vault_incremental_updates_with_add_and_remove() { +fn test_incremental_vault_updates() { let mut forest = InnerForest::new(); let account_id = dummy_account(); let faucet_id = dummy_faucet(); + // Block 1: 100 tokens + let block_1 = BlockNumber::GENESIS.child(); + let mut vault_delta_1 = AccountVaultDelta::default(); + vault_delta_1.add_asset(dummy_fungible_asset(faucet_id, 100)).unwrap(); + let delta_1 = dummy_partial_delta(account_id, vault_delta_1, AccountStorageDelta::default()); + forest.update_account(block_1, &delta_1).unwrap(); + let root_1 = forest.vault_roots[&(account_id, block_1)]; + + // Block 2: 150 tokens (update) + let block_2 = block_1.child(); + let mut vault_delta_2 = AccountVaultDelta::default(); + vault_delta_2.add_asset(dummy_fungible_asset(faucet_id, 150)).unwrap(); + let delta_2 = dummy_partial_delta(account_id, vault_delta_2, AccountStorageDelta::default()); + forest.update_account(block_2, &delta_2).unwrap(); + let root_2 = forest.vault_roots[&(account_id, block_2)]; + + assert_ne!(root_1, root_2); +} + +#[test] +fn test_vault_state_persists_across_blocks_without_changes() { + // Regression test for issue #7: vault state should persist across blocks + // where no changes occur, not reset to empty. 
+ let mut forest = InnerForest::new(); + let account_id = dummy_account(); + let faucet_id = dummy_faucet(); + + // Helper to query vault root at or before a block (range query) + let get_vault_root = |forest: &InnerForest, account_id: AccountId, block_num: BlockNumber| { + forest + .vault_roots + .range((account_id, BlockNumber::GENESIS)..=(account_id, block_num)) + .next_back() + .map(|(_, root)| *root) + }; + // Block 1: Add 100 tokens let block_1 = BlockNumber::GENESIS.child(); let mut vault_delta_1 = AccountVaultDelta::default(); vault_delta_1.add_asset(dummy_fungible_asset(faucet_id, 100)).unwrap(); let delta_1 = dummy_partial_delta(account_id, vault_delta_1, AccountStorageDelta::default()); forest.update_account(block_1, &delta_1).unwrap(); - let root_after_100 = forest.get_vault_root(account_id, block_1).unwrap(); + let root_after_block_1 = forest.vault_roots[&(account_id, block_1)]; + + // Blocks 2-5: No changes to this account (simulated by not calling update_account) + // This means no entries are added to vault_roots for these blocks. + + // Block 6: Add 50 more tokens + // The previous root lookup should find block_1's root, not return empty. 
+ let block_6 = BlockNumber::from(6); + let mut vault_delta_6 = AccountVaultDelta::default(); + vault_delta_6.add_asset(dummy_fungible_asset(faucet_id, 150)).unwrap(); // 100 + 50 = 150 + let delta_6 = dummy_partial_delta(account_id, vault_delta_6, AccountStorageDelta::default()); + forest.update_account(block_6, &delta_6).unwrap(); + + // The root at block 6 should be different from block 1 (we added more tokens) + let root_after_block_6 = forest.vault_roots[&(account_id, block_6)]; + assert_ne!(root_after_block_1, root_after_block_6); + + // Verify range query finds the correct previous root for intermediate blocks + // Block 3 should return block 1's root (most recent before block 3) + let root_at_block_3 = get_vault_root(&forest, account_id, BlockNumber::from(3)); + assert_eq!(root_at_block_3, Some(root_after_block_1)); + + // Block 5 should also return block 1's root + let root_at_block_5 = get_vault_root(&forest, account_id, BlockNumber::from(5)); + assert_eq!(root_at_block_5, Some(root_after_block_1)); + + // Block 6 should return block 6's root + let root_at_block_6 = get_vault_root(&forest, account_id, block_6); + assert_eq!(root_at_block_6, Some(root_after_block_6)); +} + +#[test] +fn test_partial_delta_applies_fungible_changes_correctly() { + // Regression test for issue #8: partial deltas should apply changes to previous balance, + // not treat amounts as absolute values. 
+ let mut forest = InnerForest::new(); + let account_id = dummy_account(); + let faucet_id = dummy_faucet(); + + // Block 1: Add 100 tokens (partial delta with +100) + let block_1 = BlockNumber::GENESIS.child(); + let mut vault_delta_1 = AccountVaultDelta::default(); + vault_delta_1.add_asset(dummy_fungible_asset(faucet_id, 100)).unwrap(); + let delta_1 = dummy_partial_delta(account_id, vault_delta_1, AccountStorageDelta::default()); + forest.update_account(block_1, &delta_1).unwrap(); + let root_after_100 = forest.vault_roots[&(account_id, block_1)]; - // Block 2: Add 50 more tokens (result: 150 tokens) + // Block 2: Add 50 more tokens (partial delta with +50) + // Result should be 150 tokens, not 50 tokens let block_2 = block_1.child(); let mut vault_delta_2 = AccountVaultDelta::default(); vault_delta_2.add_asset(dummy_fungible_asset(faucet_id, 50)).unwrap(); let delta_2 = dummy_partial_delta(account_id, vault_delta_2, AccountStorageDelta::default()); forest.update_account(block_2, &delta_2).unwrap(); - let root_after_150 = forest.get_vault_root(account_id, block_2).unwrap(); + let root_after_150 = forest.vault_roots[&(account_id, block_2)]; + // Roots should be different (100 tokens vs 150 tokens) assert_ne!(root_after_100, root_after_150); - // Block 3: Remove 30 tokens (result: 120 tokens) + // Block 3: Remove 30 tokens (partial delta with -30) + // Result should be 120 tokens let block_3 = block_2.child(); let mut vault_delta_3 = AccountVaultDelta::default(); vault_delta_3.remove_asset(dummy_fungible_asset(faucet_id, 30)).unwrap(); let delta_3 = dummy_partial_delta(account_id, vault_delta_3, AccountStorageDelta::default()); forest.update_account(block_3, &delta_3).unwrap(); - let root_after_120 = forest.get_vault_root(account_id, block_3).unwrap(); + let root_after_120 = forest.vault_roots[&(account_id, block_3)]; + // Root should change again assert_ne!(root_after_150, root_after_120); - // Verify by comparing to full-state delta + // Verify by creating a 
fresh forest with a full-state delta of 120 tokens + // The roots should match let mut fresh_forest = InnerForest::new(); let full_delta = dummy_full_state_delta(account_id, &[dummy_fungible_asset(faucet_id, 120)]); fresh_forest.update_account(block_3, &full_delta).unwrap(); - let root_full_state_120 = fresh_forest.get_vault_root(account_id, block_3).unwrap(); + let root_full_state_120 = fresh_forest.vault_roots[&(account_id, block_3)]; assert_eq!(root_after_120, root_full_state_120); } #[test] -fn forest_versions_are_continuous_for_sequential_updates() { - use std::collections::BTreeMap; - - use assert_matches::assert_matches; - use miden_protocol::account::delta::{StorageMapDelta, StorageSlotDelta}; - - let mut forest = InnerForest::new(); - let account_id = dummy_account(); - let faucet_id = dummy_faucet(); - let slot_name = StorageSlotName::mock(9); - let raw_key = Word::from([1u32, 0, 0, 0]); - let storage_key = StorageMap::hash_key(raw_key); - let asset_key: Word = FungibleAsset::new(faucet_id, 0).unwrap().vault_key().into(); - - for i in 1..=3u32 { - let block_num = BlockNumber::from(i); - let mut vault_delta = AccountVaultDelta::default(); - vault_delta - .add_asset(dummy_fungible_asset(faucet_id, u64::from(i) * 10)) - .unwrap(); - - let mut map_delta = StorageMapDelta::default(); - map_delta.insert(raw_key, Word::from([i, 0, 0, 0])); - let raw = BTreeMap::from_iter([(slot_name.clone(), StorageSlotDelta::Map(map_delta))]); - let storage_delta = AccountStorageDelta::from_raw(raw); - - let delta = dummy_partial_delta(account_id, vault_delta, storage_delta); - forest.update_account(block_num, &delta).unwrap(); - - let vault_tree = forest.tree_id_for_vault_root(account_id, block_num); - let storage_tree = forest.tree_id_for_root(account_id, &slot_name, block_num); - - assert_matches!(forest.forest.open(vault_tree, asset_key), Ok(_)); - assert_matches!(forest.forest.open(storage_tree, storage_key), Ok(_)); - } -} - -#[test] -fn 
vault_state_is_not_available_for_block_gaps() { +fn test_partial_delta_across_long_block_range() { + // Validation test: partial deltas should work across 101+ blocks. + // + // This test passes now because InnerForest keeps all history. Once pruning is implemented + // (estimated ~50 blocks), this test will fail unless DB fallback is also implemented. + // When that happens, the test should be updated to use DB fallback or converted to an + // integration test that has DB access. let mut forest = InnerForest::new(); let account_id = dummy_account(); let faucet_id = dummy_faucet(); + // Block 1: Add 1000 tokens let block_1 = BlockNumber::GENESIS.child(); let mut vault_delta_1 = AccountVaultDelta::default(); - vault_delta_1.add_asset(dummy_fungible_asset(faucet_id, 100)).unwrap(); + vault_delta_1.add_asset(dummy_fungible_asset(faucet_id, 1000)).unwrap(); let delta_1 = dummy_partial_delta(account_id, vault_delta_1, AccountStorageDelta::default()); forest.update_account(block_1, &delta_1).unwrap(); + let root_after_1000 = forest.vault_roots[&(account_id, block_1)]; - let block_6 = BlockNumber::from(6); - let mut vault_delta_6 = AccountVaultDelta::default(); - vault_delta_6.add_asset(dummy_fungible_asset(faucet_id, 150)).unwrap(); - let delta_6 = dummy_partial_delta(account_id, vault_delta_6, AccountStorageDelta::default()); - forest.update_account(block_6, &delta_6).unwrap(); + // Blocks 2-100: No changes to this account (simulating long gap) + + // Block 101: Add 500 more tokens (partial delta with +500) + // This requires looking up block 1's state across a 100-block gap. 
+ let block_101 = BlockNumber::from(101); + let mut vault_delta_101 = AccountVaultDelta::default(); + vault_delta_101.add_asset(dummy_fungible_asset(faucet_id, 500)).unwrap(); + let delta_101 = + dummy_partial_delta(account_id, vault_delta_101, AccountStorageDelta::default()); + forest.update_account(block_101, &delta_101).unwrap(); + let root_after_1500 = forest.vault_roots[&(account_id, block_101)]; + + // Roots should be different (1000 tokens vs 1500 tokens) + assert_ne!(root_after_1000, root_after_1500); + + // Verify the final state matches a fresh forest with 1500 tokens + let mut fresh_forest = InnerForest::new(); + let full_delta = dummy_full_state_delta(account_id, &[dummy_fungible_asset(faucet_id, 1500)]); + fresh_forest.update_account(block_101, &full_delta).unwrap(); + let root_full_state_1500 = fresh_forest.vault_roots[&(account_id, block_101)]; - assert!(forest.get_vault_root(account_id, BlockNumber::from(3)).is_some()); - assert!(forest.get_vault_root(account_id, BlockNumber::from(5)).is_some()); - assert!(forest.get_vault_root(account_id, block_6).is_some()); + assert_eq!(root_after_1500, root_full_state_1500); } #[test] -fn witness_queries_work_with_sparse_lineage_updates() { +fn test_update_storage_map() { use std::collections::BTreeMap; - use assert_matches::assert_matches; use miden_protocol::account::delta::{StorageMapDelta, StorageSlotDelta}; let mut forest = InnerForest::new(); let account_id = dummy_account(); - let faucet_id = dummy_faucet(); - let slot_name = StorageSlotName::mock(6); - let raw_key = Word::from([1u32, 0, 0, 0]); - let value = Word::from([9u32, 0, 0, 0]); - - let block_1 = BlockNumber::GENESIS.child(); - let mut vault_delta_1 = AccountVaultDelta::default(); - vault_delta_1.add_asset(dummy_fungible_asset(faucet_id, 100)).unwrap(); - let mut map_delta_1 = StorageMapDelta::default(); - map_delta_1.insert(raw_key, value); - let raw = BTreeMap::from_iter([(slot_name.clone(), StorageSlotDelta::Map(map_delta_1))]); - let 
storage_delta_1 = AccountStorageDelta::from_raw(raw); - let delta_1 = dummy_partial_delta(account_id, vault_delta_1, storage_delta_1); - forest.update_account(block_1, &delta_1).unwrap(); - - let block_3 = block_1.child().child(); - let mut vault_delta_3 = AccountVaultDelta::default(); - vault_delta_3.add_asset(dummy_fungible_asset(faucet_id, 50)).unwrap(); - let delta_3 = dummy_partial_delta(account_id, vault_delta_3, AccountStorageDelta::default()); - forest.update_account(block_3, &delta_3).unwrap(); + let block_num = BlockNumber::GENESIS.child(); - let block_2 = block_1.child(); - let asset_key = FungibleAsset::new(faucet_id, 0).unwrap().vault_key(); - let witnesses = forest - .get_vault_asset_witnesses(account_id, block_2, [asset_key].into()) - .unwrap(); - let proof: SmtProof = witnesses[0].clone().into(); - let root_at_2 = forest.get_vault_root(account_id, block_2).unwrap(); - assert_eq!(proof.compute_root(), root_at_2); + let slot_name = StorageSlotName::mock(3); + let key = Word::from([1u32, 2, 3, 4]); + let value = Word::from([5u32, 6, 7, 8]); - let storage_witness = forest - .get_storage_map_witness(account_id, &slot_name, block_2, raw_key) - .unwrap(); - let storage_root_at_2 = forest.get_storage_map_root(account_id, &slot_name, block_2).unwrap(); - let storage_proof: SmtProof = storage_witness.into(); - assert_eq!(storage_proof.compute_root(), storage_root_at_2); + let mut map_delta = StorageMapDelta::default(); + map_delta.insert(key, value); + let raw = BTreeMap::from_iter([(slot_name.clone(), StorageSlotDelta::Map(map_delta))]); + let storage_delta = AccountStorageDelta::from_raw(raw); - let storage_witness_at_3 = forest - .get_storage_map_witness(account_id, &slot_name, block_3, raw_key) - .unwrap(); - let storage_root_at_3 = forest.get_storage_map_root(account_id, &slot_name, block_3).unwrap(); - let storage_proof_at_3: SmtProof = storage_witness_at_3.into(); - assert_eq!(storage_proof_at_3.compute_root(), storage_root_at_3); + let delta = 
dummy_partial_delta(account_id, AccountVaultDelta::default(), storage_delta); + forest.update_account(block_num, &delta).unwrap(); - let vault_root_at_3 = forest.get_vault_root(account_id, block_3).unwrap(); - assert_matches!( + // Verify storage root was created + assert!( forest - .forest - .open(forest.tree_id_for_vault_root(account_id, block_3), asset_key.into()), - Ok(_) + .storage_map_roots + .contains_key(&(account_id, slot_name.clone(), block_num)) ); - assert_ne!(vault_root_at_3, InnerForest::empty_smt_root()); + let storage_root = forest.storage_map_roots[&(account_id, slot_name, block_num)]; + assert_ne!(storage_root, InnerForest::empty_smt_root()); } #[test] -fn vault_full_state_with_empty_vault_records_root() { +fn test_full_state_delta_with_empty_vault_records_root() { + // Regression test for issue #1581: full-state deltas with empty vaults must still record + // the vault root so that subsequent `get_vault_asset_witnesses` calls succeed. + // + // The network counter account from the network monitor has an empty vault (it only uses + // storage slots). Without this fix, `get_vault_asset_witnesses` fails with "root not found" + // because no vault root was ever recorded for the account. use miden_protocol::account::{Account, AccountStorage}; let mut forest = InnerForest::new(); let account_id = dummy_account(); let block_num = BlockNumber::GENESIS.child(); + // Create a full-state delta with an empty vault (like the network counter account). let vault = AssetVault::new(&[]).unwrap(); let storage = AccountStorage::new(vec![]).unwrap(); let code = AccountCode::mock(); @@ -300,14 +366,27 @@ fn vault_full_state_with_empty_vault_records_root() { let account = Account::new(account_id, vault, storage, code, nonce, None).unwrap(); let full_delta = AccountDelta::try_from(account).unwrap(); + // Sanity check: the vault delta should be empty. 
assert!(full_delta.vault().is_empty()); assert!(full_delta.is_full_state()); forest.update_account(block_num, &full_delta).unwrap(); - let recorded_root = forest.get_vault_root(account_id, block_num); - assert_eq!(recorded_root, Some(InnerForest::empty_smt_root())); + // The vault root must be recorded even though the vault is empty. + assert!( + forest.vault_roots.contains_key(&(account_id, block_num)), + "vault root should be recorded for full-state deltas with empty vaults" + ); + + // Verify the recorded root is the empty SMT root. + let recorded_root = forest.vault_roots[&(account_id, block_num)]; + assert_eq!( + recorded_root, + InnerForest::empty_smt_root(), + "empty vault should have the empty SMT root" + ); + // Verify `get_vault_asset_witnesses` succeeds (returns empty witnesses for empty keys). let witnesses = forest .get_vault_asset_witnesses(account_id, block_num, std::collections::BTreeSet::new()) .expect("get_vault_asset_witnesses should succeed for accounts with empty vaults"); @@ -315,63 +394,7 @@ fn vault_full_state_with_empty_vault_records_root() { } #[test] -fn vault_shared_root_retained_when_one_entry_pruned() { - let mut forest = InnerForest::new(); - let account1 = AccountId::try_from(ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE).unwrap(); - let account2 = AccountId::try_from(ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE_2).unwrap(); - let faucet_id = dummy_faucet(); - let block_1 = BlockNumber::GENESIS.child(); - let asset_amount = u64::from(HISTORICAL_BLOCK_RETENTION); - let amount_increment = asset_amount / u64::from(HISTORICAL_BLOCK_RETENTION); - let asset = dummy_fungible_asset(faucet_id, asset_amount); - let asset_key = AssetVaultKey::new_unchecked(asset.vault_key().into()); - - let mut vault_delta_1 = AccountVaultDelta::default(); - vault_delta_1.add_asset(asset).unwrap(); - let delta_1 = dummy_partial_delta(account1, vault_delta_1, AccountStorageDelta::default()); - forest.update_account(block_1, &delta_1).unwrap(); - - let mut 
vault_delta_2 = AccountVaultDelta::default(); - vault_delta_2.add_asset(dummy_fungible_asset(faucet_id, asset_amount)).unwrap(); - let delta_2 = dummy_partial_delta(account2, vault_delta_2, AccountStorageDelta::default()); - forest.update_account(block_1, &delta_2).unwrap(); - - let root1 = forest.get_vault_root(account1, block_1).unwrap(); - let root2 = forest.get_vault_root(account2, block_1).unwrap(); - assert_eq!(root1, root2); - - let block_at_51 = BlockNumber::from(HISTORICAL_BLOCK_RETENTION + 1); - let mut vault_delta_2_update = AccountVaultDelta::default(); - vault_delta_2_update - .add_asset(dummy_fungible_asset(faucet_id, amount_increment)) - .unwrap(); - let delta_2_update = - dummy_partial_delta(account2, vault_delta_2_update, AccountStorageDelta::default()); - forest.update_account(block_at_51, &delta_2_update).unwrap(); - - let block_at_52 = BlockNumber::from(HISTORICAL_BLOCK_RETENTION + 2); - let total_roots_removed = forest.prune(block_at_52); - - assert_eq!(total_roots_removed, 0); - assert!(forest.get_vault_root(account1, block_1).is_some()); - assert!(forest.get_vault_root(account2, block_1).is_some()); - - let vault_root_at_52 = forest.get_vault_root(account1, block_at_52); - assert_eq!(vault_root_at_52, Some(root1)); - - let witnesses = forest - .get_vault_asset_witnesses(account1, block_at_52, [asset_key].into()) - .unwrap(); - assert_eq!(witnesses.len(), 1); - let proof: SmtProof = witnesses[0].clone().into(); - assert_eq!(proof.compute_root(), root1); -} - -// STORAGE MAP TESTS -// ================================================================================================ - -#[test] -fn storage_map_incremental_updates() { +fn test_storage_map_incremental_updates() { use std::collections::BTreeMap; use miden_protocol::account::delta::{StorageMapDelta, StorageSlotDelta}; @@ -394,9 +417,9 @@ fn storage_map_incremental_updates() { let storage_delta_1 = AccountStorageDelta::from_raw(raw_1); let delta_1 = dummy_partial_delta(account_id, 
AccountVaultDelta::default(), storage_delta_1); forest.update_account(block_1, &delta_1).unwrap(); - let root_1 = forest.get_storage_map_root(account_id, &slot_name, block_1).unwrap(); + let root_1 = forest.storage_map_roots[&(account_id, slot_name.clone(), block_1)]; - // Block 2: Insert key2 -> value2 + // Block 2: Insert key2 -> value2 (key1 should persist) let block_2 = block_1.child(); let mut map_delta_2 = StorageMapDelta::default(); map_delta_2.insert(key2, value2); @@ -404,7 +427,7 @@ fn storage_map_incremental_updates() { let storage_delta_2 = AccountStorageDelta::from_raw(raw_2); let delta_2 = dummy_partial_delta(account_id, AccountVaultDelta::default(), storage_delta_2); forest.update_account(block_2, &delta_2).unwrap(); - let root_2 = forest.get_storage_map_root(account_id, &slot_name, block_2).unwrap(); + let root_2 = forest.storage_map_roots[&(account_id, slot_name.clone(), block_2)]; // Block 3: Update key1 -> value3 let block_3 = block_2.child(); @@ -414,65 +437,16 @@ fn storage_map_incremental_updates() { let storage_delta_3 = AccountStorageDelta::from_raw(raw_3); let delta_3 = dummy_partial_delta(account_id, AccountVaultDelta::default(), storage_delta_3); forest.update_account(block_3, &delta_3).unwrap(); - let root_3 = forest.get_storage_map_root(account_id, &slot_name, block_3).unwrap(); + let root_3 = forest.storage_map_roots[&(account_id, slot_name, block_3)]; + // All roots should be different assert_ne!(root_1, root_2); assert_ne!(root_2, root_3); assert_ne!(root_1, root_3); } #[test] -fn storage_map_state_is_not_available_for_block_gaps() { - use std::collections::BTreeMap; - - use miden_protocol::account::delta::{StorageMapDelta, StorageSlotDelta}; - - const BLOCK_FIRST: u32 = 1; - const BLOCK_SECOND: u32 = 4; - const BLOCK_QUERY_ONE: u32 = 2; - const BLOCK_QUERY_TWO: u32 = 3; - const KEY_VALUE: u32 = 7; - const VALUE_FIRST: u32 = 10; - const VALUE_SECOND: u32 = 20; - - let mut forest = InnerForest::new(); - let account_id = 
dummy_account(); - let slot_name = StorageSlotName::mock(4); - let raw_key = Word::from([KEY_VALUE, 0, 0, 0]); - - let block_1 = BlockNumber::from(BLOCK_FIRST); - let mut map_delta_1 = StorageMapDelta::default(); - let value_1 = Word::from([VALUE_FIRST, 0, 0, 0]); - map_delta_1.insert(raw_key, value_1); - let raw_1 = BTreeMap::from_iter([(slot_name.clone(), StorageSlotDelta::Map(map_delta_1))]); - let storage_delta_1 = AccountStorageDelta::from_raw(raw_1); - let delta_1 = dummy_partial_delta(account_id, AccountVaultDelta::default(), storage_delta_1); - forest.update_account(block_1, &delta_1).unwrap(); - - let block_4 = BlockNumber::from(BLOCK_SECOND); - let mut map_delta_4 = StorageMapDelta::default(); - let value_2 = Word::from([VALUE_SECOND, 0, 0, 0]); - map_delta_4.insert(raw_key, value_2); - let raw_4 = BTreeMap::from_iter([(slot_name.clone(), StorageSlotDelta::Map(map_delta_4))]); - let storage_delta_4 = AccountStorageDelta::from_raw(raw_4); - let delta_4 = dummy_partial_delta(account_id, AccountVaultDelta::default(), storage_delta_4); - forest.update_account(block_4, &delta_4).unwrap(); - - assert!( - forest - .get_storage_map_root(account_id, &slot_name, BlockNumber::from(BLOCK_QUERY_ONE)) - .is_some() - ); - assert!( - forest - .get_storage_map_root(account_id, &slot_name, BlockNumber::from(BLOCK_QUERY_TWO)) - .is_some() - ); - assert!(forest.get_storage_map_root(account_id, &slot_name, block_4).is_some()); -} - -#[test] -fn storage_map_empty_entries_query() { +fn test_empty_storage_map_entries_query() { use miden_protocol::account::auth::PublicKeyCommitment; use miden_protocol::account::{ AccountBuilder, @@ -489,6 +463,7 @@ fn storage_map_empty_entries_query() { let block_num = BlockNumber::GENESIS.child(); let slot_name = StorageSlotName::mock(0); + // Create an account with an empty storage map slot let storage_map = StorageMap::with_entries(vec![]).unwrap(); let component_storage = vec![StorageSlot::with_map(slot_name.clone(), storage_map)]; @@ 
-508,427 +483,38 @@ fn storage_map_empty_entries_query() { .unwrap(); let account_id = account.id(); + + // Convert to full-state delta (this triggers insert_account_storage path) let full_delta = AccountDelta::try_from(account).unwrap(); - assert!(full_delta.is_full_state()); + assert!(full_delta.is_full_state(), "delta should be full-state"); + // Apply the delta forest.update_account(block_num, &full_delta).unwrap(); - let root = forest.get_storage_map_root(account_id, &slot_name, block_num); - assert_eq!(root, Some(InnerForest::empty_smt_root())); -} - -#[test] -fn storage_map_open_returns_proofs() { - use std::collections::BTreeMap; - - use assert_matches::assert_matches; - use miden_protocol::account::delta::{StorageMapDelta, StorageSlotDelta}; - - let mut forest = InnerForest::new(); - let account_id = dummy_account(); - let slot_name = StorageSlotName::mock(3); - let block_num = BlockNumber::GENESIS.child(); - - let mut map_delta = StorageMapDelta::default(); - for i in 0..20u32 { - let key = Word::from([i, 0, 0, 0]); - let value = Word::from([0, 0, 0, i]); - map_delta.insert(key, value); - } - let raw = BTreeMap::from_iter([(slot_name.clone(), StorageSlotDelta::Map(map_delta))]); - let storage_delta = AccountStorageDelta::from_raw(raw); - let delta = dummy_partial_delta(account_id, AccountVaultDelta::default(), storage_delta); - forest.update_account(block_num, &delta).unwrap(); - - let keys: Vec = (0..20u32).map(|i| Word::from([i, 0, 0, 0])).collect(); - let result = - forest.get_storage_map_details_for_keys(account_id, slot_name.clone(), block_num, &keys); - - let details = result.expect("Should return Some").expect("Should not error"); - assert_matches!(details.entries, StorageMapEntries::EntriesWithProofs(entries) => { - assert_eq!(entries.len(), keys.len()); - }); -} - -#[test] -fn storage_map_key_hashing_and_raw_entries_are_consistent() { - use std::collections::BTreeMap; - - use miden_protocol::account::StorageMap; - use 
miden_protocol::account::delta::{StorageMapDelta, StorageSlotDelta}; - - const SLOT_INDEX: usize = 4; - const KEY_VALUE: u32 = 11; - const VALUE_VALUE: u32 = 22; - - let mut forest = InnerForest::new(); - let account_id = dummy_account(); - let slot_name = StorageSlotName::mock(SLOT_INDEX); - let block_num = BlockNumber::GENESIS.child(); - let raw_key = Word::from([KEY_VALUE, 0, 0, 0]); - let value = Word::from([VALUE_VALUE, 0, 0, 0]); - - let mut map_delta = StorageMapDelta::default(); - map_delta.insert(raw_key, value); - let raw = BTreeMap::from_iter([(slot_name.clone(), StorageSlotDelta::Map(map_delta))]); - let storage_delta = AccountStorageDelta::from_raw(raw); - let delta = dummy_partial_delta(account_id, AccountVaultDelta::default(), storage_delta); - forest.update_account(block_num, &delta).unwrap(); - - let root = forest.get_storage_map_root(account_id, &slot_name, block_num).unwrap(); - - let witness = forest - .get_storage_map_witness(account_id, &slot_name, block_num, raw_key) - .unwrap(); - let proof: SmtProof = witness.into(); - let hashed_key = StorageMap::hash_key(raw_key); - // Witness proofs use hashed keys because SMT leaves are keyed by the hash. - assert_eq!(proof.compute_root(), root); - assert_eq!(proof.get(&hashed_key), Some(value)); - // Raw keys never appear in SMT proofs, only their hashed counterparts. 
- assert_eq!(proof.get(&raw_key), None); -} - -// PRUNING TESTS -// ================================================================================================ - -const TEST_CHAIN_LENGTH: u32 = 100; -const TEST_AMOUNT_MULTIPLIER: u32 = 100; -const TEST_PRUNE_CHAIN_TIP: u32 = HISTORICAL_BLOCK_RETENTION + 5; - -#[test] -fn prune_handles_empty_forest() { - let mut forest = InnerForest::new(); - - let total_roots_removed = forest.prune(BlockNumber::GENESIS); - - assert_eq!(total_roots_removed, 0); -} - -#[test] -fn prune_removes_smt_roots_from_forest() { - use miden_protocol::account::delta::StorageMapDelta; - - let mut forest = InnerForest::new(); - let account_id = dummy_account(); - let faucet_id = dummy_faucet(); - let slot_name = StorageSlotName::mock(7); - - for i in 1..=TEST_PRUNE_CHAIN_TIP { - let block_num = BlockNumber::from(i); - - let mut vault_delta = AccountVaultDelta::default(); - vault_delta - .add_asset(dummy_fungible_asset(faucet_id, (i * TEST_AMOUNT_MULTIPLIER).into())) - .unwrap(); - let storage_delta = if i.is_multiple_of(3) { - let mut map_delta = StorageMapDelta::default(); - map_delta.insert(Word::from([1u32, 0, 0, 0]), Word::from([99u32, i, i * i, i * i * i])); - let asd = AccountStorageDelta::new(); - asd.add_updated_maps([(slot_name.clone(), map_delta)]) - } else { - AccountStorageDelta::default() - }; - - let delta = dummy_partial_delta(account_id, vault_delta, storage_delta); - forest.update_account(block_num, &delta).unwrap(); - } - - let retained_block = BlockNumber::from(TEST_PRUNE_CHAIN_TIP); - let pruned_block = BlockNumber::from(3u32); - - let total_roots_removed = forest.prune(retained_block); - assert_eq!(total_roots_removed, 0); - assert!(forest.get_vault_root(account_id, retained_block).is_some()); - assert!(forest.get_vault_root(account_id, pruned_block).is_none()); - assert!(forest.get_storage_map_root(account_id, &slot_name, pruned_block).is_none()); - assert!(forest.get_storage_map_root(account_id, &slot_name, 
retained_block).is_some()); - - let asset_key: Word = FungibleAsset::new(faucet_id, 0).unwrap().vault_key().into(); - let retained_tree = forest.tree_id_for_vault_root(account_id, retained_block); - let pruned_tree = forest.tree_id_for_vault_root(account_id, pruned_block); - assert_matches!(forest.forest.open(retained_tree, asset_key), Ok(_)); - assert_matches!(forest.forest.open(pruned_tree, asset_key), Err(_)); - - let storage_key = StorageMap::hash_key(Word::from([1u32, 0, 0, 0])); - let storage_tree = forest.tree_id_for_root(account_id, &slot_name, pruned_block); - assert_matches!(forest.forest.open(storage_tree, storage_key), Err(_)); -} - -#[test] -fn prune_respects_retention_boundary() { - let mut forest = InnerForest::new(); - let account_id = dummy_account(); - let faucet_id = dummy_faucet(); - - for i in 1..=HISTORICAL_BLOCK_RETENTION { - let block_num = BlockNumber::from(i); - let mut vault_delta = AccountVaultDelta::default(); - vault_delta - .add_asset(dummy_fungible_asset(faucet_id, (i * TEST_AMOUNT_MULTIPLIER).into())) - .unwrap(); - let delta = dummy_partial_delta(account_id, vault_delta, AccountStorageDelta::default()); - forest.update_account(block_num, &delta).unwrap(); - } - - let total_roots_removed = forest.prune(BlockNumber::from(HISTORICAL_BLOCK_RETENTION)); - - assert_eq!(total_roots_removed, 0); - assert_eq!(forest.forest.tree_count(), 11); -} - -#[test] -fn prune_roots_removes_old_entries() { - use miden_protocol::account::delta::StorageMapDelta; - - let mut forest = InnerForest::new(); - let account_id = dummy_account(); - - let faucet_id = dummy_faucet(); - let slot_name = StorageSlotName::mock(3); - - for i in 1..=TEST_CHAIN_LENGTH { - let block_num = BlockNumber::from(i); - let amount = (i * TEST_AMOUNT_MULTIPLIER).into(); - let mut vault_delta = AccountVaultDelta::default(); - vault_delta.add_asset(dummy_fungible_asset(faucet_id, amount)).unwrap(); - - let key = Word::from([i, i * i, 5, 4]); - let value = Word::from([0, 0, i * i * i, 
77]); - let mut map_delta = StorageMapDelta::default(); - map_delta.insert(key, value); - let storage_delta = - AccountStorageDelta::new().add_updated_maps([(slot_name.clone(), map_delta)]); - - let delta = dummy_partial_delta(account_id, vault_delta, storage_delta); - forest.update_account(block_num, &delta).unwrap(); - } - - assert_eq!(forest.forest.tree_count(), 22); - - let total_roots_removed = forest.prune(BlockNumber::from(TEST_CHAIN_LENGTH)); - - assert_eq!(total_roots_removed, 0); - - assert_eq!(forest.forest.tree_count(), 22); -} - -#[test] -fn prune_handles_multiple_accounts() { - let mut forest = InnerForest::new(); - let account1 = dummy_account(); - let account2 = AccountId::try_from(ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET).unwrap(); - let faucet_id = dummy_faucet(); - - for i in 1..=TEST_CHAIN_LENGTH { - let block_num = BlockNumber::from(i); - let amount = (i * TEST_AMOUNT_MULTIPLIER).into(); - - let mut vault_delta1 = AccountVaultDelta::default(); - vault_delta1.add_asset(dummy_fungible_asset(faucet_id, amount)).unwrap(); - let delta1 = dummy_partial_delta(account1, vault_delta1, AccountStorageDelta::default()); - forest.update_account(block_num, &delta1).unwrap(); - - let mut vault_delta2 = AccountVaultDelta::default(); - vault_delta2.add_asset(dummy_fungible_asset(account2, amount * 2)).unwrap(); - let delta2 = dummy_partial_delta(account2, vault_delta2, AccountStorageDelta::default()); - forest.update_account(block_num, &delta2).unwrap(); - } - - assert_eq!(forest.forest.tree_count(), 22); - - let total_roots_removed = forest.prune(BlockNumber::from(TEST_CHAIN_LENGTH)); - - let expected_removed_per_account = (TEST_CHAIN_LENGTH - HISTORICAL_BLOCK_RETENTION) as usize; - assert_eq!(total_roots_removed, 0); - assert!(total_roots_removed <= expected_removed_per_account * 2); - - assert_eq!(forest.forest.tree_count(), 22); -} - -#[test] -fn prune_handles_multiple_slots() { - use std::collections::BTreeMap; - - use 
miden_protocol::account::delta::{StorageMapDelta, StorageSlotDelta}; - - let mut forest = InnerForest::new(); - let account_id = dummy_account(); - let slot_a = StorageSlotName::mock(1); - let slot_b = StorageSlotName::mock(2); - - for i in 1..=TEST_CHAIN_LENGTH { - let block_num = BlockNumber::from(i); - let mut map_delta_a = StorageMapDelta::default(); - map_delta_a.insert(Word::from([i, 0, 0, 0]), Word::from([i, 0, 0, 1])); - let mut map_delta_b = StorageMapDelta::default(); - map_delta_b.insert(Word::from([i, 0, 0, 2]), Word::from([i, 0, 0, 3])); - let raw = BTreeMap::from_iter([ - (slot_a.clone(), StorageSlotDelta::Map(map_delta_a)), - (slot_b.clone(), StorageSlotDelta::Map(map_delta_b)), - ]); - let storage_delta = AccountStorageDelta::from_raw(raw); - let delta = dummy_partial_delta(account_id, AccountVaultDelta::default(), storage_delta); - forest.update_account(block_num, &delta).unwrap(); - } - - assert_eq!(forest.forest.tree_count(), 22); - - let chain_tip = BlockNumber::from(TEST_CHAIN_LENGTH); - let total_roots_removed = forest.prune(chain_tip); - - assert_eq!(total_roots_removed, 0); - - assert_eq!(forest.forest.tree_count(), 22); -} - -#[test] -fn prune_preserves_most_recent_state_per_entity() { - use std::collections::BTreeMap; - - use miden_protocol::account::delta::{StorageMapDelta, StorageSlotDelta}; - - let mut forest = InnerForest::new(); - let account_id = dummy_account(); - let faucet_id = dummy_faucet(); - let slot_map_a = StorageSlotName::mock(1); - let slot_map_b = StorageSlotName::mock(2); - - // Block 1: Create vault + map_a + map_b - let block_1 = BlockNumber::from(1); - let mut vault_delta_1 = AccountVaultDelta::default(); - vault_delta_1.add_asset(dummy_fungible_asset(faucet_id, 1000)).unwrap(); - - let mut map_delta_a = StorageMapDelta::default(); - map_delta_a.insert(Word::from([1u32, 0, 0, 0]), Word::from([100u32, 0, 0, 0])); - - let mut map_delta_b = StorageMapDelta::default(); - map_delta_b.insert(Word::from([2u32, 0, 0, 0]), 
Word::from([200u32, 0, 0, 0])); - - let raw = BTreeMap::from_iter([ - (slot_map_a.clone(), StorageSlotDelta::Map(map_delta_a)), - (slot_map_b.clone(), StorageSlotDelta::Map(map_delta_b)), - ]); - let storage_delta_1 = AccountStorageDelta::from_raw(raw); - let delta_1 = dummy_partial_delta(account_id, vault_delta_1, storage_delta_1); - forest.update_account(block_1, &delta_1).unwrap(); - - // Block 51: Update only map_a - let block_at_51 = BlockNumber::from(51); - let mut map_delta_a_new = StorageMapDelta::default(); - map_delta_a_new.insert(Word::from([1u32, 0, 0, 0]), Word::from([999u32, 0, 0, 0])); - - let raw_at_51 = - BTreeMap::from_iter([(slot_map_a.clone(), StorageSlotDelta::Map(map_delta_a_new))]); - let storage_delta_at_51 = AccountStorageDelta::from_raw(raw_at_51); - let delta_at_51 = - dummy_partial_delta(account_id, AccountVaultDelta::default(), storage_delta_at_51); - forest.update_account(block_at_51, &delta_at_51).unwrap(); - - // Block 100: Prune - let block_100 = BlockNumber::from(100); - let total_roots_removed = forest.prune(block_100); - - assert_eq!(total_roots_removed, 0); - - assert!(forest.get_storage_map_root(account_id, &slot_map_a, block_at_51).is_some()); - assert!(forest.get_storage_map_root(account_id, &slot_map_a, block_1).is_some()); - assert!(forest.get_storage_map_root(account_id, &slot_map_b, block_1).is_some()); -} - -#[test] -fn prune_preserves_entries_within_retention_window() { - use std::collections::BTreeMap; - - use miden_protocol::account::delta::{StorageMapDelta, StorageSlotDelta}; - - let mut forest = InnerForest::new(); - let account_id = dummy_account(); - let faucet_id = dummy_faucet(); - let slot_map = StorageSlotName::mock(1); - - let blocks = [1, 25, 50, 75, 100]; - - for &block_num in &blocks { - let block = BlockNumber::from(block_num); - - let mut vault_delta = AccountVaultDelta::default(); - vault_delta - .add_asset(dummy_fungible_asset(faucet_id, u64::from(block_num) * 100)) - .unwrap(); - - let mut map_delta = 
StorageMapDelta::default(); - map_delta.insert(Word::from([block_num, 0, 0, 0]), Word::from([block_num * 10, 0, 0, 0])); + // Verify storage_map_roots has an entry + assert!( + forest + .storage_map_roots + .contains_key(&(account_id, slot_name.clone(), block_num)), + "storage_map_roots should have an entry for the empty map" + ); - let raw = BTreeMap::from_iter([(slot_map.clone(), StorageSlotDelta::Map(map_delta))]); - let storage_delta = AccountStorageDelta::from_raw(raw); - let delta = dummy_partial_delta(account_id, vault_delta, storage_delta); - forest.update_account(block, &delta).unwrap(); + // Verify storage_map_entries returns Some (not None) - this is the bug fix validation + let result = forest.storage_map_entries(account_id, slot_name.clone(), block_num); + assert!(result.is_some(), "storage_map_entries should return Some for empty maps"); + + // Verify the entries are empty + let details = result.unwrap(); + assert_eq!(details.slot_name, slot_name); + match details.entries { + StorageMapEntries::AllEntries(entries) => { + assert!(entries.is_empty(), "entries should be empty for an empty map"); + }, + StorageMapEntries::LimitExceeded => { + panic!("should not exceed limit for empty map"); + }, + StorageMapEntries::EntriesWithProofs(_) => { + panic!("should not have proofs for empty map query"); + }, } - - // Block 100: Prune (retention window = 50 blocks, cutoff = 50) - let block_100 = BlockNumber::from(100); - let total_roots_removed = forest.prune(block_100); - - // Blocks 1 and 25 pruned (outside retention, have newer entries) - assert_eq!(total_roots_removed, 4); - - assert!(forest.get_vault_root(account_id, BlockNumber::from(1)).is_none()); - assert!(forest.get_vault_root(account_id, BlockNumber::from(25)).is_none()); - assert!(forest.get_vault_root(account_id, BlockNumber::from(50)).is_some()); - assert!(forest.get_vault_root(account_id, BlockNumber::from(75)).is_some()); - assert!(forest.get_vault_root(account_id, 
BlockNumber::from(100)).is_some()); -} - -/// Two accounts start with identical vault roots (same asset amount). When one account changes -/// in the next block, verify the unchanged account's vault root still works for lookups and -/// witness generation. -#[test] -fn shared_vault_root_retained_when_one_account_changes() { - let mut forest = InnerForest::new(); - let account1 = AccountId::try_from(ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE).unwrap(); - let account2 = AccountId::try_from(ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE_2).unwrap(); - let faucet_id = dummy_faucet(); - - // Block 1: Both accounts have identical vaults (same asset) - let block_1 = BlockNumber::GENESIS.child(); - let initial_amount = 1000u64; - let asset = dummy_fungible_asset(faucet_id, initial_amount); - let asset_key = AssetVaultKey::new_unchecked(asset.vault_key().into()); - - let mut vault_delta_1 = AccountVaultDelta::default(); - vault_delta_1.add_asset(asset).unwrap(); - let delta_1 = dummy_partial_delta(account1, vault_delta_1, AccountStorageDelta::default()); - forest.update_account(block_1, &delta_1).unwrap(); - - let mut vault_delta_2 = AccountVaultDelta::default(); - vault_delta_2 - .add_asset(dummy_fungible_asset(faucet_id, initial_amount)) - .unwrap(); - let delta_2 = dummy_partial_delta(account2, vault_delta_2, AccountStorageDelta::default()); - forest.update_account(block_1, &delta_2).unwrap(); - - // Both accounts should have the same vault root (structural sharing in SmtForest) - let root1_at_block1 = forest.get_vault_root(account1, block_1).unwrap(); - let root2_at_block1 = forest.get_vault_root(account2, block_1).unwrap(); - assert_eq!(root1_at_block1, root2_at_block1, "identical vaults should have identical roots"); - - // Block 2: Only account2 changes (adds more assets) - let block_2 = block_1.child(); - let mut vault_delta_2_update = AccountVaultDelta::default(); - vault_delta_2_update.add_asset(dummy_fungible_asset(faucet_id, 500)).unwrap(); - let 
delta_2_update = - dummy_partial_delta(account2, vault_delta_2_update, AccountStorageDelta::default()); - forest.update_account(block_2, &delta_2_update).unwrap(); - - // Account2 now has a different root - let root2_at_block2 = forest.get_vault_root(account2, block_2).unwrap(); - assert_ne!(root2_at_block1, root2_at_block2, "account2 vault should have changed"); - - assert!(forest.get_vault_root(account1, block_2).is_some()); - - let witnesses = forest - .get_vault_asset_witnesses(account1, block_2, [asset_key].into()) - .expect("witness generation should succeed for prior version"); - assert_eq!(witnesses.len(), 1); } diff --git a/crates/store/src/state/loader.rs b/crates/store/src/state/loader.rs index 4171053fe9..504ea06313 100644 --- a/crates/store/src/state/loader.rs +++ b/crates/store/src/state/loader.rs @@ -216,7 +216,7 @@ pub async fn load_mmr(db: &mut Db) -> Result = body + let duplicate_nullifiers: Vec<_> = block + .body() .created_nullifiers() .iter() .filter(|&nullifier| inner.nullifier_tree.get_block_num(nullifier).is_some()) @@ -305,7 +304,11 @@ impl State { let nullifier_tree_update = inner .nullifier_tree .compute_mutations( - body.created_nullifiers().iter().map(|nullifier| (*nullifier, block_num)), + block + .body() + .created_nullifiers() + .iter() + .map(|nullifier| (*nullifier, block_num)), ) .map_err(InvalidBlockError::NewBlockNullifierAlreadySpent)?; @@ -322,7 +325,9 @@ impl State { let account_tree_update = inner .account_tree .compute_mutations( - body.updated_accounts() + block + .body() + .updated_accounts() .iter() .map(|update| (update.account_id(), update.final_state_commitment())), ) @@ -350,13 +355,14 @@ impl State { ) }; - // Build note tree - let note_tree = body.compute_block_note_tree(); + // build note tree + let note_tree = block.body().compute_block_note_tree(); if note_tree.root() != header.note_root() { return Err(InvalidBlockError::NewBlockInvalidNoteRoot.into()); } - let notes = body + let notes = block + .body() 
.output_notes() .map(|(note_index, note)| { let (details, nullifier) = match note { @@ -395,12 +401,12 @@ impl State { // Extract public account updates with deltas before block is moved into async task. // Private accounts are filtered out since they don't expose their state changes. let account_deltas = - Vec::from_iter(body.updated_accounts().iter().filter_map( - |update| match update.details() { + Vec::from_iter(block.body().updated_accounts().iter().filter_map(|update| { + match update.details() { AccountUpdateDetails::Delta(delta) => Some(delta.clone()), AccountUpdateDetails::Private => None, - }, - )); + } + })); // The DB and in-memory state updates need to be synchronized and are partially // overlapping. Namely, the DB transaction only proceeds after this task acquires the @@ -465,8 +471,7 @@ impl State { .in_current_span() .await?; - let mut forest = self.forest.write().await; - forest.apply_block_updates(block_num, account_deltas)?; + self.forest.write().await.apply_block_updates(block_num, account_deltas)?; info!(%block_commitment, block_num = block_num.as_u32(), COMPONENT, "apply_block successful"); @@ -1050,8 +1055,7 @@ impl State { /// /// For specific key queries (`SlotData::MapKeys`), the forest is used to provide SMT proofs. /// Returns an error if the forest doesn't have data for the requested slot. - /// All-entries queries (`SlotData::All`) use the forest to request all entries database. - #[allow(clippy::too_many_lines)] + /// All-entries queries (`SlotData::All`) use the forest to return all entries. 
async fn fetch_public_account_details( &self, account_id: AccountId, @@ -1102,73 +1106,29 @@ impl State { let mut storage_map_details = Vec::::with_capacity(storage_requests.len()); - let mut map_keys_requests = Vec::new(); - let mut all_entries_requests = Vec::new(); - let mut storage_request_slots = Vec::with_capacity(storage_requests.len()); - for (index, StorageMapRequest { slot_name, slot_data }) in - storage_requests.into_iter().enumerate() - { - storage_request_slots.push(slot_name.clone()); - match slot_data { - SlotData::MapKeys(keys) => { - map_keys_requests.push((index, slot_name, keys)); - }, - SlotData::All => { - all_entries_requests.push((index, slot_name)); - }, - } - } + // Use forest for storage map queries + let forest_guard = self.forest.read().await; - let mut storage_map_details_by_index = vec![None; storage_request_slots.len()]; - - if !map_keys_requests.is_empty() { - let forest_guard = self.forest.read().await; - for (index, slot_name, keys) in map_keys_requests { - let details = forest_guard - .get_storage_map_details_for_keys( + for StorageMapRequest { slot_name, slot_data } in storage_requests { + let details = match &slot_data { + SlotData::MapKeys(keys) => forest_guard + .open_storage_map(account_id, slot_name.clone(), block_num, keys) + .ok_or_else(|| DatabaseError::StorageRootNotFound { account_id, - slot_name.clone(), + slot_name: slot_name.to_string(), block_num, - &keys, - ) + })? + .map_err(DatabaseError::MerkleError)?, + SlotData::All => forest_guard + .storage_map_entries(account_id, slot_name.clone(), block_num) .ok_or_else(|| DatabaseError::StorageRootNotFound { account_id, slot_name: slot_name.to_string(), block_num, - })? 
- .map_err(DatabaseError::MerkleError)?; - storage_map_details_by_index[index] = Some(details); - } - } - - // TODO parallelize the read requests - for (index, slot_name) in all_entries_requests { - let details = self - .db - .reconstruct_storage_map_from_db( - account_id, - slot_name.clone(), - block_num, - Some( - // TODO unify this with - // `AccountStorageMapDetails::MAX_RETURN_ENTRIES` - // and accumulated the limits - ::LIMIT, - ), - ) - .await?; - storage_map_details_by_index[index] = Some(details); - } + })?, + }; - for (details, slot_name) in - storage_map_details_by_index.into_iter().zip(storage_request_slots) - { - let details = details.ok_or_else(|| DatabaseError::StorageRootNotFound { - account_id, - slot_name: slot_name.to_string(), - block_num, - })?; storage_map_details.push(details); } @@ -1189,7 +1149,7 @@ impl State { account_id: AccountId, block_range: RangeInclusive, ) -> Result { - self.db.select_storage_map_sync_values(account_id, block_range, None).await + self.db.select_storage_map_sync_values(account_id, block_range).await } /// Loads a block from the block store. Return `Ok(None)` if the block is not found. From 3d19051baba1560680ae85fc5ac7cc2e69fae278 Mon Sep 17 00:00:00 2001 From: igamigo Date: Thu, 12 Mar 2026 18:48:08 -0300 Subject: [PATCH 7/8] chore(ntx): log check and transaction results (#1787) --- CHANGELOG.md | 1 + crates/ntx-builder/src/actor/execute.rs | 9 +++++++++ crates/ntx-builder/src/actor/mod.rs | 22 +++++++++++++++++++++- 3 files changed, 31 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index fbccbf3d26..f2fc6e99a8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,7 @@ - Private notes with the network note attachment are no longer incorrectly considered as network notes (#[#1736](https://github.com/0xMiden/node/pull/1736)). 
- Fixed network monitor looping on stale wallet nonce after node restarts by re-syncing wallet state from RPC after repeated failures ([#1748](https://github.com/0xMiden/node/pull/1748)). +- Added verbose `info!`-level logging to the network transaction builder for transaction execution, note filtering failures, and transaction outcomes ([#1770](https://github.com/0xMiden/node/pull/1770)). ## v0.13.7 (2026-02-25) diff --git a/crates/ntx-builder/src/actor/execute.rs b/crates/ntx-builder/src/actor/execute.rs index edcf58c07e..87e6dbdc96 100644 --- a/crates/ntx-builder/src/actor/execute.rs +++ b/crates/ntx-builder/src/actor/execute.rs @@ -233,6 +233,15 @@ impl NtxContext { .await { Ok(NoteConsumptionInfo { successful, failed, .. }) => { + for failed_note in &failed { + tracing::info!( + note_id = %failed_note.note.id(), + nullifier = %failed_note.note.nullifier(), + err = %failed_note.error, + "note failed consumability check", + ); + } + // Map successful notes to input notes. let successful = InputNotes::from_unauthenticated_notes(successful) .map_err(NtxError::InputNotes)?; diff --git a/crates/ntx-builder/src/actor/mod.rs b/crates/ntx-builder/src/actor/mod.rs index ae8f63629e..edf88eda61 100644 --- a/crates/ntx-builder/src/actor/mod.rs +++ b/crates/ntx-builder/src/actor/mod.rs @@ -296,6 +296,15 @@ impl AccountActor { ); let notes = tx_candidate.notes.clone(); + let account_id = tx_candidate.account.id(); + let note_ids: Vec<_> = notes.iter().map(|n| n.to_inner().id()).collect(); + tracing::info!( + %account_id, + ?note_ids, + num_notes = notes.len(), + "executing network transaction", + ); + let execution_result = context.execute_transaction(tx_candidate).await; match execution_result { // Execution completed without failed notes. @@ -304,13 +313,24 @@ impl AccountActor { }, // Execution completed with some failed notes. 
Ok((tx_id, failed)) => { + tracing::info!( + %account_id, + %tx_id, + num_failed = failed.len(), + "network transaction executed with some failed notes", + ); let notes = failed.into_iter().map(|note| note.note).collect::>(); state.notes_failed(notes.as_slice(), block_num); self.mode = ActorMode::TransactionInflight(tx_id); }, // Transaction execution failed. Err(err) => { - tracing::error!(err = err.as_report(), "network transaction failed"); + tracing::error!( + %account_id, + ?note_ids, + err = err.as_report(), + "network transaction failed", + ); self.mode = ActorMode::NoViableNotes; let notes = notes.into_iter().map(|note| note.into_inner().into()).collect::>(); From 187405567ee771c8b931ffba57663010458403da Mon Sep 17 00:00:00 2001 From: Bobbin Threadbare Date: Thu, 12 Mar 2026 14:59:00 -0700 Subject: [PATCH 8/8] chore: increment crate versions to v0.13.8 --- CHANGELOG.md | 2 +- Cargo.lock | 28 ++++++++++++++-------------- Cargo.toml | 2 +- 3 files changed, 16 insertions(+), 16 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index f2fc6e99a8..bc557ab077 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,6 @@ # Changelog -## v0.13.8 (TBD) +## v0.13.8 (2026-03-12) - Private notes with the network note attachment are no longer incorrectly considered as network notes (#[#1736](https://github.com/0xMiden/node/pull/1736)). - Fixed network monitor looping on stale wallet nonce after node restarts by re-syncing wallet state from RPC after repeated failures ([#1748](https://github.com/0xMiden/node/pull/1748)). 
diff --git a/Cargo.lock b/Cargo.lock index 24936392a0..001c8e632e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2765,7 +2765,7 @@ dependencies = [ [[package]] name = "miden-network-monitor" -version = "0.13.7" +version = "0.13.8" dependencies = [ "anyhow", "axum", @@ -2793,7 +2793,7 @@ dependencies = [ [[package]] name = "miden-node" -version = "0.13.7" +version = "0.13.8" dependencies = [ "anyhow", "clap 4.5.54", @@ -2814,7 +2814,7 @@ dependencies = [ [[package]] name = "miden-node-block-producer" -version = "0.13.7" +version = "0.13.8" dependencies = [ "anyhow", "assert_matches", @@ -2850,7 +2850,7 @@ dependencies = [ [[package]] name = "miden-node-grpc-error-macro" -version = "0.13.7" +version = "0.13.8" dependencies = [ "quote", "syn 2.0.114", @@ -2858,7 +2858,7 @@ dependencies = [ [[package]] name = "miden-node-ntx-builder" -version = "0.13.7" +version = "0.13.8" dependencies = [ "anyhow", "futures", @@ -2883,7 +2883,7 @@ dependencies = [ [[package]] name = "miden-node-proto" -version = "0.13.7" +version = "0.13.8" dependencies = [ "anyhow", "assert_matches", @@ -2907,7 +2907,7 @@ dependencies = [ [[package]] name = "miden-node-proto-build" -version = "0.13.7" +version = "0.13.8" dependencies = [ "fs-err", "miette", @@ -2917,7 +2917,7 @@ dependencies = [ [[package]] name = "miden-node-rpc" -version = "0.13.7" +version = "0.13.8" dependencies = [ "anyhow", "futures", @@ -2949,7 +2949,7 @@ dependencies = [ [[package]] name = "miden-node-store" -version = "0.13.7" +version = "0.13.8" dependencies = [ "anyhow", "assert_matches", @@ -2988,7 +2988,7 @@ dependencies = [ [[package]] name = "miden-node-stress-test" -version = "0.13.7" +version = "0.13.8" dependencies = [ "clap 4.5.54", "fs-err", @@ -3018,7 +3018,7 @@ dependencies = [ [[package]] name = "miden-node-utils" -version = "0.13.7" +version = "0.13.8" dependencies = [ "anyhow", "bytes", @@ -3051,7 +3051,7 @@ dependencies = [ [[package]] name = "miden-node-validator" -version = "0.13.7" +version = "0.13.8" 
dependencies = [ "anyhow", "miden-node-proto", @@ -3146,7 +3146,7 @@ dependencies = [ [[package]] name = "miden-remote-prover" -version = "0.13.7" +version = "0.13.8" dependencies = [ "anyhow", "async-trait", @@ -3192,7 +3192,7 @@ dependencies = [ [[package]] name = "miden-remote-prover-client" -version = "0.13.7" +version = "0.13.8" dependencies = [ "fs-err", "getrandom 0.3.4", diff --git a/Cargo.toml b/Cargo.toml index a1a9387756..4042e3cb7b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -28,7 +28,7 @@ license = "MIT" readme = "README.md" repository = "https://github.com/0xMiden/miden-node" rust-version = "1.90" -version = "0.13.7" +version = "0.13.8" # Optimize the cryptography for faster tests involving account creation. [profile.test.package.miden-crypto]