From 4d0c39be4e5ffe7c4b61996de4df7a858b87b0cd Mon Sep 17 00:00:00 2001 From: Bernhard Schuster Date: Tue, 10 Feb 2026 10:54:20 +0100 Subject: [PATCH 01/18] y --- CHANGELOG.md | 1 + Cargo.lock | 10 + crates/store/Cargo.toml | 1 + crates/store/src/db/mod.rs | 39 ++ crates/store/src/db/tests.rs | 907 ++++++++++++++++++++++++- crates/store/src/inner_forest/mod.rs | 326 +++++++-- crates/store/src/inner_forest/tests.rs | 781 +++++++++++++++------ crates/store/src/state/loader.rs | 2 +- crates/store/src/state/mod.rs | 69 +- 9 files changed, 1835 insertions(+), 301 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 048d156b5..d3e163b2f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -38,6 +38,7 @@ ### Enhancements +- Added cleanup of old account data from the in-memory forest ([#1175](https://github.com/0xMiden/miden-node/issues/1175)). - Added block validation endpoint to validator and integrated with block producer ([#1382](https://github.com/0xMiden/miden-node/pull/1381)). - Added support for timeouts in the WASM remote prover clients ([#1383](https://github.com/0xMiden/miden-node/pull/1383)). - Added mempool statistics to the block producer status in the `miden-network-monitor` binary ([#1392](https://github.com/0xMiden/miden-node/pull/1392)). 
diff --git a/Cargo.lock b/Cargo.lock index 8f7601604..e6313649b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2405,6 +2405,15 @@ dependencies = [ "tracing-subscriber", ] +[[package]] +name = "lru" +version = "0.12.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "234cf4f4a04dc1f57e24b96cc0cd600cf2af460d4161ac5ecdd0af8e1f3b2a38" +dependencies = [ + "hashbrown 0.15.5", +] + [[package]] name = "lru" version = "0.14.0" @@ -2910,6 +2919,7 @@ dependencies = [ "hex", "indexmap 2.13.0", "libsqlite3-sys", + "lru 0.12.5", "miden-crypto", "miden-node-proto", "miden-node-proto-build", diff --git a/crates/store/Cargo.toml b/crates/store/Cargo.toml index 1c62c7ab7..7f296d2e0 100644 --- a/crates/store/Cargo.toml +++ b/crates/store/Cargo.toml @@ -25,6 +25,7 @@ fs-err = { workspace = true } hex = { version = "0.4" } indexmap = { workspace = true } libsqlite3-sys = { workspace = true } +lru = { version = "0.12" } miden-crypto = { features = ["concurrent", "hashmaps"], workspace = true } miden-node-proto = { workspace = true } miden-node-proto-build = { features = ["internal"], workspace = true } diff --git a/crates/store/src/db/mod.rs b/crates/store/src/db/mod.rs index 7fc4a5cab..7bed9f0ef 100644 --- a/crates/store/src/db/mod.rs +++ b/crates/store/src/db/mod.rs @@ -607,6 +607,45 @@ impl Db { .await } + /// Reconstructs storage map details from the database for a specific slot at a block. + /// + /// Used as fallback when `InnerForest` cache misses (historical or evicted queries). + /// Rebuilds all entries by querying the DB and filtering to the specific slot. 
+ pub(crate) async fn reconstruct_storage_map_from_db( + &self, + account_id: AccountId, + slot_name: miden_protocol::account::StorageSlotName, + block_num: BlockNumber, + ) -> Result { + use miden_node_proto::domain::account::{AccountStorageMapDetails, StorageMapEntries}; + use miden_protocol::EMPTY_WORD; + + let values = self + .select_storage_map_sync_values(account_id, BlockNumber::GENESIS..=block_num) + .await?; + + // Filter to the specific slot and collect latest values per key + let mut latest_values: BTreeMap = BTreeMap::new(); + for value in values.values { + if value.slot_name == slot_name { + latest_values.insert(value.key, value.value); + } + } + + // Remove EMPTY_WORD entries (deletions) + latest_values.retain(|_, v| *v != EMPTY_WORD); + + if latest_values.len() > AccountStorageMapDetails::MAX_RETURN_ENTRIES { + return Ok(AccountStorageMapDetails { + slot_name, + entries: StorageMapEntries::LimitExceeded, + }); + } + + let entries = Vec::from_iter(latest_values.into_iter()); + Ok(AccountStorageMapDetails::from_forest_entries(slot_name, entries)) + } + /// Emits size metrics for each table in the database, and the entire database. 
#[instrument(target = COMPONENT, skip_all, err)] pub async fn analyze_table_sizes(&self) -> Result<(), DatabaseError> { diff --git a/crates/store/src/db/tests.rs b/crates/store/src/db/tests.rs index 2749c9903..c8e54e647 100644 --- a/crates/store/src/db/tests.rs +++ b/crates/store/src/db/tests.rs @@ -4,6 +4,7 @@ use std::num::NonZeroUsize; use std::sync::{Arc, Mutex}; +use assert_matches::assert_matches; use diesel::{Connection, SqliteConnection}; use miden_node_proto::domain::account::AccountSummary; use miden_node_utils::fee::{test_fee, test_fee_params}; @@ -36,6 +37,7 @@ use miden_protocol::block::{ }; use miden_protocol::crypto::dsa::ecdsa_k256_keccak::SecretKey; use miden_protocol::crypto::merkle::SparseMerklePath; +use miden_protocol::crypto::merkle::smt::SmtProof; use miden_protocol::crypto::rand::RpoRandomCoin; use miden_protocol::note::{ Note, @@ -2244,7 +2246,7 @@ fn db_roundtrip_account_storage_with_maps() { #[test] #[miden_node_test_macro::enable_logging] -fn test_note_metadata_with_attachment_roundtrip() { +fn db_roundtrip_note_metadata_attachment() { let mut conn = create_db(); let block_num = BlockNumber::from(1); create_block(&mut conn, block_num); @@ -2295,3 +2297,906 @@ fn test_note_metadata_with_attachment_roundtrip() { "NetworkAccountTarget should have the correct target account ID" ); } + +#[test] +#[miden_node_test_macro::enable_logging] +fn inner_forest_matches_db_storage_map_roots_across_updates() { + use std::collections::BTreeMap; + + use miden_protocol::account::delta::{StorageMapDelta, StorageSlotDelta}; + use miden_protocol::crypto::merkle::smt::Smt; + + use crate::inner_forest::InnerForest; + + /// Reconstructs storage map root from DB entries at a specific block. 
+ fn reconstruct_storage_map_root_from_db( + conn: &mut SqliteConnection, + account_id: AccountId, + slot_name: &StorageSlotName, + block_num: BlockNumber, + ) -> Option { + let storage_values = queries::select_account_storage_map_values( + conn, + account_id, + BlockNumber::GENESIS..=block_num, + ) + .unwrap(); + + // Filter to the specific slot and get most recent value for each key + let mut latest_values: BTreeMap = BTreeMap::new(); + for value in storage_values.values { + if value.slot_name == *slot_name { + latest_values.insert(value.key, value.value); + } + } + + if latest_values.is_empty() { + return None; + } + + // Build SMT from entries + let entries: Vec<(Word, Word)> = latest_values + .into_iter() + .filter_map(|(key, value)| { + if value == EMPTY_WORD { + None + } else { + // Keys are stored unhashed in DB, match InnerForest behavior + Some((key, value)) + } + }) + .collect(); + + if entries.is_empty() { + use miden_protocol::crypto::merkle::EmptySubtreeRoots; + use miden_protocol::crypto::merkle::smt::SMT_DEPTH; + return Some(*EmptySubtreeRoots::entry(SMT_DEPTH, 0)); + } + + let mut smt = Smt::default(); + for (key, value) in entries { + smt.insert(key, value).unwrap(); + } + + Some(smt.root()) + } + + let mut conn = create_db(); + let mut forest = InnerForest::new(); + let account_id = AccountId::try_from(ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE).unwrap(); + + let block1 = BlockNumber::from(1); + let block2 = BlockNumber::from(2); + let block3 = BlockNumber::from(3); + + create_block(&mut conn, block1); + create_block(&mut conn, block2); + create_block(&mut conn, block3); + + let slot_map = StorageSlotName::mock(1); + let slot_value = StorageSlotName::mock(2); + + let key1 = num_to_word(100); + let key2 = num_to_word(200); + let value1 = num_to_word(1000); + let value2 = num_to_word(2000); + let value3 = num_to_word(3000); + + // Block 1: Add storage map entries and a storage value + let mut map_delta_1 = StorageMapDelta::default(); + 
map_delta_1.insert(key1, value1); + map_delta_1.insert(key2, value2); + + let raw_1 = BTreeMap::from_iter([ + (slot_map.clone(), StorageSlotDelta::Map(map_delta_1)), + (slot_value.clone(), StorageSlotDelta::Value(value1)), + ]); + let storage_1 = AccountStorageDelta::from_raw(raw_1); + let delta_1 = + AccountDelta::new(account_id, storage_1.clone(), AccountVaultDelta::default(), Felt::ONE) + .unwrap(); + + insert_account_delta(&mut conn, account_id, block1, &delta_1); + forest.update_account(block1, &delta_1).unwrap(); + + // Verify forest matches DB for block 1 + let forest_root_1 = forest.get_storage_map_root(account_id, &slot_map, block1).unwrap(); + let db_root_1 = reconstruct_storage_map_root_from_db(&mut conn, account_id, &slot_map, block1) + .expect("DB should have storage map root"); + + assert_eq!( + forest_root_1, db_root_1, + "Storage map root at block 1 should match between InnerForest and DB" + ); + + // Block 2: Delete storage map entry (set to EMPTY_WORD) and delete storage value + let mut map_delta_2 = StorageMapDelta::default(); + map_delta_2.insert(key1, EMPTY_WORD); + + let raw_2 = BTreeMap::from_iter([ + (slot_map.clone(), StorageSlotDelta::Map(map_delta_2)), + (slot_value.clone(), StorageSlotDelta::Value(EMPTY_WORD)), + ]); + let storage_2 = AccountStorageDelta::from_raw(raw_2); + let delta_2 = AccountDelta::new( + account_id, + storage_2.clone(), + AccountVaultDelta::default(), + Felt::new(2), + ) + .unwrap(); + + insert_account_delta(&mut conn, account_id, block2, &delta_2); + forest.update_account(block2, &delta_2).unwrap(); + + // Verify forest matches DB for block 2 + let forest_root_2 = forest.get_storage_map_root(account_id, &slot_map, block2).unwrap(); + let db_root_2 = reconstruct_storage_map_root_from_db(&mut conn, account_id, &slot_map, block2) + .expect("DB should have storage map root"); + + assert_eq!( + forest_root_2, db_root_2, + "Storage map root at block 2 should match between InnerForest and DB" + ); + + // Block 3: Re-add 
same value as block 1 and add different map entry + let mut map_delta_3 = StorageMapDelta::default(); + map_delta_3.insert(key2, value3); // Update existing key + + let raw_3 = BTreeMap::from_iter([ + (slot_map.clone(), StorageSlotDelta::Map(map_delta_3)), + (slot_value.clone(), StorageSlotDelta::Value(value1)), // Same as block 1 + ]); + let storage_3 = AccountStorageDelta::from_raw(raw_3); + let delta_3 = AccountDelta::new( + account_id, + storage_3.clone(), + AccountVaultDelta::default(), + Felt::new(3), + ) + .unwrap(); + + insert_account_delta(&mut conn, account_id, block3, &delta_3); + forest.update_account(block3, &delta_3).unwrap(); + + // Verify forest matches DB for block 3 + let forest_root_3 = forest.get_storage_map_root(account_id, &slot_map, block3).unwrap(); + let db_root_3 = reconstruct_storage_map_root_from_db(&mut conn, account_id, &slot_map, block3) + .expect("DB should have storage map root"); + + assert_eq!( + forest_root_3, db_root_3, + "Storage map root at block 3 should match between InnerForest and DB" + ); + + // Verify we can query historical roots + let forest_root_1_check = forest.get_storage_map_root(account_id, &slot_map, block1).unwrap(); + let db_root_1_check = + reconstruct_storage_map_root_from_db(&mut conn, account_id, &slot_map, block1) + .expect("DB should have storage map root"); + assert_eq!( + forest_root_1_check, db_root_1_check, + "Historical query for block 1 should match" + ); + + // Verify roots are different across blocks (since we modified the map) + assert_ne!(forest_root_1, forest_root_2, "Roots should differ after deletion"); + assert_ne!(forest_root_2, forest_root_3, "Roots should differ after modification"); +} + +#[test] +#[miden_node_test_macro::enable_logging] +fn inner_forest_shared_roots_not_deleted_prematurely() { + use std::collections::BTreeMap; + + use miden_protocol::account::delta::{StorageMapDelta, StorageSlotDelta}; + use miden_protocol::testing::account_id::{ + 
ACCOUNT_ID_REGULAR_PRIVATE_ACCOUNT_UPDATABLE_CODE, + ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE_2, + }; + + use crate::inner_forest::InnerForest; + + let mut forest = InnerForest::new(); + let account1 = AccountId::try_from(ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE).unwrap(); + let account2 = AccountId::try_from(ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE_2).unwrap(); + let account3 = AccountId::try_from(ACCOUNT_ID_REGULAR_PRIVATE_ACCOUNT_UPDATABLE_CODE).unwrap(); + + let block1 = BlockNumber::from(1); + let slot_name = StorageSlotName::mock(1); + + let key1 = num_to_word(100); + let key2 = num_to_word(200); + let value1 = num_to_word(1000); + let value2 = num_to_word(2000); + + // All three accounts add identical storage maps at block 1 + let mut map_delta = StorageMapDelta::default(); + map_delta.insert(key1, value1); + map_delta.insert(key2, value2); + + let raw = BTreeMap::from_iter([(slot_name.clone(), StorageSlotDelta::Map(map_delta.clone()))]); + let storage = AccountStorageDelta::from_raw(raw); + + // Account 1 + let delta1 = + AccountDelta::new(account1, storage.clone(), AccountVaultDelta::default(), Felt::ONE) + .unwrap(); + forest.update_account(block1, &delta1).unwrap(); + + // Account 2 (same storage) + let delta2 = + AccountDelta::new(account2, storage.clone(), AccountVaultDelta::default(), Felt::ONE) + .unwrap(); + forest.update_account(block1, &delta2).unwrap(); + + // Account 3 (same storage) + let delta3 = + AccountDelta::new(account3, storage.clone(), AccountVaultDelta::default(), Felt::ONE) + .unwrap(); + forest.update_account(block1, &delta3).unwrap(); + + // All three accounts should have the same root (structural sharing in SmtForest) + let root1 = forest.get_storage_map_root(account1, &slot_name, block1).unwrap(); + let root2 = forest.get_storage_map_root(account2, &slot_name, block1).unwrap(); + let root3 = forest.get_storage_map_root(account3, &slot_name, block1).unwrap(); + + assert_eq!(root1, root2, "Identical maps 
should have same root"); + assert_eq!(root2, root3, "Identical maps should have same root"); + + // Verify we can get witnesses for all three accounts and verify them against roots + let witness1 = forest + .get_storage_map_witness(account1, &slot_name, block1, key1) + .expect("Account1 should have accessible storage map"); + let witness2 = forest + .get_storage_map_witness(account2, &slot_name, block1, key1) + .expect("Account2 should have accessible storage map"); + let witness3 = forest + .get_storage_map_witness(account3, &slot_name, block1, key1) + .expect("Account3 should have accessible storage map"); + + // Verify witnesses against storage map roots using SmtProof::compute_root + let proof1: SmtProof = witness1.into(); + assert_eq!(proof1.compute_root(), root1, "Witness1 must verify against root1"); + + let proof2: SmtProof = witness2.into(); + assert_eq!(proof2.compute_root(), root2, "Witness2 must verify against root2"); + + let proof3: SmtProof = witness3.into(); + assert_eq!(proof3.compute_root(), root3, "Witness3 must verify against root3"); + + // Now prune account1's storage (simulate it being old enough to prune) + // This should NOT affect account2 and account3 + let block_to_prune = block1; + let (_, storage_roots_removed, _) = forest.prune(block_to_prune); + + // No roots should be removed since block1 is at the chain tip + assert_eq!(storage_roots_removed, 0, "No roots should be pruned at chain tip"); + + // Advance chain and create a scenario where only account1 is old + let block51 = BlockNumber::from(51); + let block52 = BlockNumber::from(52); + + // Update account2 at block 51 (keeps it recent) + let mut map_delta_update = StorageMapDelta::default(); + map_delta_update.insert(key1, num_to_word(1001)); // Slight change + let raw_update = + BTreeMap::from_iter([(slot_name.clone(), StorageSlotDelta::Map(map_delta_update))]); + let storage_update = AccountStorageDelta::from_raw(raw_update); + let delta2_update = AccountDelta::new( + account2, + 
storage_update.clone(), + AccountVaultDelta::default(), + Felt::new(2), + ) + .unwrap(); + forest.update_account(block51, &delta2_update).unwrap(); + + // Update account3 at block 52 (keeps it recent) + let delta3_update = + AccountDelta::new(account3, storage_update, AccountVaultDelta::default(), Felt::new(2)) + .unwrap(); + forest.update_account(block52, &delta3_update).unwrap(); + + // Prune at block 52 + // - Account1: block 1 is most recent, should NOT be pruned + // - Account2: block 1 is old (block 51 is newer), should be pruned + // - Account3: block 1 is old (block 52 is newer), should be pruned + let (_, storage_roots_removed, storage_entries_removed) = forest.prune(block52); + + assert_eq!( + storage_roots_removed, 2, + "Should prune accounts 2 and 3's old storage roots from block 1 (account1's block 1 is most recent)" + ); + assert_eq!( + storage_entries_removed, 0, + "Storage entries are LRU-cached, not counted in prune results" + ); + + // Account1 at block1 should STILL be accessible (it's the most recent for account1) + let account1_root_after_prune = forest.get_storage_map_root(account1, &slot_name, block1); + assert!( + account1_root_after_prune.is_some(), + "Account1's block 1 root should NOT be pruned (it's the most recent for account1)" + ); + + // Account2 and Account3 should still be accessible at their recent blocks + let account2_root = forest.get_storage_map_root(account2, &slot_name, block51).unwrap(); + let account3_root = forest.get_storage_map_root(account3, &slot_name, block52).unwrap(); + + // Verify we can still get witnesses for account2 and account3 and verify against roots + let witness2_after = forest + .get_storage_map_witness(account2, &slot_name, block51, key1) + .expect("Account2 should still have accessible storage map after pruning account1"); + let witness3_after = forest + .get_storage_map_witness(account3, &slot_name, block52, key1) + .expect("Account3 should still have accessible storage map after pruning account1"); + 
+ // Verify witnesses against storage map roots + let proof2: SmtProof = witness2_after.into(); + assert_eq!( + proof2.compute_root(), + account2_root, + "Witness2 must verify against account2_root" + ); + + let proof3: SmtProof = witness3_after.into(); + assert_eq!( + proof3.compute_root(), + account3_root, + "Witness3 must verify against account3_root" + ); + + // The shared root should still be usable for account2 and account3 + // Even though account1's reference was removed, the SMT in the forest is preserved + // by the references from account2 and account3 + assert_ne!(account2_root, root1, "Account2's root should be different after update"); + assert_ne!(account3_root, root1, "Account3's root should be different after update"); +} + +#[test] +#[miden_node_test_macro::enable_logging] +fn inner_forest_retains_latest_after_100_blocks_and_pruning() { + use std::collections::BTreeMap; + + use miden_node_proto::domain::account::StorageMapEntries; + use miden_protocol::account::delta::{StorageMapDelta, StorageSlotDelta}; + + use crate::inner_forest::{HISTORICAL_BLOCK_RETENTION, InnerForest}; + + let mut forest = InnerForest::new(); + let account_id = AccountId::try_from(ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE).unwrap(); + let faucet_id = AccountId::try_from(ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET).unwrap(); + + let slot_map = StorageSlotName::mock(1); + + let key1 = num_to_word(100); + let key2 = num_to_word(200); + let value1 = num_to_word(1000); + let value2 = num_to_word(2000); + + // Block 1: Apply initial update with vault and storage + let block_1 = BlockNumber::from(1); + + // Create storage map with two entries + let mut map_delta = StorageMapDelta::default(); + map_delta.insert(key1, value1); + map_delta.insert(key2, value2); + + let raw = BTreeMap::from_iter([(slot_map.clone(), StorageSlotDelta::Map(map_delta))]); + let storage_delta = AccountStorageDelta::from_raw(raw); + + // Create vault with one asset + let asset = FungibleAsset::new(faucet_id, 
100).unwrap(); + let mut vault_delta = AccountVaultDelta::default(); + vault_delta.add_asset(asset.into()).unwrap(); + + let delta_1 = AccountDelta::new(account_id, storage_delta, vault_delta, Felt::ONE).unwrap(); + + forest.update_account(block_1, &delta_1).unwrap(); + + // Capture the roots from block 1 + let initial_vault_root = forest.get_vault_root(account_id, block_1).unwrap(); + let initial_storage_map_root = + forest.get_storage_map_root(account_id, &slot_map, block_1).unwrap(); + + // Blocks 2-100: Do nothing (no updates to this account) + // Simulate other activity by just advancing to block 100 + + let block_100 = BlockNumber::from(100); + + // Before pruning, verify we can still query block 1's data at block 100 + // (range query finds most recent at or before block 100) + let vault_root_before_prune = forest.get_vault_root(account_id, block_100); + assert_eq!( + vault_root_before_prune, + Some(initial_vault_root), + "Before pruning, should find block 1's vault root when querying at block 100" + ); + + let storage_root_before_prune = forest.get_storage_map_root(account_id, &slot_map, block_100); + assert_eq!( + storage_root_before_prune, + Some(initial_storage_map_root), + "Before pruning, should find block 1's storage root when querying at block 100" + ); + + // Prune at block 100 + // Block 1 is 99 blocks old, BUT it's the most recent entry for this account + // so it should NOT be pruned + let (vault_roots_removed, storage_roots_removed, storage_entries_removed) = + forest.prune(block_100); + + let cutoff_block = 100 - HISTORICAL_BLOCK_RETENTION; + assert_eq!(cutoff_block, 50, "Cutoff should be block 50 (100 - HISTORICAL_BLOCK_RETENTION)"); + assert_eq!( + vault_roots_removed, 0, + "Should NOT prune block 1 vault root (it's the most recent for this account)" + ); + assert_eq!( + storage_roots_removed, 0, + "Should NOT prune block 1 storage root (it's the most recent for this account/slot)" + ); + assert_eq!( + storage_entries_removed, 0, + "Should 
NOT prune block 1 storage entries (it's the most recent for this account/slot)" + ); + + // After pruning, we should STILL be able to access block 1's data + // because it's the most recent entry for this account + let vault_root_after_prune = forest.get_vault_root(account_id, block_100); + assert_eq!( + vault_root_after_prune, + Some(initial_vault_root), + "After pruning, should still find vault root (block 1 preserved as most recent)" + ); + + let storage_root_after_prune = forest.get_storage_map_root(account_id, &slot_map, block_100); + assert_eq!( + storage_root_after_prune, + Some(initial_storage_map_root), + "After pruning, should still find storage root (block 1 preserved as most recent)" + ); + + // Verify we can still get witnesses and entries and verify against root + let witness = forest + .get_storage_map_witness(account_id, &slot_map, block_100, key1) + .expect("Should be able to get witness for key1 after pruning"); + + let storage_root = forest.get_storage_map_root(account_id, &slot_map, block_100).unwrap(); + let proof: SmtProof = witness.into(); + assert_eq!(proof.compute_root(), storage_root, "Witness must verify against storage root"); + + let entries = forest + .storage_map_entries(account_id, slot_map.clone(), block_1) + .expect("Should have storage map entries after pruning"); + assert_matches!(&entries.entries, StorageMapEntries::AllEntries(entries) => { + assert_eq!(entries.len(), 2, "Should have 2 entries (key1 and key2)"); + assert!(entries.contains(&(key1, value1)), "Should contain key1 with value1"); + assert!(entries.contains(&(key2, value2)), "Should contain key2 with value2"); + }); + + // Now add an update at block 51 (within retention window) to test that old entries + // get pruned when newer entries exist + let block_51 = BlockNumber::from(51); + + // Update with new values + let value1_new = num_to_word(3000); + let mut map_delta_51 = StorageMapDelta::default(); + map_delta_51.insert(key1, value1_new); + + let raw_51 = 
BTreeMap::from_iter([(slot_map.clone(), StorageSlotDelta::Map(map_delta_51))]); + let storage_delta_51 = AccountStorageDelta::from_raw(raw_51); + + let asset_51 = FungibleAsset::new(faucet_id, 200).unwrap(); + let mut vault_delta_51 = AccountVaultDelta::default(); + vault_delta_51.add_asset(asset_51.into()).unwrap(); + + let delta_51 = + AccountDelta::new(account_id, storage_delta_51, vault_delta_51, Felt::new(51)).unwrap(); + + forest.update_account(block_51, &delta_51).unwrap(); + + // Prune again at block 100 + let (vault_roots_removed_2, storage_roots_removed_2, storage_entries_removed_2) = + forest.prune(block_100); + + // Now block 1 should be pruned because there's a newer entry at block 51 + assert_eq!(vault_roots_removed_2, 1, "Should prune block 1 vault root (block 51 is newer)"); + assert_eq!( + storage_roots_removed_2, 1, + "Should prune block 1 storage root (block 51 is newer)" + ); + assert_eq!( + storage_entries_removed_2, 0, + "Storage entries are LRU-cached, not counted in prune results" + ); + + // Now verify we can access the account state at block 100 + // (should find block 51's entry via range query) + let vault_root_at_100 = forest + .get_vault_root(account_id, block_100) + .expect("Should find vault root at block 100 (from block 51 entry)"); + + let _storage_root_at_100 = forest + .get_storage_map_root(account_id, &slot_map, block_100) + .expect("Should find storage root at block 100 (from block 51 entry)"); + + // The roots should be different from initial (state changed at block 51) + assert_ne!( + vault_root_at_100, initial_vault_root, + "Vault root should differ from initial (updated at block 51)" + ); + + // Verify we can get witnesses and entries for the updated state and verify against root + let witness = forest + .get_storage_map_witness(account_id, &slot_map, block_100, key1) + .expect("Should be able to get witness for key1"); + + let storage_root = forest.get_storage_map_root(account_id, &slot_map, block_100).unwrap(); + let 
proof: SmtProof = witness.into(); + assert_eq!(proof.compute_root(), storage_root, "Witness must verify against storage root"); + + let entries = forest + .storage_map_entries(account_id, slot_map.clone(), block_51) + .expect("Should have storage map entries"); + + match &entries.entries { + StorageMapEntries::AllEntries(entries) => { + assert_eq!(entries.len(), 2, "Should have 2 entries (key1 updated, key2 from block 1)"); + assert!( + entries.contains(&(key1, value1_new)), + "Should contain key1 with updated value" + ); + assert!( + entries.contains(&(key2, value2)), + "Should contain key2 with original value from block 1" + ); + }, + _ => panic!("Expected AllEntries"), + } + + // Verify querying at block 51 still works + let vault_root_at_51 = forest + .get_vault_root(account_id, block_51) + .expect("Should have vault root at block 51"); + assert_eq!(vault_root_at_51, vault_root_at_100); + + // Verify block 1 is no longer accessible + let vault_root_at_1 = forest.get_vault_root(account_id, block_1); + assert!( + vault_root_at_1.is_none(), + "Block 1 should not be accessible after pruning (block 51 is newer)" + ); +} + +#[test] +#[miden_node_test_macro::enable_logging] +fn inner_forest_preserves_most_recent_vault_only() { + use crate::inner_forest::InnerForest; + + let mut forest = InnerForest::new(); + let account_id = AccountId::try_from(ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE).unwrap(); + let faucet_id = AccountId::try_from(ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET).unwrap(); + + // Block 1: Create vault with asset + let block_1 = BlockNumber::from(1); + let asset = FungibleAsset::new(faucet_id, 500).unwrap(); + let mut vault_delta = AccountVaultDelta::default(); + vault_delta.add_asset(asset.into()).unwrap(); + + let delta_1 = + AccountDelta::new(account_id, AccountStorageDelta::default(), vault_delta, Felt::ONE) + .unwrap(); + + forest.update_account(block_1, &delta_1).unwrap(); + + let initial_vault_root = forest.get_vault_root(account_id, 
block_1).unwrap(); + + // Advance 100 blocks without any updates + let block_100 = BlockNumber::from(100); + + // Prune at block 100 + let (vault_roots_removed, storage_roots_removed, storage_entries_removed) = + forest.prune(block_100); + + // Vault from block 1 should NOT be pruned (it's the most recent) + assert_eq!( + vault_roots_removed, 0, + "Should NOT prune vault root (it's the most recent for this account)" + ); + assert_eq!(storage_roots_removed, 0, "No storage roots to prune"); + assert_eq!(storage_entries_removed, 0, "No storage entries to prune"); + + // Verify vault is still accessible at block 100 + let vault_root_at_100 = forest + .get_vault_root(account_id, block_100) + .expect("Should still have vault root at block 100"); + assert_eq!(vault_root_at_100, initial_vault_root, "Vault root should be preserved"); + + // Verify we can get witnesses for the vault and verify against vault root + let witnesses = forest + .get_vault_asset_witnesses( + account_id, + block_100, + [AssetVaultKey::new_unchecked(asset.vault_key().into())].into(), + ) + .expect("Should be able to get vault witness after pruning"); + + assert_eq!(witnesses.len(), 1, "Should have one witness"); + let witness = &witnesses[0]; + let proof: SmtProof = witness.clone().into(); + assert_eq!( + proof.compute_root(), + vault_root_at_100, + "Vault witness must verify against vault root" + ); +} + +#[test] +#[miden_node_test_macro::enable_logging] +fn inner_forest_preserves_most_recent_storage_map_only() { + use std::collections::BTreeMap; + + use miden_protocol::account::delta::{StorageMapDelta, StorageSlotDelta}; + + use crate::inner_forest::InnerForest; + + let mut forest = InnerForest::new(); + let account_id = AccountId::try_from(ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE).unwrap(); + + let slot_map = StorageSlotName::mock(1); + let key1 = num_to_word(100); + let value1 = num_to_word(1000); + + // Block 1: Create storage map + let block_1 = BlockNumber::from(1); + let mut map_delta 
= StorageMapDelta::default(); + map_delta.insert(key1, value1); + + let raw = BTreeMap::from_iter([(slot_map.clone(), StorageSlotDelta::Map(map_delta))]); + let storage_delta = AccountStorageDelta::from_raw(raw); + + let delta_1 = + AccountDelta::new(account_id, storage_delta, AccountVaultDelta::default(), Felt::ONE) + .unwrap(); + + forest.update_account(block_1, &delta_1).unwrap(); + + let initial_storage_root = forest.get_storage_map_root(account_id, &slot_map, block_1).unwrap(); + + // Advance 100 blocks without any updates + let block_100 = BlockNumber::from(100); + + // Prune at block 100 + let (vault_roots_removed, storage_roots_removed, storage_entries_removed) = + forest.prune(block_100); + + // Storage map from block 1 should NOT be pruned (it's the most recent) + assert_eq!(vault_roots_removed, 0, "No vault roots to prune"); + assert_eq!( + storage_roots_removed, 0, + "Should NOT prune storage map root (it's the most recent for this account/slot)" + ); + assert_eq!( + storage_entries_removed, 0, + "Should NOT prune storage entries (it's the most recent for this account/slot)" + ); + + // Verify storage map is still accessible at block 100 + let storage_root_at_100 = forest + .get_storage_map_root(account_id, &slot_map, block_100) + .expect("Should still have storage root at block 100"); + assert_eq!(storage_root_at_100, initial_storage_root, "Storage root should be preserved"); + + // Verify we can get witnesses for the storage map and verify against storage root + let witness = forest + .get_storage_map_witness(account_id, &slot_map, block_100, key1) + .expect("Should be able to get storage witness after pruning"); + + let proof: SmtProof = witness.into(); + assert_eq!( + proof.compute_root(), + storage_root_at_100, + "Storage witness must verify against storage root" + ); + + // Verify we can get all entries + let entries = forest + .storage_map_entries(account_id, slot_map.clone(), block_1) + .expect("Should have storage entries after pruning"); + + 
match &entries.entries { + miden_node_proto::domain::account::StorageMapEntries::AllEntries(entries) => { + assert_eq!(entries.len(), 1, "Should have 1 entry"); + assert_eq!(entries[0], (key1, value1), "Entry should match"); + }, + _ => panic!("Expected AllEntries"), + } +} + +#[test] +#[miden_node_test_macro::enable_logging] +fn inner_forest_preserves_most_recent_storage_value_slot() { + use std::collections::BTreeMap; + + use miden_protocol::account::delta::StorageSlotDelta; + + use crate::inner_forest::InnerForest; + + let mut forest = InnerForest::new(); + let account_id = AccountId::try_from(ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE).unwrap(); + + let slot_value = StorageSlotName::mock(1); + let value1 = num_to_word(5000); + + // Block 1: Create storage value slot + let block_1 = BlockNumber::from(1); + + let raw = BTreeMap::from_iter([(slot_value.clone(), StorageSlotDelta::Value(value1))]); + let storage_delta = AccountStorageDelta::from_raw(raw); + + let delta_1 = + AccountDelta::new(account_id, storage_delta, AccountVaultDelta::default(), Felt::ONE) + .unwrap(); + + forest.update_account(block_1, &delta_1).unwrap(); + + // Note: Value slots don't have roots in InnerForest - they're just part of the + // account storage header. The InnerForest only tracks map slots. + // So there's nothing to verify for value slots in the forest. 
+ + // This test documents that value slots are NOT tracked in InnerForest + // (they don't need to be, since their digest is 1:1 with the value) + + // Advance 100 blocks without any updates + let block_100 = BlockNumber::from(100); + + // Prune at block 100 + let (vault_roots_removed, storage_roots_removed, storage_entries_removed) = + forest.prune(block_100); + + // No roots should be pruned because there are no map slots + assert_eq!(vault_roots_removed, 0, "No vault roots in this test"); + assert_eq!( + storage_roots_removed, 0, + "Value slots don't create storage roots in InnerForest" + ); + assert_eq!( + storage_entries_removed, 0, + "Value slots don't create storage entries in InnerForest" + ); + + // Verify no storage map roots exist for this account + let storage_root = forest.get_storage_map_root(account_id, &slot_value, block_100); + assert!( + storage_root.is_none(), + "Value slots don't have storage map roots in InnerForest" + ); +} + +#[test] +#[miden_node_test_macro::enable_logging] +fn inner_forest_preserves_mixed_slots_independently() { + use std::collections::BTreeMap; + + use miden_protocol::account::delta::{StorageMapDelta, StorageSlotDelta}; + + use crate::inner_forest::InnerForest; + + let mut forest = InnerForest::new(); + let account_id = AccountId::try_from(ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE).unwrap(); + let faucet_id = AccountId::try_from(ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET).unwrap(); + + let slot_map_a = StorageSlotName::mock(1); + let slot_map_b = StorageSlotName::mock(2); + let slot_value = StorageSlotName::mock(3); + + let key1 = num_to_word(100); + let value1 = num_to_word(1000); + let value_slot_data = num_to_word(5000); + + // Block 1: Create vault + two map slots + one value slot + let block_1 = BlockNumber::from(1); + + let asset = FungibleAsset::new(faucet_id, 100).unwrap(); + let mut vault_delta = AccountVaultDelta::default(); + vault_delta.add_asset(asset.into()).unwrap(); + + let mut map_delta_a = 
StorageMapDelta::default(); + map_delta_a.insert(key1, value1); + + let mut map_delta_b = StorageMapDelta::default(); + map_delta_b.insert(key1, value1); + + let raw = BTreeMap::from_iter([ + (slot_map_a.clone(), StorageSlotDelta::Map(map_delta_a)), + (slot_map_b.clone(), StorageSlotDelta::Map(map_delta_b)), + (slot_value.clone(), StorageSlotDelta::Value(value_slot_data)), + ]); + let storage_delta = AccountStorageDelta::from_raw(raw); + + let delta_1 = AccountDelta::new(account_id, storage_delta, vault_delta, Felt::ONE).unwrap(); + + forest.update_account(block_1, &delta_1).unwrap(); + + let initial_vault_root = forest.get_vault_root(account_id, block_1).unwrap(); + let initial_map_a_root = forest.get_storage_map_root(account_id, &slot_map_a, block_1).unwrap(); + let initial_map_b_root = forest.get_storage_map_root(account_id, &slot_map_b, block_1).unwrap(); + + // Block 51: Update only map_a (within retention window) + let block_51 = BlockNumber::from(51); + let value2 = num_to_word(2000); + + let mut map_delta_a_update = StorageMapDelta::default(); + map_delta_a_update.insert(key1, value2); + + let raw_51 = + BTreeMap::from_iter([(slot_map_a.clone(), StorageSlotDelta::Map(map_delta_a_update))]); + let storage_delta_51 = AccountStorageDelta::from_raw(raw_51); + + let delta_51 = AccountDelta::new( + account_id, + storage_delta_51, + AccountVaultDelta::default(), + Felt::new(51), + ) + .unwrap(); + + forest.update_account(block_51, &delta_51).unwrap(); + + // Advance to block 100 + let block_100 = BlockNumber::from(100); + + // Prune at block 100 + let (vault_roots_removed, storage_roots_removed, storage_entries_removed) = + forest.prune(block_100); + + // Vault: block 1 is most recent, should NOT be pruned + // Map A: block 1 is old (block 51 is newer), SHOULD be pruned + // Map B: block 1 is most recent, should NOT be pruned + assert_eq!( + vault_roots_removed, 0, + "Vault root from block 1 should NOT be pruned (most recent)" + ); + assert_eq!( + 
storage_roots_removed, 1, + "Map A from block 1 should be pruned (block 51 is newer); Map B should NOT" + ); + assert_eq!( + storage_entries_removed, 0, + "Storage entries are LRU-cached, not counted in prune results" + ); + + // Verify vault is still accessible + let vault_root_at_100 = forest + .get_vault_root(account_id, block_100) + .expect("Vault should be accessible"); + assert_eq!(vault_root_at_100, initial_vault_root, "Vault should be from block 1"); + + // Verify map_a is accessible (from block 51) + let map_a_root_at_100 = forest + .get_storage_map_root(account_id, &slot_map_a, block_100) + .expect("Map A should be accessible"); + assert_ne!( + map_a_root_at_100, initial_map_a_root, + "Map A should be from block 51, not block 1" + ); + + // Verify map_b is still accessible (from block 1) + let map_b_root_at_100 = forest + .get_storage_map_root(account_id, &slot_map_b, block_100) + .expect("Map B should be accessible"); + assert_eq!( + map_b_root_at_100, initial_map_b_root, + "Map B should still be from block 1 (most recent)" + ); + + // Verify map_a block 1 is no longer accessible + let map_a_root_at_1 = forest.get_storage_map_root(account_id, &slot_map_a, block_1); + assert!(map_a_root_at_1.is_none(), "Map A block 1 should be pruned"); + + // Verify map_b block 1 IS still accessible + let map_b_root_at_1 = forest.get_storage_map_root(account_id, &slot_map_b, block_1); + assert!(map_b_root_at_1.is_some(), "Map B block 1 should NOT be pruned (most recent)"); +} diff --git a/crates/store/src/inner_forest/mod.rs b/crates/store/src/inner_forest/mod.rs index a63c92276..4b340e349 100644 --- a/crates/store/src/inner_forest/mod.rs +++ b/crates/store/src/inner_forest/mod.rs @@ -1,5 +1,7 @@ -use std::collections::{BTreeMap, BTreeSet}; +use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet}; +use std::num::NonZeroUsize; +use lru::LruCache; use miden_node_proto::domain::account::{AccountStorageMapDetails, StorageMapEntries}; use 
miden_protocol::account::delta::{AccountDelta, AccountStorageDelta, AccountVaultDelta}; use miden_protocol::account::{ @@ -16,10 +18,24 @@ use miden_protocol::crypto::merkle::{EmptySubtreeRoots, MerkleError}; use miden_protocol::errors::{AssetError, StorageMapError}; use miden_protocol::{EMPTY_WORD, Word}; use thiserror::Error; +use tracing::instrument; + +use crate::COMPONENT; #[cfg(test)] mod tests; +// CONSTANTS +// ================================================================================================ + +/// Number of historical blocks to retain in the in-memory forest. +/// Entries older than `chain_tip - HISTORICAL_BLOCK_RETENTION` will be pruned. +pub const HISTORICAL_BLOCK_RETENTION: u32 = 50; + +/// Default size for the LRU cache of latest storage map entries. +/// Used to serve `SlotData::All` queries for the most recent block. +const DEFAULT_STORAGE_CACHE_ENTRIES_SIZE: usize = 10_000; + // ERRORS // ================================================================================================ @@ -52,6 +68,12 @@ pub enum WitnessError { // INNER FOREST // ================================================================================================ +/// Snapshot of storage map entries at a specific block. +struct StorageSnapshot { + block_num: BlockNumber, + entries: BTreeMap, +} + /// Container for forest-related state that needs to be updated atomically. pub(crate) struct InnerForest { /// `SmtForest` for efficient account storage reconstruction. @@ -60,15 +82,34 @@ pub(crate) struct InnerForest { /// Maps (`account_id`, `slot_name`, `block_num`) to SMT root. /// Populated during block import for all storage map slots. + /// + /// Used for `SlotData::MapKeys` queries (SMT proof generation). + /// Works for all historical blocks within retention window. + /// + /// Attention: Must be a `BTreeMap`, since not every block contains a value here, so we need to + /// be able to query the previous blocks cheaply. 
storage_map_roots: BTreeMap<(AccountId, StorageSlotName, BlockNumber), Word>, - /// Maps (`account_id`, `slot_name`, `block_num`) to all key-value entries in that storage map. - /// Accumulated from deltas - each block's entries include all entries up to that point. - storage_entries: BTreeMap<(AccountId, StorageSlotName, BlockNumber), BTreeMap>, + /// LRU cache of latest storage map entries for `SlotData::All` queries. + /// Only stores the most recent snapshot per (account, slot). + /// Historical queries fall back to DB. + storage_entries_per_user_block_slot: LruCache<(AccountId, StorageSlotName), StorageSnapshot>, + + vault_refcount: HashMap, + storage_slots_refcount: HashMap, /// Maps (`account_id`, `block_num`) to vault SMT root. /// Tracks asset vault versions across all blocks with structural sharing. + /// + /// Attention: Must be a `BTreeMap`, since not every block contains a value here, so we need to + /// be able to query the previous blocks cheaply. vault_roots: BTreeMap<(AccountId, BlockNumber), Word>, + + /// Tracks vault roots by block number for pruning. + vault_roots_by_block: BTreeMap>, + + /// Tracks storage map roots by block number for pruning. 
+ storage_slots_by_block: BTreeMap>, } impl InnerForest { @@ -76,8 +117,14 @@ impl InnerForest { Self { forest: SmtForest::new(), storage_map_roots: BTreeMap::new(), - storage_entries: BTreeMap::new(), + storage_entries_per_user_block_slot: LruCache::new( + NonZeroUsize::new(DEFAULT_STORAGE_CACHE_ENTRIES_SIZE).unwrap(), + ), + vault_refcount: HashMap::new(), + storage_slots_refcount: HashMap::new(), vault_roots: BTreeMap::new(), + vault_roots_by_block: BTreeMap::new(), + storage_slots_by_block: BTreeMap::new(), } } @@ -89,6 +136,24 @@ impl InnerForest { *EmptySubtreeRoots::entry(SMT_DEPTH, 0) } + fn increment_refcount(map: &mut HashMap, root: Word) { + let entry = map.entry(root).or_insert(0); + *entry += 1; + } + + fn decrement_refcount(map: &mut HashMap, root: Word) -> bool { + let Some(count) = map.get_mut(&root) else { + return false; + }; + if *count == 1 { + map.remove(&root); + true + } else { + *count -= 1; + false + } + } + /// Retrieves a vault root for the specified account at or before the specified block. pub(crate) fn get_vault_root( &self, @@ -181,35 +246,37 @@ impl InnerForest { Some(proofs.map(|proofs| AccountStorageMapDetails::from_proofs(slot_name, proofs))) } - /// Returns all key-value entries for a specific account storage slot at or before a block. + /// Returns all key-value entries for a specific account storage slot at the latest cached + /// block. Historical queries fall back to DB reconstruction. + /// + /// Returns `None` if: + /// - No entries exist for this account/slot + /// - Query is for a historical block (not the most recent) /// - /// Uses range query semantics: finds the most recent entries at or before `block_num`. - /// Returns `None` if no entries exist for this account/slot up to the given block. /// Returns `LimitExceeded` if there are too many entries to return. 
pub(crate) fn storage_map_entries( - &self, + &mut self, account_id: AccountId, slot_name: StorageSlotName, block_num: BlockNumber, ) -> Option { - // Find the most recent entries at or before block_num - let entries = self - .storage_entries - .range( - (account_id, slot_name.clone(), BlockNumber::GENESIS) - ..=(account_id, slot_name.clone(), block_num), - ) - .next_back() - .map(|(_, entries)| entries)?; + // Get cached snapshot + let snapshot = + self.storage_entries_per_user_block_slot.get(&(account_id, slot_name.clone()))?; - if entries.len() > AccountStorageMapDetails::MAX_RETURN_ENTRIES { + // Only serve queries for the latest block + if snapshot.block_num != block_num { + return None; // Historical query - caller should use DB + } + + if snapshot.entries.len() > AccountStorageMapDetails::MAX_RETURN_ENTRIES { return Some(AccountStorageMapDetails { slot_name, entries: StorageMapEntries::LimitExceeded, }); } - let entries = Vec::from_iter(entries.iter().map(|(k, v)| (*k, *v))); + let entries = Vec::from_iter(snapshot.entries.iter().map(|(k, v)| (*k, *v))); Some(AccountStorageMapDetails::from_forest_entries(slot_name, entries)) } @@ -229,6 +296,7 @@ impl InnerForest { /// # Errors /// /// Returns an error if applying a vault delta results in a negative balance. 
+ #[instrument(target = COMPONENT, skip_all, fields(block.number = %block_num))] pub(crate) fn apply_block_updates( &mut self, block_num: BlockNumber, @@ -245,6 +313,9 @@ impl InnerForest { "Updated forest with account delta" ); } + + let _ = self.prune(block_num); + Ok(()) } @@ -312,6 +383,8 @@ impl InnerForest { // anything into the forest) if delta.is_empty() { self.vault_roots.insert((account_id, block_num), prev_root); + self.vault_roots_by_block.entry(block_num).or_default().push(account_id); + Self::increment_refcount(&mut self.vault_refcount, prev_root); return; } @@ -340,6 +413,8 @@ impl InnerForest { .expect("forest insertion should succeed"); self.vault_roots.insert((account_id, block_num), new_root); + self.vault_roots_by_block.entry(block_num).or_default().push(account_id); + Self::increment_refcount(&mut self.vault_refcount, new_root); tracing::debug!( target: crate::COMPONENT, @@ -425,6 +500,8 @@ impl InnerForest { .expect("forest insertion should succeed"); self.vault_roots.insert((account_id, block_num), new_root); + self.vault_roots_by_block.entry(block_num).or_default().push(account_id); + Self::increment_refcount(&mut self.vault_refcount, new_root); tracing::debug!( target: crate::COMPONENT, @@ -455,23 +532,6 @@ impl InnerForest { .map_or_else(Self::empty_smt_root, |(_, root)| *root) } - /// Retrieves the most recent entries in the specified storage map. If no storage map exists - /// returns an empty map. - fn get_latest_storage_map_entries( - &self, - account_id: AccountId, - slot_name: &StorageSlotName, - ) -> BTreeMap { - self.storage_entries - .range( - (account_id, slot_name.clone(), BlockNumber::GENESIS) - ..(account_id, slot_name.clone(), BlockNumber::from(u32::MAX)), - ) - .next_back() - .map(|(_, entries)| entries.clone()) - .unwrap_or_default() - } - /// Inserts all storage maps from the provided storage delta into the forest. /// /// Assumes that storage maps for the provided account are not in the forest already. 
@@ -501,12 +561,21 @@ impl InnerForest { .collect(); // if the delta is empty, make sure we create an entry in the storage map roots map - // and storage entries map (so storage_map_entries() queries work) + // and update the cache if map_entries.is_empty() { self.storage_map_roots .insert((account_id, slot_name.clone(), block_num), prev_root); - self.storage_entries - .insert((account_id, slot_name.clone(), block_num), BTreeMap::new()); + self.storage_slots_by_block + .entry(block_num) + .or_default() + .push((account_id, slot_name.clone())); + Self::increment_refcount(&mut self.storage_slots_refcount, prev_root); + + // Update cache with empty map + self.storage_entries_per_user_block_slot.put( + (account_id, slot_name.clone()), + StorageSnapshot { block_num, entries: BTreeMap::new() }, + ); continue; } @@ -519,16 +588,19 @@ impl InnerForest { self.storage_map_roots .insert((account_id, slot_name.clone(), block_num), new_root); + self.storage_slots_by_block + .entry(block_num) + .or_default() + .push((account_id, slot_name.clone())); + Self::increment_refcount(&mut self.storage_slots_refcount, new_root); assert!(!map_entries.is_empty(), "a non-empty delta should have entries"); let num_entries = map_entries.len(); - // keep track of the state of storage map entries - // TODO: this is a temporary solution until the LargeSmtForest is implemented as - // tracking multiple versions of all storage maps will be prohibitively expensive - let map_entries = BTreeMap::from_iter(map_entries); - self.storage_entries - .insert((account_id, slot_name.clone(), block_num), map_entries); + // Update cache with the entries from this insertion + let entries = BTreeMap::from_iter(map_entries); + self.storage_entries_per_user_block_slot + .put((account_id, slot_name.clone()), StorageSnapshot { block_num, entries }); tracing::debug!( target: crate::COMPONENT, @@ -571,21 +643,30 @@ impl InnerForest { self.storage_map_roots .insert((account_id, slot_name.clone(), block_num), new_root); 
- - // merge the delta with the latest entries in the map - // TODO: this is a temporary solution until the LargeSmtForest is implemented as - // tracking multiple versions of all storage maps will be prohibitively expensive - let mut latest_entries = self.get_latest_storage_map_entries(account_id, slot_name); - for (key, value) in &delta_entries { - if *value == EMPTY_WORD { - latest_entries.remove(key); + self.storage_slots_by_block + .entry(block_num) + .or_default() + .push((account_id, slot_name.clone())); + Self::increment_refcount(&mut self.storage_slots_refcount, new_root); + + // Update cache by merging delta with latest entries + let key = (account_id, slot_name.clone()); + let mut latest_entries = self + .storage_entries_per_user_block_slot + .get(&key) + .map(|s| s.entries.clone()) + .unwrap_or_default(); + + for (k, v) in &delta_entries { + if *v == EMPTY_WORD { + latest_entries.remove(k); } else { - latest_entries.insert(*key, *value); + latest_entries.insert(*k, *v); } } - self.storage_entries - .insert((account_id, slot_name.clone(), block_num), latest_entries); + self.storage_entries_per_user_block_slot + .put(key, StorageSnapshot { block_num, entries: latest_entries }); tracing::debug!( target: crate::COMPONENT, @@ -597,4 +678,135 @@ impl InnerForest { ); } } + + // PRUNING + // -------------------------------------------------------------------------------------------- + + /// Prunes old entries from the in-memory forest data structures. + /// + /// Only iterates over blocks in the pruning window (before cutoff). For each affected account + /// or slot, checks if there's a newer entry before pruning - preserving the most recent state. + /// + /// The `SmtForest` itself is not pruned directly as it uses structural sharing and old roots + /// are naturally garbage-collected when they become unreachable. + /// + /// Note: Returns (`vault_roots_removed`, `storage_roots_removed`). 
Storage entries count is + /// no longer tracked since we use an LRU cache. + #[instrument(target = COMPONENT, skip_all, fields(block.number = %chain_tip), ret)] + pub(crate) fn prune(&mut self, chain_tip: BlockNumber) -> (usize, usize, usize) { + let cutoff_block = + BlockNumber::from(chain_tip.as_u32().saturating_sub(HISTORICAL_BLOCK_RETENTION)); + + let vault_roots_removed = self.prune_vault_roots(cutoff_block); + let storage_roots_removed = self.prune_storage_roots(cutoff_block); + + // Cache is self-pruning via LRU eviction + (vault_roots_removed, storage_roots_removed, 0) + } + + /// Prunes vault roots beyond the cutoff block. + /// + /// Only iterates over blocks in the pruning window, then for each affected account checks + /// if there's a newer entry before pruning. + fn prune_vault_roots(&mut self, cutoff_block: BlockNumber) -> usize { + // Get blocks to prune (only blocks before cutoff) + let blocks_to_check: Vec = self + .vault_roots_by_block + .range(..cutoff_block) + .map(|(block, _)| *block) + .collect(); + + let mut roots_to_prune = HashSet::new(); + let mut roots_removed = 0usize; + + for block in blocks_to_check { + let Some(accounts) = self.vault_roots_by_block.remove(&block) else { + continue; + }; + + let mut accounts_to_keep = Vec::new(); + + for account_id in accounts { + // Check if there's a newer entry for this account + let has_newer_entry = self + .vault_roots + .range((account_id, block.child())..=(account_id, BlockNumber::from(u32::MAX))) + .next() + .is_some(); + + if has_newer_entry { + if let Some(root) = self.vault_roots.remove(&(account_id, block)) { + roots_removed += 1; + if Self::decrement_refcount(&mut self.vault_refcount, root) { + roots_to_prune.insert(root); + } + } + } else { + accounts_to_keep.push(account_id); + } + } + + if !accounts_to_keep.is_empty() { + self.vault_roots_by_block.insert(block, accounts_to_keep); + } + } + + self.forest.pop_smts(roots_to_prune); + roots_removed + } + + /// Prunes storage map roots 
older than/before the cutoff block. + /// + /// Only iterates over blocks in the pruning window, then for each affected slot checks + /// if there's a newer entry before pruning. + fn prune_storage_roots(&mut self, cutoff_block: BlockNumber) -> usize { + // Get blocks to prune (only blocks before cutoff) + let blocks_to_check: Vec = self + .storage_slots_by_block + .range(..cutoff_block) + .map(|(block, _)| *block) + .collect(); + + let mut roots_to_prune = HashSet::new(); + let mut roots_removed = 0usize; + + for block in blocks_to_check { + let Some(slots) = self.storage_slots_by_block.remove(&block) else { + continue; + }; + + let mut slots_to_keep = Vec::new(); + + for (account_id, slot_name) in slots { + // Check if there's a newer entry for this account/slot + let has_newer_entry = self + .storage_map_roots + .range( + (account_id, slot_name.clone(), block.child()) + ..=(account_id, slot_name.clone(), BlockNumber::from(u32::MAX)), + ) + .next() + .is_some(); + + if has_newer_entry { + let key = (account_id, slot_name.clone(), block); + if let Some(root) = self.storage_map_roots.remove(&key) { + roots_removed += 1; + if Self::decrement_refcount(&mut self.storage_slots_refcount, root) { + roots_to_prune.insert(root); + } + } + } else { + slots_to_keep.push((account_id, slot_name)); + } + } + + if !slots_to_keep.is_empty() { + self.storage_slots_by_block.insert(block, slots_to_keep); + } + } + + self.forest.pop_smts(roots_to_prune); + roots_removed + } } diff --git a/crates/store/src/inner_forest/tests.rs b/crates/store/src/inner_forest/tests.rs index 5fc0cc6c0..4dbef7ff8 100644 --- a/crates/store/src/inner_forest/tests.rs +++ b/crates/store/src/inner_forest/tests.rs @@ -1,8 +1,11 @@ +use assert_matches::assert_matches; use miden_protocol::account::AccountCode; -use miden_protocol::asset::{Asset, AssetVault, FungibleAsset}; +use miden_protocol::asset::{Asset, AssetVault, AssetVaultKey, FungibleAsset}; +use miden_protocol::crypto::merkle::smt::SmtProof; use 
miden_protocol::testing::account_id::{ ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET, ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE, + ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE_2, }; use miden_protocol::{Felt, FieldElement}; @@ -26,7 +29,6 @@ fn dummy_partial_delta( vault_delta: AccountVaultDelta, storage_delta: AccountStorageDelta, ) -> AccountDelta { - // For partial deltas, nonce_delta must be > 0 if there are changes let nonce_delta = if vault_delta.is_empty() && storage_delta.is_empty() { Felt::ZERO } else { @@ -39,43 +41,41 @@ fn dummy_partial_delta( fn dummy_full_state_delta(account_id: AccountId, assets: &[Asset]) -> AccountDelta { use miden_protocol::account::{Account, AccountStorage}; - // Create a minimal account with the given assets let vault = AssetVault::new(assets).unwrap(); let storage = AccountStorage::new(vec![]).unwrap(); let code = AccountCode::mock(); let nonce = Felt::ONE; let account = Account::new(account_id, vault, storage, code, nonce, None).unwrap(); - - // Convert to delta - this will be a full-state delta because it has code AccountDelta::try_from(account).unwrap() } +// INITIALIZATION & BASIC OPERATIONS +// ================================================================================================ + #[test] -fn test_empty_smt_root_is_recognized() { +fn empty_smt_root_is_recognized() { use miden_protocol::crypto::merkle::smt::Smt; let empty_root = InnerForest::empty_smt_root(); - // Verify an empty SMT has the expected root assert_eq!(Smt::default().root(), empty_root); - // Test that SmtForest accepts this root in batch_insert let mut forest = SmtForest::new(); let entries = vec![(Word::from([1u32, 2, 3, 4]), Word::from([5u32, 6, 7, 8]))]; - assert!(forest.batch_insert(empty_root, entries).is_ok()); + assert_matches!(forest.batch_insert(empty_root, entries), Ok(_)); } #[test] -fn test_inner_forest_basic_initialization() { +fn inner_forest_basic_initialization() { let forest = InnerForest::new(); 
assert!(forest.storage_map_roots.is_empty()); assert!(forest.vault_roots.is_empty()); } #[test] -fn test_update_account_with_empty_deltas() { +fn update_account_with_empty_deltas() { let mut forest = InnerForest::new(); let account_id = dummy_account(); let block_num = BlockNumber::GENESIS.child(); @@ -88,37 +88,21 @@ fn test_update_account_with_empty_deltas() { forest.update_account(block_num, &delta).unwrap(); - // Empty deltas should not create entries assert!(!forest.vault_roots.contains_key(&(account_id, block_num))); assert!(forest.storage_map_roots.is_empty()); } -#[test] -fn test_update_vault_with_fungible_asset() { - let mut forest = InnerForest::new(); - let account_id = dummy_account(); - let faucet_id = dummy_faucet(); - let block_num = BlockNumber::GENESIS.child(); - - let asset = dummy_fungible_asset(faucet_id, 100); - let mut vault_delta = AccountVaultDelta::default(); - vault_delta.add_asset(asset).unwrap(); - - let delta = dummy_partial_delta(account_id, vault_delta, AccountStorageDelta::default()); - forest.update_account(block_num, &delta).unwrap(); - - let vault_root = forest.vault_roots[&(account_id, block_num)]; - assert_ne!(vault_root, EMPTY_WORD); -} +// VAULT TESTS +// ================================================================================================ #[test] -fn test_compare_partial_vs_full_state_delta_vault() { +fn vault_partial_vs_full_state_produces_same_root() { let account_id = dummy_account(); let faucet_id = dummy_faucet(); let block_num = BlockNumber::GENESIS.child(); let asset = dummy_fungible_asset(faucet_id, 100); - // Approach 1: Partial delta (simulates block application) + // Partial delta (block application) let mut forest_partial = InnerForest::new(); let mut vault_delta = AccountVaultDelta::default(); vault_delta.add_asset(asset).unwrap(); @@ -126,12 +110,11 @@ fn test_compare_partial_vs_full_state_delta_vault() { dummy_partial_delta(account_id, vault_delta, AccountStorageDelta::default()); 
forest_partial.update_account(block_num, &partial_delta).unwrap(); - // Approach 2: Full-state delta (simulates DB reconstruction) + // Full-state delta (DB reconstruction) let mut forest_full = InnerForest::new(); let full_delta = dummy_full_state_delta(account_id, &[asset]); forest_full.update_account(block_num, &full_delta).unwrap(); - // Both approaches must produce identical vault roots let root_partial = forest_partial.vault_roots.get(&(account_id, block_num)).unwrap(); let root_full = forest_full.vault_roots.get(&(account_id, block_num)).unwrap(); @@ -140,102 +123,20 @@ fn test_compare_partial_vs_full_state_delta_vault() { } #[test] -fn test_incremental_vault_updates() { - let mut forest = InnerForest::new(); - let account_id = dummy_account(); - let faucet_id = dummy_faucet(); - - // Block 1: 100 tokens - let block_1 = BlockNumber::GENESIS.child(); - let mut vault_delta_1 = AccountVaultDelta::default(); - vault_delta_1.add_asset(dummy_fungible_asset(faucet_id, 100)).unwrap(); - let delta_1 = dummy_partial_delta(account_id, vault_delta_1, AccountStorageDelta::default()); - forest.update_account(block_1, &delta_1).unwrap(); - let root_1 = forest.vault_roots[&(account_id, block_1)]; - - // Block 2: 150 tokens (update) - let block_2 = block_1.child(); - let mut vault_delta_2 = AccountVaultDelta::default(); - vault_delta_2.add_asset(dummy_fungible_asset(faucet_id, 150)).unwrap(); - let delta_2 = dummy_partial_delta(account_id, vault_delta_2, AccountStorageDelta::default()); - forest.update_account(block_2, &delta_2).unwrap(); - let root_2 = forest.vault_roots[&(account_id, block_2)]; - - assert_ne!(root_1, root_2); -} - -#[test] -fn test_vault_state_persists_across_blocks_without_changes() { - // Regression test for issue #7: vault state should persist across blocks - // where no changes occur, not reset to empty. 
+fn vault_incremental_updates_with_add_and_remove() { let mut forest = InnerForest::new(); let account_id = dummy_account(); let faucet_id = dummy_faucet(); - // Helper to query vault root at or before a block (range query) - let get_vault_root = |forest: &InnerForest, account_id: AccountId, block_num: BlockNumber| { - forest - .vault_roots - .range((account_id, BlockNumber::GENESIS)..=(account_id, block_num)) - .next_back() - .map(|(_, root)| *root) - }; - // Block 1: Add 100 tokens let block_1 = BlockNumber::GENESIS.child(); let mut vault_delta_1 = AccountVaultDelta::default(); vault_delta_1.add_asset(dummy_fungible_asset(faucet_id, 100)).unwrap(); let delta_1 = dummy_partial_delta(account_id, vault_delta_1, AccountStorageDelta::default()); forest.update_account(block_1, &delta_1).unwrap(); - let root_after_block_1 = forest.vault_roots[&(account_id, block_1)]; - - // Blocks 2-5: No changes to this account (simulated by not calling update_account) - // This means no entries are added to vault_roots for these blocks. - - // Block 6: Add 50 more tokens - // The previous root lookup should find block_1's root, not return empty. 
- let block_6 = BlockNumber::from(6); - let mut vault_delta_6 = AccountVaultDelta::default(); - vault_delta_6.add_asset(dummy_fungible_asset(faucet_id, 150)).unwrap(); // 100 + 50 = 150 - let delta_6 = dummy_partial_delta(account_id, vault_delta_6, AccountStorageDelta::default()); - forest.update_account(block_6, &delta_6).unwrap(); - - // The root at block 6 should be different from block 1 (we added more tokens) - let root_after_block_6 = forest.vault_roots[&(account_id, block_6)]; - assert_ne!(root_after_block_1, root_after_block_6); - - // Verify range query finds the correct previous root for intermediate blocks - // Block 3 should return block 1's root (most recent before block 3) - let root_at_block_3 = get_vault_root(&forest, account_id, BlockNumber::from(3)); - assert_eq!(root_at_block_3, Some(root_after_block_1)); - - // Block 5 should also return block 1's root - let root_at_block_5 = get_vault_root(&forest, account_id, BlockNumber::from(5)); - assert_eq!(root_at_block_5, Some(root_after_block_1)); - - // Block 6 should return block 6's root - let root_at_block_6 = get_vault_root(&forest, account_id, block_6); - assert_eq!(root_at_block_6, Some(root_after_block_6)); -} - -#[test] -fn test_partial_delta_applies_fungible_changes_correctly() { - // Regression test for issue #8: partial deltas should apply changes to previous balance, - // not treat amounts as absolute values. 
- let mut forest = InnerForest::new(); - let account_id = dummy_account(); - let faucet_id = dummy_faucet(); - - // Block 1: Add 100 tokens (partial delta with +100) - let block_1 = BlockNumber::GENESIS.child(); - let mut vault_delta_1 = AccountVaultDelta::default(); - vault_delta_1.add_asset(dummy_fungible_asset(faucet_id, 100)).unwrap(); - let delta_1 = dummy_partial_delta(account_id, vault_delta_1, AccountStorageDelta::default()); - forest.update_account(block_1, &delta_1).unwrap(); let root_after_100 = forest.vault_roots[&(account_id, block_1)]; - // Block 2: Add 50 more tokens (partial delta with +50) - // Result should be 150 tokens, not 50 tokens + // Block 2: Add 50 more tokens (result: 150 tokens) let block_2 = block_1.child(); let mut vault_delta_2 = AccountVaultDelta::default(); vault_delta_2.add_asset(dummy_fungible_asset(faucet_id, 50)).unwrap(); @@ -243,11 +144,9 @@ fn test_partial_delta_applies_fungible_changes_correctly() { forest.update_account(block_2, &delta_2).unwrap(); let root_after_150 = forest.vault_roots[&(account_id, block_2)]; - // Roots should be different (100 tokens vs 150 tokens) assert_ne!(root_after_100, root_after_150); - // Block 3: Remove 30 tokens (partial delta with -30) - // Result should be 120 tokens + // Block 3: Remove 30 tokens (result: 120 tokens) let block_3 = block_2.child(); let mut vault_delta_3 = AccountVaultDelta::default(); vault_delta_3.remove_asset(dummy_fungible_asset(faucet_id, 30)).unwrap(); @@ -255,11 +154,9 @@ fn test_partial_delta_applies_fungible_changes_correctly() { forest.update_account(block_3, &delta_3).unwrap(); let root_after_120 = forest.vault_roots[&(account_id, block_3)]; - // Root should change again assert_ne!(root_after_150, root_after_120); - // Verify by creating a fresh forest with a full-state delta of 120 tokens - // The roots should match + // Verify by comparing to full-state delta let mut fresh_forest = InnerForest::new(); let full_delta = dummy_full_state_delta(account_id, 
&[dummy_fungible_asset(faucet_id, 120)]); fresh_forest.update_account(block_3, &full_delta).unwrap(); @@ -269,96 +166,59 @@ fn test_partial_delta_applies_fungible_changes_correctly() { } #[test] -fn test_partial_delta_across_long_block_range() { - // Validation test: partial deltas should work across 101+ blocks. - // - // This test passes now because InnerForest keeps all history. Once pruning is implemented - // (estimated ~50 blocks), this test will fail unless DB fallback is also implemented. - // When that happens, the test should be updated to use DB fallback or converted to an - // integration test that has DB access. +fn vault_state_persists_across_block_gaps() { let mut forest = InnerForest::new(); let account_id = dummy_account(); let faucet_id = dummy_faucet(); - // Block 1: Add 1000 tokens + let get_vault_root = |forest: &InnerForest, account_id: AccountId, block_num: BlockNumber| { + forest + .vault_roots + .range((account_id, BlockNumber::GENESIS)..=(account_id, block_num)) + .next_back() + .map(|(_, root)| *root) + }; + + // Block 1: Add 100 tokens let block_1 = BlockNumber::GENESIS.child(); let mut vault_delta_1 = AccountVaultDelta::default(); - vault_delta_1.add_asset(dummy_fungible_asset(faucet_id, 1000)).unwrap(); + vault_delta_1.add_asset(dummy_fungible_asset(faucet_id, 100)).unwrap(); let delta_1 = dummy_partial_delta(account_id, vault_delta_1, AccountStorageDelta::default()); forest.update_account(block_1, &delta_1).unwrap(); - let root_after_1000 = forest.vault_roots[&(account_id, block_1)]; - - // Blocks 2-100: No changes to this account (simulating long gap) - - // Block 101: Add 500 more tokens (partial delta with +500) - // This requires looking up block 1's state across a 100-block gap. 
- let block_101 = BlockNumber::from(101); - let mut vault_delta_101 = AccountVaultDelta::default(); - vault_delta_101.add_asset(dummy_fungible_asset(faucet_id, 500)).unwrap(); - let delta_101 = - dummy_partial_delta(account_id, vault_delta_101, AccountStorageDelta::default()); - forest.update_account(block_101, &delta_101).unwrap(); - let root_after_1500 = forest.vault_roots[&(account_id, block_101)]; - - // Roots should be different (1000 tokens vs 1500 tokens) - assert_ne!(root_after_1000, root_after_1500); - - // Verify the final state matches a fresh forest with 1500 tokens - let mut fresh_forest = InnerForest::new(); - let full_delta = dummy_full_state_delta(account_id, &[dummy_fungible_asset(faucet_id, 1500)]); - fresh_forest.update_account(block_101, &full_delta).unwrap(); - let root_full_state_1500 = fresh_forest.vault_roots[&(account_id, block_101)]; - - assert_eq!(root_after_1500, root_full_state_1500); -} - -#[test] -fn test_update_storage_map() { - use std::collections::BTreeMap; - - use miden_protocol::account::delta::{StorageMapDelta, StorageSlotDelta}; - - let mut forest = InnerForest::new(); - let account_id = dummy_account(); - let block_num = BlockNumber::GENESIS.child(); + let root_after_block_1 = forest.vault_roots[&(account_id, block_1)]; - let slot_name = StorageSlotName::mock(3); - let key = Word::from([1u32, 2, 3, 4]); - let value = Word::from([5u32, 6, 7, 8]); + // Blocks 2-5: No changes (simulated by not calling update_account) - let mut map_delta = StorageMapDelta::default(); - map_delta.insert(key, value); - let raw = BTreeMap::from_iter([(slot_name.clone(), StorageSlotDelta::Map(map_delta))]); - let storage_delta = AccountStorageDelta::from_raw(raw); + // Block 6: Add 50 more tokens (total: 150) + let block_6 = BlockNumber::from(6); + let mut vault_delta_6 = AccountVaultDelta::default(); + vault_delta_6.add_asset(dummy_fungible_asset(faucet_id, 150)).unwrap(); + let delta_6 = dummy_partial_delta(account_id, vault_delta_6, 
AccountStorageDelta::default()); + forest.update_account(block_6, &delta_6).unwrap(); + let root_after_block_6 = forest.vault_roots[&(account_id, block_6)]; - let delta = dummy_partial_delta(account_id, AccountVaultDelta::default(), storage_delta); - forest.update_account(block_num, &delta).unwrap(); + assert_ne!(root_after_block_1, root_after_block_6); - // Verify storage root was created - assert!( - forest - .storage_map_roots - .contains_key(&(account_id, slot_name.clone(), block_num)) + // Verify range query finds correct previous roots + assert_eq!( + get_vault_root(&forest, account_id, BlockNumber::from(3)), + Some(root_after_block_1) + ); + assert_eq!( + get_vault_root(&forest, account_id, BlockNumber::from(5)), + Some(root_after_block_1) ); - let storage_root = forest.storage_map_roots[&(account_id, slot_name, block_num)]; - assert_ne!(storage_root, InnerForest::empty_smt_root()); + assert_eq!(get_vault_root(&forest, account_id, block_6), Some(root_after_block_6)); } #[test] -fn test_full_state_delta_with_empty_vault_records_root() { - // Regression test for issue #1581: full-state deltas with empty vaults must still record - // the vault root so that subsequent `get_vault_asset_witnesses` calls succeed. - // - // The network counter account from the network monitor has an empty vault (it only uses - // storage slots). Without this fix, `get_vault_asset_witnesses` fails with "root not found" - // because no vault root was ever recorded for the account. +fn vault_full_state_with_empty_vault_records_root() { use miden_protocol::account::{Account, AccountStorage}; let mut forest = InnerForest::new(); let account_id = dummy_account(); let block_num = BlockNumber::GENESIS.child(); - // Create a full-state delta with an empty vault (like the network counter account). 
let vault = AssetVault::new(&[]).unwrap(); let storage = AccountStorage::new(vec![]).unwrap(); let code = AccountCode::mock(); @@ -366,27 +226,19 @@ fn test_full_state_delta_with_empty_vault_records_root() { let account = Account::new(account_id, vault, storage, code, nonce, None).unwrap(); let full_delta = AccountDelta::try_from(account).unwrap(); - // Sanity check: the vault delta should be empty. assert!(full_delta.vault().is_empty()); assert!(full_delta.is_full_state()); forest.update_account(block_num, &full_delta).unwrap(); - // The vault root must be recorded even though the vault is empty. assert!( forest.vault_roots.contains_key(&(account_id, block_num)), "vault root should be recorded for full-state deltas with empty vaults" ); - // Verify the recorded root is the empty SMT root. let recorded_root = forest.vault_roots[&(account_id, block_num)]; - assert_eq!( - recorded_root, - InnerForest::empty_smt_root(), - "empty vault should have the empty SMT root" - ); + assert_eq!(recorded_root, InnerForest::empty_smt_root()); - // Verify `get_vault_asset_witnesses` succeeds (returns empty witnesses for empty keys). 
let witnesses = forest .get_vault_asset_witnesses(account_id, block_num, std::collections::BTreeSet::new()) .expect("get_vault_asset_witnesses should succeed for accounts with empty vaults"); @@ -394,7 +246,67 @@ fn test_full_state_delta_with_empty_vault_records_root() { } #[test] -fn test_storage_map_incremental_updates() { +fn vault_shared_root_retained_when_one_entry_pruned() { + let mut forest = InnerForest::new(); + let account1 = AccountId::try_from(ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE).unwrap(); + let account2 = AccountId::try_from(ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE_2).unwrap(); + let faucet_id = dummy_faucet(); + let block_1 = BlockNumber::GENESIS.child(); + let asset_amount = u64::from(HISTORICAL_BLOCK_RETENTION); + let amount_increment = asset_amount / u64::from(HISTORICAL_BLOCK_RETENTION); + let asset = dummy_fungible_asset(faucet_id, asset_amount); + let asset_key = AssetVaultKey::new_unchecked(asset.vault_key().into()); + + let mut vault_delta_1 = AccountVaultDelta::default(); + vault_delta_1.add_asset(asset).unwrap(); + let delta_1 = dummy_partial_delta(account1, vault_delta_1, AccountStorageDelta::default()); + forest.update_account(block_1, &delta_1).unwrap(); + + let mut vault_delta_2 = AccountVaultDelta::default(); + vault_delta_2.add_asset(dummy_fungible_asset(faucet_id, asset_amount)).unwrap(); + let delta_2 = dummy_partial_delta(account2, vault_delta_2, AccountStorageDelta::default()); + forest.update_account(block_1, &delta_2).unwrap(); + + let root1 = forest.vault_roots[&(account1, block_1)]; + let root2 = forest.vault_roots[&(account2, block_1)]; + assert_eq!(root1, root2); + + let block_at_51 = BlockNumber::from(HISTORICAL_BLOCK_RETENTION + 1); + let mut vault_delta_2_update = AccountVaultDelta::default(); + vault_delta_2_update + .add_asset(dummy_fungible_asset(faucet_id, amount_increment)) + .unwrap(); + let delta_2_update = + dummy_partial_delta(account2, vault_delta_2_update, AccountStorageDelta::default()); + 
forest.update_account(block_at_51, &delta_2_update).unwrap(); + + let block_at_52 = BlockNumber::from(HISTORICAL_BLOCK_RETENTION + 2); + let (vault_roots_removed, storage_roots_removed, storage_entries_removed) = + forest.prune(block_at_52); + + assert_eq!(vault_roots_removed, 1); + assert_eq!(storage_roots_removed, 0); + assert_eq!(storage_entries_removed, 0); + assert!(forest.vault_roots.contains_key(&(account1, block_1))); + assert!(!forest.vault_roots.contains_key(&(account2, block_1))); + assert_eq!(forest.vault_roots_by_block[&block_1], vec![account1]); + + let vault_root_at_52 = forest.get_vault_root(account1, block_at_52); + assert_eq!(vault_root_at_52, Some(root1)); + + let witnesses = forest + .get_vault_asset_witnesses(account1, block_at_52, [asset_key].into()) + .expect("Should be able to get vault witness after pruning"); + assert_eq!(witnesses.len(), 1); + let proof: SmtProof = witnesses[0].clone().into(); + assert_eq!(proof.compute_root(), root1); +} + +// STORAGE MAP TESTS +// ================================================================================================ + +#[test] +fn storage_map_incremental_updates() { use std::collections::BTreeMap; use miden_protocol::account::delta::{StorageMapDelta, StorageSlotDelta}; @@ -419,7 +331,7 @@ fn test_storage_map_incremental_updates() { forest.update_account(block_1, &delta_1).unwrap(); let root_1 = forest.storage_map_roots[&(account_id, slot_name.clone(), block_1)]; - // Block 2: Insert key2 -> value2 (key1 should persist) + // Block 2: Insert key2 -> value2 let block_2 = block_1.child(); let mut map_delta_2 = StorageMapDelta::default(); map_delta_2.insert(key2, value2); @@ -439,14 +351,13 @@ fn test_storage_map_incremental_updates() { forest.update_account(block_3, &delta_3).unwrap(); let root_3 = forest.storage_map_roots[&(account_id, slot_name, block_3)]; - // All roots should be different assert_ne!(root_1, root_2); assert_ne!(root_2, root_3); assert_ne!(root_1, root_3); } #[test] -fn 
test_empty_storage_map_entries_query() { +fn storage_map_empty_entries_query() { use miden_protocol::account::auth::PublicKeyCommitment; use miden_protocol::account::{ AccountBuilder, @@ -463,7 +374,6 @@ fn test_empty_storage_map_entries_query() { let block_num = BlockNumber::GENESIS.child(); let slot_name = StorageSlotName::mock(0); - // Create an account with an empty storage map slot let storage_map = StorageMap::with_entries(vec![]).unwrap(); let component_storage = vec![StorageSlot::with_map(slot_name.clone(), storage_map)]; @@ -483,15 +393,11 @@ fn test_empty_storage_map_entries_query() { .unwrap(); let account_id = account.id(); - - // Convert to full-state delta (this triggers insert_account_storage path) let full_delta = AccountDelta::try_from(account).unwrap(); - assert!(full_delta.is_full_state(), "delta should be full-state"); + assert!(full_delta.is_full_state()); - // Apply the delta forest.update_account(block_num, &full_delta).unwrap(); - // Verify storage_map_roots has an entry assert!( forest .storage_map_roots @@ -499,11 +405,9 @@ fn test_empty_storage_map_entries_query() { "storage_map_roots should have an entry for the empty map" ); - // Verify storage_map_entries returns Some (not None) - this is the bug fix validation let result = forest.storage_map_entries(account_id, slot_name.clone(), block_num); assert!(result.is_some(), "storage_map_entries should return Some for empty maps"); - // Verify the entries are empty let details = result.unwrap(); assert_eq!(details.slot_name, slot_name); match details.entries { @@ -518,3 +422,456 @@ fn test_empty_storage_map_entries_query() { }, } } + +#[test] +fn storage_map_open_returns_proofs() { + use std::collections::BTreeMap; + + use assert_matches::assert_matches; + use miden_protocol::account::delta::{StorageMapDelta, StorageSlotDelta}; + + let mut forest = InnerForest::new(); + let account_id = dummy_account(); + let slot_name = StorageSlotName::mock(3); + let block_num = 
BlockNumber::GENESIS.child(); + + let mut map_delta = StorageMapDelta::default(); + for i in 0..20u32 { + let key = Word::from([i, 0, 0, 0]); + let value = Word::from([0, 0, 0, i]); + map_delta.insert(key, value); + } + let raw = BTreeMap::from_iter([(slot_name.clone(), StorageSlotDelta::Map(map_delta))]); + let storage_delta = AccountStorageDelta::from_raw(raw); + let delta = dummy_partial_delta(account_id, AccountVaultDelta::default(), storage_delta); + forest.update_account(block_num, &delta).unwrap(); + + let keys: Vec = (0..20u32).map(|i| Word::from([i, 0, 0, 0])).collect(); + let result = forest.open_storage_map(account_id, slot_name.clone(), block_num, &keys); + + let details = result.expect("Should return Some").expect("Should not error"); + assert_matches!(details.entries, StorageMapEntries::EntriesWithProofs(entries) => { + assert_eq!(entries.len(), keys.len()); + }); +} + +// PRUNING TESTS +// ================================================================================================ + +const TEST_CHAIN_LENGTH: u32 = 100; +const TEST_AMOUNT_MULTIPLIER: u32 = 100; +const TEST_PRUNE_CHAIN_TIP: u32 = HISTORICAL_BLOCK_RETENTION + 5; + +#[test] +fn prune_handles_empty_forest() { + let mut forest = InnerForest::new(); + + let (vault_removed, storage_roots_removed, storage_entries_removed) = + forest.prune(BlockNumber::GENESIS); + + assert_eq!(vault_removed, 0); + assert_eq!(storage_roots_removed, 0); + assert_eq!(storage_entries_removed, 0); // Always 0 now (LRU cache) +} + +#[test] +fn prune_removes_smt_roots_from_forest() { + use miden_protocol::account::delta::StorageMapDelta; + + let mut forest = InnerForest::new(); + let account_id = dummy_account(); + let faucet_id = dummy_faucet(); + let slot_name = StorageSlotName::mock(7); + + for i in 1..=TEST_PRUNE_CHAIN_TIP { + let block_num = BlockNumber::from(i); + + let mut vault_delta = AccountVaultDelta::default(); + vault_delta + .add_asset(dummy_fungible_asset(faucet_id, (i * 
TEST_AMOUNT_MULTIPLIER).into())) + .unwrap(); + let storage_delta = if i.is_multiple_of(3) { + let mut map_delta = StorageMapDelta::default(); + map_delta.insert(Word::from([1u32, 0, 0, 0]), Word::from([99u32, i, i * i, i * i * i])); + let asd = AccountStorageDelta::new(); + asd.add_updated_maps([(slot_name.clone(), map_delta)]) + } else { + AccountStorageDelta::default() + }; + + let delta = dummy_partial_delta(account_id, vault_delta, storage_delta); + forest.update_account(block_num, &delta).unwrap(); + } + + let retained_block = BlockNumber::from(TEST_PRUNE_CHAIN_TIP); + let pruned_block = BlockNumber::from(3u32); + let vault_root_retained = forest.vault_roots[&(account_id, retained_block)]; + let vault_root_pruned = forest.vault_roots[&(account_id, pruned_block)]; + let storage_root_pruned = + forest.storage_map_roots[&(account_id, slot_name.clone(), pruned_block)]; + + let (vault_removed, storage_roots_removed, storage_entries_removed) = + forest.prune(retained_block); + + assert!(vault_removed > 0); + assert!(storage_roots_removed > 0); + assert_eq!(storage_entries_removed, 0); // Cache is LRU, not counted + assert!(forest.vault_roots.contains_key(&(account_id, retained_block))); + assert!(!forest.vault_roots.contains_key(&(account_id, pruned_block))); + assert!(!forest.storage_map_roots.contains_key(&(account_id, slot_name, pruned_block))); + + let asset_key: Word = FungibleAsset::new(faucet_id, 0).unwrap().vault_key().into(); + assert_matches!(forest.forest.open(vault_root_retained, asset_key), Ok(_)); + assert_matches!(forest.forest.open(vault_root_pruned, asset_key), Err(_)); + + let storage_key = StorageMap::hash_key(Word::from([1u32, 0, 0, 0])); + assert_matches!(forest.forest.open(storage_root_pruned, storage_key), Err(_)); +} + +#[test] +fn prune_respects_retention_boundary() { + let mut forest = InnerForest::new(); + let account_id = dummy_account(); + let faucet_id = dummy_faucet(); + + for i in 1..=HISTORICAL_BLOCK_RETENTION { + let block_num = 
BlockNumber::from(i); + let mut vault_delta = AccountVaultDelta::default(); + vault_delta + .add_asset(dummy_fungible_asset(faucet_id, (i * TEST_AMOUNT_MULTIPLIER).into())) + .unwrap(); + let delta = dummy_partial_delta(account_id, vault_delta, AccountStorageDelta::default()); + forest.update_account(block_num, &delta).unwrap(); + } + + let (vault_removed, storage_roots_removed, storage_entries_removed) = + forest.prune(BlockNumber::from(HISTORICAL_BLOCK_RETENTION)); + + assert_eq!(vault_removed, 0); + assert_eq!(storage_roots_removed, 0); + assert_eq!(storage_entries_removed, 0); + assert_eq!(forest.vault_roots.len(), HISTORICAL_BLOCK_RETENTION as usize); +} + +#[test] +fn prune_vault_roots_removes_old_entries() { + let mut forest = InnerForest::new(); + let account_id = dummy_account(); + let faucet_id = dummy_faucet(); + + for i in 1..=TEST_CHAIN_LENGTH { + let block_num = BlockNumber::from(i); + let amount = (i * TEST_AMOUNT_MULTIPLIER).into(); + let mut vault_delta = AccountVaultDelta::default(); + vault_delta.add_asset(dummy_fungible_asset(faucet_id, amount)).unwrap(); + let delta = dummy_partial_delta(account_id, vault_delta, AccountStorageDelta::default()); + forest.update_account(block_num, &delta).unwrap(); + } + + assert_eq!(forest.vault_roots.len(), TEST_CHAIN_LENGTH as usize); + + let (vault_removed, ..) 
= forest.prune(BlockNumber::from(TEST_CHAIN_LENGTH)); + + let expected_removed = (TEST_CHAIN_LENGTH - HISTORICAL_BLOCK_RETENTION - 1) as usize; + assert_eq!(vault_removed, expected_removed); + + let expected_remaining = (HISTORICAL_BLOCK_RETENTION + 1) as usize; + assert_eq!(forest.vault_roots.len(), expected_remaining); + + let remaining_blocks = Vec::from_iter(forest.vault_roots.keys().map(|(_, b)| b.as_u32())); + let oldest_remaining = *remaining_blocks.iter().min().unwrap(); + let expected_oldest = TEST_CHAIN_LENGTH - HISTORICAL_BLOCK_RETENTION; + assert_eq!(oldest_remaining, expected_oldest); +} + +#[test] +fn prune_storage_map_roots_removes_old_entries() { + use miden_protocol::account::delta::StorageMapDelta; + + let mut forest = InnerForest::new(); + let account_id = dummy_account(); + let slot_name = StorageSlotName::mock(3); + + for i in 1..=TEST_CHAIN_LENGTH { + let block_num = BlockNumber::from(i); + let key = Word::from([i, i * i, 5, 4]); + let value = Word::from([0, 0, i * i * i, 77]); + + let mut map_delta = StorageMapDelta::default(); + map_delta.insert(key, value); + let asd = AccountStorageDelta::new().add_updated_maps([(slot_name.clone(), map_delta)]); + let delta = dummy_partial_delta(account_id, AccountVaultDelta::default(), asd); + forest.update_account(block_num, &delta).unwrap(); + } + + assert_eq!(forest.storage_map_roots.len(), TEST_CHAIN_LENGTH as usize); + + let (_, storage_roots_removed, storage_entries_removed) = + forest.prune(BlockNumber::from(TEST_CHAIN_LENGTH)); + + let expected_removed = (TEST_CHAIN_LENGTH - HISTORICAL_BLOCK_RETENTION - 1) as usize; + assert_eq!(storage_roots_removed, expected_removed); + assert_eq!(storage_entries_removed, 0); // Cache is LRU, not counted + + let expected_remaining = (HISTORICAL_BLOCK_RETENTION + 1) as usize; + assert_eq!(forest.storage_map_roots.len(), expected_remaining); + // Cache size: LRU may have evicted entries, just verify it's populated + 
assert!(!forest.storage_entries_per_user_block_slot.is_empty()); +} + +#[test] +fn prune_handles_multiple_accounts() { + let mut forest = InnerForest::new(); + let account1 = dummy_account(); + let account2 = AccountId::try_from(ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET).unwrap(); + let faucet_id = dummy_faucet(); + + for i in 1..=TEST_CHAIN_LENGTH { + let block_num = BlockNumber::from(i); + let amount = (i * TEST_AMOUNT_MULTIPLIER).into(); + + let mut vault_delta1 = AccountVaultDelta::default(); + vault_delta1.add_asset(dummy_fungible_asset(faucet_id, amount)).unwrap(); + let delta1 = dummy_partial_delta(account1, vault_delta1, AccountStorageDelta::default()); + forest.update_account(block_num, &delta1).unwrap(); + + let mut vault_delta2 = AccountVaultDelta::default(); + vault_delta2.add_asset(dummy_fungible_asset(account2, amount * 2)).unwrap(); + let delta2 = dummy_partial_delta(account2, vault_delta2, AccountStorageDelta::default()); + forest.update_account(block_num, &delta2).unwrap(); + } + + assert_eq!(forest.vault_roots.len(), (TEST_CHAIN_LENGTH * 2) as usize); + + let (vault_removed, ..) 
= forest.prune(BlockNumber::from(TEST_CHAIN_LENGTH)); + + let expected_removed_per_account = + (TEST_CHAIN_LENGTH - HISTORICAL_BLOCK_RETENTION - 1) as usize; + assert_eq!(vault_removed, expected_removed_per_account * 2); + + let expected_remaining_per_account = (HISTORICAL_BLOCK_RETENTION + 1) as usize; + let account1_entries = forest.vault_roots.keys().filter(|(id, _)| *id == account1).count(); + let account2_entries = forest.vault_roots.keys().filter(|(id, _)| *id == account2).count(); + assert_eq!(account1_entries, expected_remaining_per_account); + assert_eq!(account2_entries, expected_remaining_per_account); +} + +#[test] +fn prune_handles_multiple_slots() { + use std::collections::BTreeMap; + + use miden_protocol::account::delta::{StorageMapDelta, StorageSlotDelta}; + + let mut forest = InnerForest::new(); + let account_id = dummy_account(); + let slot_a = StorageSlotName::mock(1); + let slot_b = StorageSlotName::mock(2); + + for i in 1..=TEST_CHAIN_LENGTH { + let block_num = BlockNumber::from(i); + let mut map_delta_a = StorageMapDelta::default(); + map_delta_a.insert(Word::from([i, 0, 0, 0]), Word::from([i, 0, 0, 1])); + let mut map_delta_b = StorageMapDelta::default(); + map_delta_b.insert(Word::from([i, 0, 0, 2]), Word::from([i, 0, 0, 3])); + let raw = BTreeMap::from_iter([ + (slot_a.clone(), StorageSlotDelta::Map(map_delta_a)), + (slot_b.clone(), StorageSlotDelta::Map(map_delta_b)), + ]); + let storage_delta = AccountStorageDelta::from_raw(raw); + let delta = dummy_partial_delta(account_id, AccountVaultDelta::default(), storage_delta); + forest.update_account(block_num, &delta).unwrap(); + } + + assert_eq!(forest.storage_map_roots.len(), (TEST_CHAIN_LENGTH * 2) as usize); + + let chain_tip = BlockNumber::from(TEST_CHAIN_LENGTH); + let (_, storage_roots_removed, storage_entries_removed) = forest.prune(chain_tip); + + let cutoff = TEST_CHAIN_LENGTH - HISTORICAL_BLOCK_RETENTION; + let expected_removed_per_slot = cutoff - 1; + let expected_removed = 
expected_removed_per_slot * 2; + assert_eq!(storage_roots_removed, expected_removed as usize); + assert_eq!(storage_entries_removed, 0); // Cache is LRU, not counted + + let expected_remaining = HISTORICAL_BLOCK_RETENTION + 1; + assert_eq!(forest.storage_map_roots.len(), (expected_remaining * 2) as usize); + // Cache contains 2 latest entries (one per slot) + assert_eq!(forest.storage_entries_per_user_block_slot.len(), 2); +} + +#[test] +fn prune_preserves_most_recent_state_per_entity() { + use std::collections::BTreeMap; + + use miden_protocol::account::delta::{StorageMapDelta, StorageSlotDelta}; + + let mut forest = InnerForest::new(); + let account_id = dummy_account(); + let faucet_id = dummy_faucet(); + let slot_map_a = StorageSlotName::mock(1); + let slot_map_b = StorageSlotName::mock(2); + + // Block 1: Create vault + map_a + map_b + let block_1 = BlockNumber::from(1); + let mut vault_delta_1 = AccountVaultDelta::default(); + vault_delta_1.add_asset(dummy_fungible_asset(faucet_id, 1000)).unwrap(); + + let mut map_delta_a = StorageMapDelta::default(); + map_delta_a.insert(Word::from([1u32, 0, 0, 0]), Word::from([100u32, 0, 0, 0])); + + let mut map_delta_b = StorageMapDelta::default(); + map_delta_b.insert(Word::from([2u32, 0, 0, 0]), Word::from([200u32, 0, 0, 0])); + + let raw = BTreeMap::from_iter([ + (slot_map_a.clone(), StorageSlotDelta::Map(map_delta_a)), + (slot_map_b.clone(), StorageSlotDelta::Map(map_delta_b)), + ]); + let storage_delta_1 = AccountStorageDelta::from_raw(raw); + let delta_1 = dummy_partial_delta(account_id, vault_delta_1, storage_delta_1); + forest.update_account(block_1, &delta_1).unwrap(); + + // Block 51: Update only map_a + let block_at_51 = BlockNumber::from(51); + let mut map_delta_a_new = StorageMapDelta::default(); + map_delta_a_new.insert(Word::from([1u32, 0, 0, 0]), Word::from([999u32, 0, 0, 0])); + + let raw_at_51 = + BTreeMap::from_iter([(slot_map_a.clone(), StorageSlotDelta::Map(map_delta_a_new))]); + let 
storage_delta_at_51 = AccountStorageDelta::from_raw(raw_at_51); + let delta_at_51 = + dummy_partial_delta(account_id, AccountVaultDelta::default(), storage_delta_at_51); + forest.update_account(block_at_51, &delta_at_51).unwrap(); + + // Block 100: Prune + let block_100 = BlockNumber::from(100); + let (vault_removed, storage_roots_removed, storage_entries_removed) = forest.prune(block_100); + + // Vault at block 1 preserved (most recent) + assert_eq!(vault_removed, 0); + assert!(forest.vault_roots.contains_key(&(account_id, block_1))); + + // map_a: Block 51 preserved, block 1 pruned + assert!( + forest + .storage_map_roots + .contains_key(&(account_id, slot_map_a.clone(), block_at_51)) + ); + assert!(!forest.storage_map_roots.contains_key(&(account_id, slot_map_a, block_1))); + + // map_b: Block 1 preserved (most recent) + assert!(forest.storage_map_roots.contains_key(&(account_id, slot_map_b, block_1))); + + assert_eq!(storage_roots_removed, 1); + assert_eq!(storage_entries_removed, 0); // Cache is LRU, not counted +} + +#[test] +fn prune_preserves_entries_within_retention_window() { + use std::collections::BTreeMap; + + use miden_protocol::account::delta::{StorageMapDelta, StorageSlotDelta}; + + let mut forest = InnerForest::new(); + let account_id = dummy_account(); + let faucet_id = dummy_faucet(); + let slot_map = StorageSlotName::mock(1); + + let blocks = [1, 25, 50, 75, 100]; + + for &block_num in &blocks { + let block = BlockNumber::from(block_num); + + let mut vault_delta = AccountVaultDelta::default(); + vault_delta + .add_asset(dummy_fungible_asset(faucet_id, u64::from(block_num) * 100)) + .unwrap(); + + let mut map_delta = StorageMapDelta::default(); + map_delta.insert(Word::from([block_num, 0, 0, 0]), Word::from([block_num * 10, 0, 0, 0])); + + let raw = BTreeMap::from_iter([(slot_map.clone(), StorageSlotDelta::Map(map_delta))]); + let storage_delta = AccountStorageDelta::from_raw(raw); + let delta = dummy_partial_delta(account_id, vault_delta, 
storage_delta); + forest.update_account(block, &delta).unwrap(); + } + + // Block 100: Prune (retention window = 50 blocks, cutoff = 50) + let block_100 = BlockNumber::from(100); + let (vault_removed, storage_roots_removed, _) = forest.prune(block_100); + + // Blocks 1 and 25 pruned (outside retention, have newer entries) + assert_eq!(vault_removed, 2); + assert_eq!(storage_roots_removed, 2); + + // Verify preserved entries + assert!(!forest.vault_roots.contains_key(&(account_id, BlockNumber::from(1)))); + assert!(!forest.vault_roots.contains_key(&(account_id, BlockNumber::from(25)))); + assert!(forest.vault_roots.contains_key(&(account_id, BlockNumber::from(50)))); + assert!(forest.vault_roots.contains_key(&(account_id, BlockNumber::from(75)))); + assert!(forest.vault_roots.contains_key(&(account_id, BlockNumber::from(100)))); +} + +/// Two accounts start with identical vault roots (same asset amount). When one account changes +/// in the next block, verify the unchanged account's vault root still works for lookups and +/// witness generation. 
+#[test] +fn shared_vault_root_retained_when_one_account_changes() { + let mut forest = InnerForest::new(); + let account1 = AccountId::try_from(ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE).unwrap(); + let account2 = AccountId::try_from(ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE_2).unwrap(); + let faucet_id = dummy_faucet(); + + // Block 1: Both accounts have identical vaults (same asset) + let block_1 = BlockNumber::GENESIS.child(); + let initial_amount = 1000u64; + let asset = dummy_fungible_asset(faucet_id, initial_amount); + let asset_key = AssetVaultKey::new_unchecked(asset.vault_key().into()); + + let mut vault_delta_1 = AccountVaultDelta::default(); + vault_delta_1.add_asset(asset).unwrap(); + let delta_1 = dummy_partial_delta(account1, vault_delta_1, AccountStorageDelta::default()); + forest.update_account(block_1, &delta_1).unwrap(); + + let mut vault_delta_2 = AccountVaultDelta::default(); + vault_delta_2 + .add_asset(dummy_fungible_asset(faucet_id, initial_amount)) + .unwrap(); + let delta_2 = dummy_partial_delta(account2, vault_delta_2, AccountStorageDelta::default()); + forest.update_account(block_1, &delta_2).unwrap(); + + // Both accounts should have the same vault root (structural sharing in SmtForest) + let root1_at_block1 = forest.vault_roots[&(account1, block_1)]; + let root2_at_block1 = forest.vault_roots[&(account2, block_1)]; + assert_eq!(root1_at_block1, root2_at_block1, "identical vaults should have identical roots"); + + // Block 2: Only account2 changes (adds more assets) + let block_2 = block_1.child(); + let mut vault_delta_2_update = AccountVaultDelta::default(); + vault_delta_2_update.add_asset(dummy_fungible_asset(faucet_id, 500)).unwrap(); + let delta_2_update = + dummy_partial_delta(account2, vault_delta_2_update, AccountStorageDelta::default()); + forest.update_account(block_2, &delta_2_update).unwrap(); + + // Account2 now has a different root + let root2_at_block2 = forest.vault_roots[&(account2, block_2)]; + 
assert_ne!(root2_at_block1, root2_at_block2, "account2 vault should have changed"); + + // Account1 has no entry at block 2, but lookup should still return block 1's root + assert!(!forest.vault_roots.contains_key(&(account1, block_2))); + let root1_lookup = forest.get_vault_root(account1, block_2); + assert_eq!( + root1_lookup, + Some(root1_at_block1), + "account1 should still resolve to block 1 root" + ); + + // Account1 should still be able to generate witnesses at block 2 (using block 1's data) + let witnesses = forest + .get_vault_asset_witnesses(account1, block_2, [asset_key].into()) + .expect("witness generation should succeed for unchanged account"); + assert_eq!(witnesses.len(), 1); + + // The proof should verify against the original root + let proof: SmtProof = witnesses[0].clone().into(); + assert_eq!(proof.compute_root(), root1_at_block1); +} diff --git a/crates/store/src/state/loader.rs b/crates/store/src/state/loader.rs index 504ea0631..4171053fe 100644 --- a/crates/store/src/state/loader.rs +++ b/crates/store/src/state/loader.rs @@ -216,7 +216,7 @@ pub async fn load_mmr(db: &mut Db) -> Result = block - .body() + let duplicate_nullifiers: Vec<_> = body .created_nullifiers() .iter() .filter(|&nullifier| inner.nullifier_tree.get_block_num(nullifier).is_some()) @@ -304,11 +304,7 @@ impl State { let nullifier_tree_update = inner .nullifier_tree .compute_mutations( - block - .body() - .created_nullifiers() - .iter() - .map(|nullifier| (*nullifier, block_num)), + body.created_nullifiers().iter().map(|nullifier| (*nullifier, block_num)), ) .map_err(InvalidBlockError::NewBlockNullifierAlreadySpent)?; @@ -325,9 +321,7 @@ impl State { let account_tree_update = inner .account_tree .compute_mutations( - block - .body() - .updated_accounts() + body.updated_accounts() .iter() .map(|update| (update.account_id(), update.final_state_commitment())), ) @@ -355,14 +349,13 @@ impl State { ) }; - // build note tree - let note_tree = block.body().compute_block_note_tree(); 
+ // Build note tree + let note_tree = body.compute_block_note_tree(); if note_tree.root() != header.note_root() { return Err(InvalidBlockError::NewBlockInvalidNoteRoot.into()); } - let notes = block - .body() + let notes = body .output_notes() .map(|(note_index, note)| { let (details, nullifier) = match note { @@ -401,12 +394,12 @@ impl State { // Extract public account updates with deltas before block is moved into async task. // Private accounts are filtered out since they don't expose their state changes. let account_deltas = - Vec::from_iter(block.body().updated_accounts().iter().filter_map(|update| { - match update.details() { + Vec::from_iter(body.updated_accounts().iter().filter_map( + |update| match update.details() { AccountUpdateDetails::Delta(delta) => Some(delta.clone()), AccountUpdateDetails::Private => None, - } - })); + }, + )); // The DB and in-memory state updates need to be synchronized and are partially // overlapping. Namely, the DB transaction only proceeds after this task acquires the @@ -471,7 +464,8 @@ impl State { .in_current_span() .await?; - self.forest.write().await.apply_block_updates(block_num, account_deltas)?; + let mut forest = self.forest.write().await; + forest.apply_block_updates(block_num, account_deltas)?; info!(%block_commitment, block_num = block_num.as_u32(), COMPONENT, "apply_block successful"); @@ -1108,7 +1102,7 @@ impl State { Vec::::with_capacity(storage_requests.len()); // Use forest for storage map queries - let forest_guard = self.forest.read().await; + let mut forest_guard = self.forest.write().await; for StorageMapRequest { slot_name, slot_data } in storage_requests { let details = match &slot_data { @@ -1120,13 +1114,28 @@ impl State { block_num, })? 
.map_err(DatabaseError::MerkleError)?, - SlotData::All => forest_guard - .storage_map_entries(account_id, slot_name.clone(), block_num) - .ok_or_else(|| DatabaseError::StorageRootNotFound { - account_id, - slot_name: slot_name.to_string(), - block_num, - })?, + SlotData::All => { + // Try cache first (latest block only) + if let Some(details) = + forest_guard.storage_map_entries(account_id, slot_name.clone(), block_num) + { + details + } else { + // we don't want to hold the forest guard for a prolonged time + drop(forest_guard); + // TODO we collect all storage items + let details = self + .db + .reconstruct_storage_map_from_db( + account_id, + slot_name.clone(), + block_num, + ) + .await?; + forest_guard = self.forest.write().await; + details + } + }, }; storage_map_details.push(details); From 0c968367a2bcd9e5814cffaccde295540d29e0f9 Mon Sep 17 00:00:00 2001 From: Bernhard Schuster Date: Wed, 11 Feb 2026 14:03:28 +0100 Subject: [PATCH 02/18] yay --- Cargo.lock | 11 +- crates/store/Cargo.toml | 2 +- crates/store/src/db/mod.rs | 42 +++- .../store/src/db/models/queries/accounts.rs | 13 +- crates/store/src/db/tests.rs | 198 ++++++++---------- crates/store/src/inner_forest/mod.rs | 180 ++++++++-------- crates/store/src/inner_forest/tests.rs | 76 ++++--- crates/store/src/state/mod.rs | 29 ++- 8 files changed, 281 insertions(+), 270 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e6313649b..3dda03df0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2405,15 +2405,6 @@ dependencies = [ "tracing-subscriber", ] -[[package]] -name = "lru" -version = "0.12.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "234cf4f4a04dc1f57e24b96cc0cd600cf2af460d4161ac5ecdd0af8e1f3b2a38" -dependencies = [ - "hashbrown 0.15.5", -] - [[package]] name = "lru" version = "0.14.0" @@ -2919,7 +2910,7 @@ dependencies = [ "hex", "indexmap 2.13.0", "libsqlite3-sys", - "lru 0.12.5", + "lru 0.16.3", "miden-crypto", "miden-node-proto", "miden-node-proto-build", diff 
--git a/crates/store/Cargo.toml b/crates/store/Cargo.toml index 7f296d2e0..d28c61572 100644 --- a/crates/store/Cargo.toml +++ b/crates/store/Cargo.toml @@ -25,7 +25,7 @@ fs-err = { workspace = true } hex = { version = "0.4" } indexmap = { workspace = true } libsqlite3-sys = { workspace = true } -lru = { version = "0.12" } +lru = { workspace = true } miden-crypto = { features = ["concurrent", "hashmaps"], workspace = true } miden-node-proto = { workspace = true } miden-node-proto-build = { features = ["internal"], workspace = true } diff --git a/crates/store/src/db/mod.rs b/crates/store/src/db/mod.rs index 7bed9f0ef..ed087991a 100644 --- a/crates/store/src/db/mod.rs +++ b/crates/store/src/db/mod.rs @@ -6,6 +6,7 @@ use anyhow::Context; use diesel::{Connection, QueryableByName, RunQueryDsl, SqliteConnection}; use miden_node_proto::domain::account::{AccountInfo, AccountSummary}; use miden_node_proto::generated as proto; +use miden_node_utils::limiter::MAX_RESPONSE_PAYLOAD_BYTES; use miden_node_utils::tracing::OpenTelemetrySpanExt; use miden_protocol::Word; use miden_protocol::account::{AccountHeader, AccountId, AccountStorageHeader}; @@ -600,9 +601,23 @@ impl Db { &self, account_id: AccountId, block_range: RangeInclusive, + entries_limit: Option, ) -> Result { + let entries_limit = entries_limit.unwrap_or_else(|| { + // TODO: These limits should be given by the protocol. 
+ // See miden-base/issues/1770 for more details + pub const ROW_OVERHEAD_BYTES: usize = + 2 * size_of::() + size_of::() + size_of::(); // key + value + block_num + slot_idx + MAX_RESPONSE_PAYLOAD_BYTES / ROW_OVERHEAD_BYTES + }); + self.transact("select storage map sync values", move |conn| { - models::queries::select_account_storage_map_values(conn, account_id, block_range) + models::queries::select_account_storage_map_values_paged( + conn, + account_id, + block_range, + entries_limit, + ) }) .await } @@ -611,21 +626,35 @@ impl Db { /// /// Used as fallback when `InnerForest` cache misses (historical or evicted queries). /// Rebuilds all entries by querying the DB and filtering to the specific slot. + /// + /// Returns: + /// - `::LimitExceeded` when too many entries are present + /// - `::AllEntries` if the size is sufficiently small pub(crate) async fn reconstruct_storage_map_from_db( &self, account_id: AccountId, slot_name: miden_protocol::account::StorageSlotName, block_num: BlockNumber, + entries_limit: Option, ) -> Result { - use miden_node_proto::domain::account::{AccountStorageMapDetails, StorageMapEntries}; + use miden_node_proto::domain::account::AccountStorageMapDetails; use miden_protocol::EMPTY_WORD; + // TODO this remains expensive with a large history until we implement pruning for DB + // columns let values = self - .select_storage_map_sync_values(account_id, BlockNumber::GENESIS..=block_num) + .select_storage_map_sync_values( + account_id, + BlockNumber::GENESIS..=block_num, + entries_limit, + ) .await?; + if values.last_block_included != block_num { + return Ok(AccountStorageMapDetails::limit_exceeded(slot_name)); + } // Filter to the specific slot and collect latest values per key - let mut latest_values: BTreeMap = BTreeMap::new(); + let mut latest_values = BTreeMap::::new(); for value in values.values { if value.slot_name == slot_name { latest_values.insert(value.key, value.value); @@ -636,10 +665,7 @@ impl Db { latest_values.retain(|_, v| *v 
!= EMPTY_WORD); if latest_values.len() > AccountStorageMapDetails::MAX_RETURN_ENTRIES { - return Ok(AccountStorageMapDetails { - slot_name, - entries: StorageMapEntries::LimitExceeded, - }); + return Ok(AccountStorageMapDetails::limit_exceeded(slot_name)); } let entries = Vec::from_iter(latest_values.into_iter()); diff --git a/crates/store/src/db/models/queries/accounts.rs b/crates/store/src/db/models/queries/accounts.rs index fef733cb6..b0f0c12ea 100644 --- a/crates/store/src/db/models/queries/accounts.rs +++ b/crates/store/src/db/models/queries/accounts.rs @@ -653,19 +653,14 @@ impl StorageMapValue { /// /// * Response payload size: 0 <= size <= 2MB /// * Storage map values per response: 0 <= count <= (2MB / (2*Word + u32 + u8)) + 1 -pub(crate) fn select_account_storage_map_values( +pub(crate) fn select_account_storage_map_values_paged( conn: &mut SqliteConnection, account_id: AccountId, block_range: RangeInclusive, + limit: usize, ) -> Result { use schema::account_storage_map_values as t; - // TODO: These limits should be given by the protocol. 
- // See miden-base/issues/1770 for more details - pub const ROW_OVERHEAD_BYTES: usize = - 2 * size_of::() + size_of::() + size_of::(); // key + value + block_num + slot_idx - pub const MAX_ROWS: usize = MAX_RESPONSE_PAYLOAD_BYTES / ROW_OVERHEAD_BYTES; - if !account_id.is_public() { return Err(DatabaseError::AccountNotPublic(account_id)); } @@ -686,13 +681,13 @@ pub(crate) fn select_account_storage_map_values( .and(t::block_num.le(block_range.end().to_raw_sql())), ) .order(t::block_num.asc()) - .limit(i64::try_from(MAX_ROWS + 1).expect("limit fits within i64")) + .limit(i64::try_from(limit + 1).expect("limit fits within i64")) .load(conn)?; // Discard the last block in the response (assumes more than one block may be present) let (last_block_included, values) = if let Some(&(last_block_num, ..)) = raw.last() - && raw.len() > MAX_ROWS + && raw.len() > limit { // NOTE: If the query contains at least one more row than the amount of storage map updates // allowed in a single block for an account, then the response is guaranteed to have at diff --git a/crates/store/src/db/tests.rs b/crates/store/src/db/tests.rs index c8e54e647..eef076ca2 100644 --- a/crates/store/src/db/tests.rs +++ b/crates/store/src/db/tests.rs @@ -80,6 +80,7 @@ use crate::db::migrations::apply_migrations; use crate::db::models::queries::{StorageMapValue, insert_account_storage_map_value}; use crate::db::models::{Page, queries, utils}; use crate::errors::DatabaseError; +use crate::inner_forest::HISTORICAL_BLOCK_RETENTION; fn create_db() -> SqliteConnection { let mut conn = SqliteConnection::establish(":memory:").expect("In memory sqlite always works"); @@ -1071,9 +1072,13 @@ fn sql_account_storage_map_values_insertion() { AccountDelta::new(account_id, storage1, AccountVaultDelta::default(), Felt::ONE).unwrap(); insert_account_delta(conn, account_id, block1, &delta1); - let storage_map_page = - queries::select_account_storage_map_values(conn, account_id, BlockNumber::GENESIS..=block1) - .unwrap(); + 
let storage_map_page = queries::select_account_storage_map_values_paged( + conn, + account_id, + BlockNumber::GENESIS..=block1, + 1024, + ) + .unwrap(); assert_eq!(storage_map_page.values.len(), 2, "expect 2 initial rows"); // Update key1 at block 2 @@ -1086,9 +1091,13 @@ fn sql_account_storage_map_values_insertion() { .unwrap(); insert_account_delta(conn, account_id, block2, &delta2); - let storage_map_values = - queries::select_account_storage_map_values(conn, account_id, BlockNumber::GENESIS..=block2) - .unwrap(); + let storage_map_values = queries::select_account_storage_map_values_paged( + conn, + account_id, + BlockNumber::GENESIS..=block2, + 1024, + ) + .unwrap(); assert_eq!(storage_map_values.values.len(), 3, "three rows (with duplicate key)"); // key1 should now be value3 at block2; key2 remains value2 at block1 @@ -1182,10 +1191,11 @@ fn select_storage_map_sync_values() { ) .unwrap(); - let page = queries::select_account_storage_map_values( + let page = queries::select_account_storage_map_values_paged( &mut conn, account_id, BlockNumber::from(2)..=BlockNumber::from(3), + 1024, ) .unwrap(); @@ -2119,10 +2129,11 @@ fn db_roundtrip_storage_map_values() { .unwrap(); // Retrieve - let page = queries::select_account_storage_map_values( + let page = queries::select_account_storage_map_values_paged( &mut conn, account_id, BlockNumber::GENESIS..=block_num, + 1024, ) .unwrap(); @@ -2315,10 +2326,11 @@ fn inner_forest_matches_db_storage_map_roots_across_updates() { slot_name: &StorageSlotName, block_num: BlockNumber, ) -> Option { - let storage_values = queries::select_account_storage_map_values( + let storage_values = queries::select_account_storage_map_values_paged( conn, account_id, BlockNumber::GENESIS..=block_num, + 1024, ) .unwrap(); @@ -2502,7 +2514,12 @@ fn inner_forest_shared_roots_not_deleted_prematurely() { let account2 = AccountId::try_from(ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE_2).unwrap(); let account3 = 
AccountId::try_from(ACCOUNT_ID_REGULAR_PRIVATE_ACCOUNT_UPDATABLE_CODE).unwrap();
-    let block1 = BlockNumber::from(1);
+    let block01 = BlockNumber::from(1);
+    let block02 = BlockNumber::from(2);
+    let block50 = BlockNumber::from(HISTORICAL_BLOCK_RETENTION);
+    let block51 = BlockNumber::from(HISTORICAL_BLOCK_RETENTION + 1);
+    let block52 = BlockNumber::from(HISTORICAL_BLOCK_RETENTION + 2);
+    let block53 = BlockNumber::from(HISTORICAL_BLOCK_RETENTION + 3);
 
     let slot_name = StorageSlotName::mock(1);
     let key1 = num_to_word(100);
@@ -2515,6 +2532,7 @@ fn inner_forest_shared_roots_not_deleted_prematurely() {
     map_delta.insert(key1, value1);
     map_delta.insert(key2, value2);
 
+    // Sets up a single slot with a map and two key-value-pairs
     let raw = BTreeMap::from_iter([(slot_name.clone(), StorageSlotDelta::Map(map_delta.clone()))]);
     let storage = AccountStorageDelta::from_raw(raw);
 
@@ -2522,37 +2540,38 @@ fn inner_forest_shared_roots_not_deleted_prematurely() {
     let delta1 =
         AccountDelta::new(account1, storage.clone(), AccountVaultDelta::default(), Felt::ONE)
             .unwrap();
-    forest.update_account(block1, &delta1).unwrap();
+    forest.update_account(block01, &delta1).unwrap();
 
     // Account 2 (same storage)
     let delta2 =
         AccountDelta::new(account2, storage.clone(), AccountVaultDelta::default(), Felt::ONE)
             .unwrap();
-    forest.update_account(block1, &delta2).unwrap();
+    forest.update_account(block02, &delta2).unwrap();
 
     // Account 3 (same storage)
     let delta3 =
         AccountDelta::new(account3, storage.clone(), AccountVaultDelta::default(), Felt::ONE)
             .unwrap();
-    forest.update_account(block1, &delta3).unwrap();
+    forest.update_account(block02, &delta3).unwrap();
 
     // All three accounts should have the same root (structural sharing in SmtForest)
-    let root1 = forest.get_storage_map_root(account1, &slot_name, block1).unwrap();
-    let root2 = forest.get_storage_map_root(account2, &slot_name, block1).unwrap();
-    let root3 = forest.get_storage_map_root(account3, &slot_name, block1).unwrap();
+    let root1 =
forest.get_storage_map_root(account1, &slot_name, block01).unwrap(); + let root2 = forest.get_storage_map_root(account2, &slot_name, block02).unwrap(); + let root3 = forest.get_storage_map_root(account3, &slot_name, block02).unwrap(); - assert_eq!(root1, root2, "Identical maps should have same root"); - assert_eq!(root2, root3, "Identical maps should have same root"); + // identical maps means identical roots + assert_eq!(root1, root2); + assert_eq!(root2, root3); // Verify we can get witnesses for all three accounts and verify them against roots let witness1 = forest - .get_storage_map_witness(account1, &slot_name, block1, key1) + .get_storage_map_witness(account1, &slot_name, block01, key1) .expect("Account1 should have accessible storage map"); let witness2 = forest - .get_storage_map_witness(account2, &slot_name, block1, key1) + .get_storage_map_witness(account2, &slot_name, block02, key1) .expect("Account2 should have accessible storage map"); let witness3 = forest - .get_storage_map_witness(account3, &slot_name, block1, key1) + .get_storage_map_witness(account3, &slot_name, block02, key1) .expect("Account3 should have accessible storage map"); // Verify witnesses against storage map roots using SmtProof::compute_root @@ -2565,19 +2584,11 @@ fn inner_forest_shared_roots_not_deleted_prematurely() { let proof3: SmtProof = witness3.into(); assert_eq!(proof3.compute_root(), root3, "Witness3 must verify against root3"); - // Now prune account1's storage (simulate it being old enough to prune) - // This should NOT affect account2 and account3 - let block_to_prune = block1; - let (_, storage_roots_removed, _) = forest.prune(block_to_prune); - - // No roots should be removed since block1 is at the chain tip - assert_eq!(storage_roots_removed, 0, "No roots should be pruned at chain tip"); + let (_, storage_roots_removed) = forest.prune(block50); + // nothing should be pruned yet, it's still in the window + assert_eq!(storage_roots_removed, 0); - // Advance chain and 
create a scenario where only account1 is old
-    let block51 = BlockNumber::from(51);
-    let block52 = BlockNumber::from(52);
-
-    // Update account2 at block 51 (keeps it recent)
+    // Update accounts 1,2,3
     let mut map_delta_update = StorageMapDelta::default();
     map_delta_update.insert(key1, num_to_word(1001)); // Slight change
     let raw_update =
@@ -2592,39 +2603,44 @@ fn inner_forest_shared_roots_not_deleted_prematurely() {
             .unwrap();
     forest.update_account(block51, &delta2_update).unwrap();
 
-    // Update account3 at block 52 (keeps it recent)
-    let delta3_update =
-        AccountDelta::new(account3, storage_update, AccountVaultDelta::default(), Felt::new(2))
-            .unwrap();
+    let delta3_update = AccountDelta::new(
+        account3,
+        storage_update.clone(),
+        AccountVaultDelta::default(),
+        Felt::new(2),
+    )
+    .unwrap();
     forest.update_account(block52, &delta3_update).unwrap();
 
     // Prune at block 52
-    // - Account1: block 1 is most recent, should NOT be pruned
-    // - Account2: block 1 is old (block 51 is newer), should be pruned
-    // - Account3: block 1 is old (block 52 is newer), should be pruned
-    let (_, storage_roots_removed, storage_entries_removed) = forest.prune(block52);
+    let (_, storage_roots_removed) = forest.prune(block52);
+    // the root for account01 is the most recent, which is the same as the other two, so nothing
+    // should be pruned
+    assert_eq!(storage_roots_removed, 0);
 
-    assert_eq!(
-        storage_roots_removed, 2,
-        "Should prune accounts 2 and 3's old storage roots from block 1 (account1's block 1 is most recent)"
-    );
-    assert_eq!(
-        storage_entries_removed, 0,
-        "Storage entries are LRU-cached, not counted in prune results"
-    );
+    // ensure the root is still accessible
+    let account1_root_after_prune = forest.get_storage_map_root(account1, &slot_name, block01);
+    assert!(account1_root_after_prune.is_some());
 
-    // Account1 at block1 should STILL be accessible (it's the most recent for account1)
-    let account1_root_after_prune = forest.get_storage_map_root(account1,
&slot_name, block1); - assert!( - account1_root_after_prune.is_some(), - "Account1's block 1 root should NOT be pruned (it's the most recent for account1)" - ); + let delta1_update = + AccountDelta::new(account1, storage_update, AccountVaultDelta::default(), Felt::new(2)) + .unwrap(); + forest.update_account(block53, &delta1_update).unwrap(); + + // Prune at block 53 + let (_, storage_roots_removed) = forest.prune(block53); + // the roots from block01 and block02 are now all obsolete and should remove 2 storage entries + assert_eq!(storage_roots_removed, 1); // Account2 and Account3 should still be accessible at their recent blocks - let account2_root = forest.get_storage_map_root(account2, &slot_name, block51).unwrap(); - let account3_root = forest.get_storage_map_root(account3, &slot_name, block52).unwrap(); + let account1_root = forest.get_storage_map_root(account1, &slot_name, block53).unwrap(); + let account2_root = forest.get_storage_map_root(account2, &slot_name, block53).unwrap(); + let account3_root = forest.get_storage_map_root(account3, &slot_name, block53).unwrap(); // Verify we can still get witnesses for account2 and account3 and verify against roots + let witness1_after = forest + .get_storage_map_witness(account2, &slot_name, block51, key1) + .expect("Account2 should still have accessible storage map after pruning account1"); let witness2_after = forest .get_storage_map_witness(account2, &slot_name, block51, key1) .expect("Account2 should still have accessible storage map after pruning account1"); @@ -2633,25 +2649,12 @@ fn inner_forest_shared_roots_not_deleted_prematurely() { .expect("Account3 should still have accessible storage map after pruning account1"); // Verify witnesses against storage map roots + let proof1: SmtProof = witness1_after.into(); + assert_eq!(proof1.compute_root(), account1_root,); let proof2: SmtProof = witness2_after.into(); - assert_eq!( - proof2.compute_root(), - account2_root, - "Witness2 must verify against 
account2_root" - ); - + assert_eq!(proof2.compute_root(), account2_root,); let proof3: SmtProof = witness3_after.into(); - assert_eq!( - proof3.compute_root(), - account3_root, - "Witness3 must verify against account3_root" - ); - - // The shared root should still be usable for account2 and account3 - // Even though account1's reference was removed, the SMT in the forest is preserved - // by the references from account2 and account3 - assert_ne!(account2_root, root1, "Account2's root should be different after update"); - assert_ne!(account3_root, root1, "Account3's root should be different after update"); + assert_eq!(proof3.compute_root(), account3_root,); } #[test] @@ -2724,8 +2727,7 @@ fn inner_forest_retains_latest_after_100_blocks_and_pruning() { // Prune at block 100 // Block 1 is 99 blocks old, BUT it's the most recent entry for this account // so it should NOT be pruned - let (vault_roots_removed, storage_roots_removed, storage_entries_removed) = - forest.prune(block_100); + let (vault_roots_removed, storage_roots_removed) = forest.prune(block_100); let cutoff_block = 100 - HISTORICAL_BLOCK_RETENTION; assert_eq!(cutoff_block, 50, "Cutoff should be block 50 (100 - HISTORICAL_BLOCK_RETENTION)"); @@ -2737,10 +2739,6 @@ fn inner_forest_retains_latest_after_100_blocks_and_pruning() { storage_roots_removed, 0, "Should NOT prune block 1 storage root (it's the most recent for this account/slot)" ); - assert_eq!( - storage_entries_removed, 0, - "Should NOT prune block 1 storage entries (it's the most recent for this account/slot)" - ); // After pruning, we should STILL be able to access block 1's data // because it's the most recent entry for this account @@ -2768,7 +2766,7 @@ fn inner_forest_retains_latest_after_100_blocks_and_pruning() { assert_eq!(proof.compute_root(), storage_root, "Witness must verify against storage root"); let entries = forest - .storage_map_entries(account_id, slot_map.clone(), block_1) + .get_storage_map_details_full_from_cache(account_id, 
slot_map.clone(), block_1) .expect("Should have storage map entries after pruning"); assert_matches!(&entries.entries, StorageMapEntries::AllEntries(entries) => { assert_eq!(entries.len(), 2, "Should have 2 entries (key1 and key2)"); @@ -2798,8 +2796,7 @@ fn inner_forest_retains_latest_after_100_blocks_and_pruning() { forest.update_account(block_51, &delta_51).unwrap(); // Prune again at block 100 - let (vault_roots_removed_2, storage_roots_removed_2, storage_entries_removed_2) = - forest.prune(block_100); + let (vault_roots_removed_2, storage_roots_removed_2) = forest.prune(block_100); // Now block 1 should be pruned because there's a newer entry at block 51 assert_eq!(vault_roots_removed_2, 1, "Should prune block 1 vault root (block 51 is newer)"); @@ -2807,10 +2804,6 @@ fn inner_forest_retains_latest_after_100_blocks_and_pruning() { storage_roots_removed_2, 1, "Should prune block 1 storage root (block 51 is newer)" ); - assert_eq!( - storage_entries_removed_2, 0, - "Storage entries are LRU-cached, not counted in prune results" - ); // Now verify we can access the account state at block 100 // (should find block 51's entry via range query) @@ -2838,7 +2831,7 @@ fn inner_forest_retains_latest_after_100_blocks_and_pruning() { assert_eq!(proof.compute_root(), storage_root, "Witness must verify against storage root"); let entries = forest - .storage_map_entries(account_id, slot_map.clone(), block_51) + .get_storage_map_details_full_from_cache(account_id, slot_map.clone(), block_51) .expect("Should have storage map entries"); match &entries.entries { @@ -2897,8 +2890,7 @@ fn inner_forest_preserves_most_recent_vault_only() { let block_100 = BlockNumber::from(100); // Prune at block 100 - let (vault_roots_removed, storage_roots_removed, storage_entries_removed) = - forest.prune(block_100); + let (vault_roots_removed, storage_roots_removed) = forest.prune(block_100); // Vault from block 1 should NOT be pruned (it's the most recent) assert_eq!( @@ -2906,7 +2898,6 @@ fn 
inner_forest_preserves_most_recent_vault_only() { "Should NOT prune vault root (it's the most recent for this account)" ); assert_eq!(storage_roots_removed, 0, "No storage roots to prune"); - assert_eq!(storage_entries_removed, 0, "No storage entries to prune"); // Verify vault is still accessible at block 100 let vault_root_at_100 = forest @@ -2969,8 +2960,7 @@ fn inner_forest_preserves_most_recent_storage_map_only() { let block_100 = BlockNumber::from(100); // Prune at block 100 - let (vault_roots_removed, storage_roots_removed, storage_entries_removed) = - forest.prune(block_100); + let (vault_roots_removed, storage_roots_removed) = forest.prune(block_100); // Storage map from block 1 should NOT be pruned (it's the most recent) assert_eq!(vault_roots_removed, 0, "No vault roots to prune"); @@ -2978,10 +2968,6 @@ fn inner_forest_preserves_most_recent_storage_map_only() { storage_roots_removed, 0, "Should NOT prune storage map root (it's the most recent for this account/slot)" ); - assert_eq!( - storage_entries_removed, 0, - "Should NOT prune storage entries (it's the most recent for this account/slot)" - ); // Verify storage map is still accessible at block 100 let storage_root_at_100 = forest @@ -3003,7 +2989,7 @@ fn inner_forest_preserves_most_recent_storage_map_only() { // Verify we can get all entries let entries = forest - .storage_map_entries(account_id, slot_map.clone(), block_1) + .get_storage_map_details_full_from_cache(account_id, slot_map.clone(), block_1) .expect("Should have storage entries after pruning"); match &entries.entries { @@ -3053,8 +3039,7 @@ fn inner_forest_preserves_most_recent_storage_value_slot() { let block_100 = BlockNumber::from(100); // Prune at block 100 - let (vault_roots_removed, storage_roots_removed, storage_entries_removed) = - forest.prune(block_100); + let (vault_roots_removed, storage_roots_removed) = forest.prune(block_100); // No roots should be pruned because there are no map slots assert_eq!(vault_roots_removed, 0, "No 
vault roots in this test"); @@ -3062,10 +3047,6 @@ fn inner_forest_preserves_most_recent_storage_value_slot() { storage_roots_removed, 0, "Value slots don't create storage roots in InnerForest" ); - assert_eq!( - storage_entries_removed, 0, - "Value slots don't create storage entries in InnerForest" - ); // Verify no storage map roots exist for this account let storage_root = forest.get_storage_map_root(account_id, &slot_value, block_100); @@ -3149,8 +3130,7 @@ fn inner_forest_preserves_mixed_slots_independently() { let block_100 = BlockNumber::from(100); // Prune at block 100 - let (vault_roots_removed, storage_roots_removed, storage_entries_removed) = - forest.prune(block_100); + let (vault_roots_removed, storage_roots_removed) = forest.prune(block_100); // Vault: block 1 is most recent, should NOT be pruned // Map A: block 1 is old (block 51 is newer), SHOULD be pruned @@ -3160,13 +3140,9 @@ fn inner_forest_preserves_mixed_slots_independently() { "Vault root from block 1 should NOT be pruned (most recent)" ); assert_eq!( - storage_roots_removed, 1, + storage_roots_removed, 0, "Map A from block 1 should be pruned (block 51 is newer); Map B should NOT" ); - assert_eq!( - storage_entries_removed, 0, - "Storage entries are LRU-cached, not counted in prune results" - ); // Verify vault is still accessible let vault_root_at_100 = forest diff --git a/crates/store/src/inner_forest/mod.rs b/crates/store/src/inner_forest/mod.rs index 4b340e349..c82112594 100644 --- a/crates/store/src/inner_forest/mod.rs +++ b/crates/store/src/inner_forest/mod.rs @@ -70,7 +70,6 @@ pub enum WitnessError { /// Snapshot of storage map entries at a specific block. struct StorageSnapshot { - block_num: BlockNumber, entries: BTreeMap, } @@ -93,7 +92,8 @@ pub(crate) struct InnerForest { /// LRU cache of latest storage map entries for `SlotData::All` queries. /// Only stores the most recent snapshot per (account, slot). /// Historical queries fall back to DB. 
- storage_entries_per_user_block_slot: LruCache<(AccountId, StorageSlotName), StorageSnapshot>, + storage_entries_per_block_per_account_per_slot: + LruCache<(BlockNumber, AccountId, StorageSlotName), StorageSnapshot>, vault_refcount: HashMap, storage_slots_refcount: HashMap, @@ -117,7 +117,7 @@ impl InnerForest { Self { forest: SmtForest::new(), storage_map_roots: BTreeMap::new(), - storage_entries_per_user_block_slot: LruCache::new( + storage_entries_per_block_per_account_per_slot: LruCache::new( NonZeroUsize::new(DEFAULT_STORAGE_CACHE_ENTRIES_SIZE).unwrap(), ), vault_refcount: HashMap::new(), @@ -136,11 +136,9 @@ impl InnerForest { *EmptySubtreeRoots::entry(SMT_DEPTH, 0) } - fn increment_refcount(map: &mut HashMap, root: Word) { - let entry = map.entry(root).or_insert(0); - *entry += 1; - } - + /// Decrement the reference count in the given map. + /// + /// Returns `true` if the refcount reached zero. fn decrement_refcount(map: &mut HashMap, root: Word) -> bool { let Some(count) = map.get_mut(&root) else { return false; @@ -154,6 +152,9 @@ impl InnerForest { } } + // ACCESSORS + // -------------------------------------------------------------------------------------------- + /// Retrieves a vault root for the specified account at or before the specified block. pub(crate) fn get_vault_root( &self, @@ -182,6 +183,9 @@ impl InnerForest { .map(|(_, root)| *root) } + // WITNESSES and PROOFS + // -------------------------------------------------------------------------------------------- + /// Retrieves a storage map witness for the specified account and storage slot. /// /// Finds the most recent witness at or before the specified block number. @@ -228,7 +232,7 @@ impl InnerForest { /// /// Returns `None` if no storage root is tracked for this account/slot/block combination. /// Returns a `MerkleError` if the forest doesn't contain sufficient data for the proofs. 
- pub(crate) fn open_storage_map( + pub(crate) fn get_storage_map_details_for_keys( &self, account_id: AccountId, slot_name: StorageSlotName, @@ -237,7 +241,6 @@ impl InnerForest { ) -> Option> { let root = self.get_storage_map_root(account_id, &slot_name, block_num)?; - // Collect SMT proofs for each key let proofs = Result::from_iter(raw_keys.iter().map(|raw_key| { let key = StorageMap::hash_key(*raw_key); self.forest.open(root, key) @@ -254,20 +257,18 @@ impl InnerForest { /// - Query is for a historical block (not the most recent) /// /// Returns `LimitExceeded` if there are too many entries to return. - pub(crate) fn storage_map_entries( + pub(crate) fn get_storage_map_details_full_from_cache( &mut self, account_id: AccountId, slot_name: StorageSlotName, block_num: BlockNumber, ) -> Option { // Get cached snapshot - let snapshot = - self.storage_entries_per_user_block_slot.get(&(account_id, slot_name.clone()))?; - - // Only serve queries for the latest block - if snapshot.block_num != block_num { - return None; // Historical query - caller should use DB - } + let snapshot = self.storage_entries_per_block_per_account_per_slot.get(&( + block_num, + account_id, + slot_name.clone(), + ))?; if snapshot.entries.len() > AccountStorageMapDetails::MAX_RETURN_ENTRIES { return Some(AccountStorageMapDetails { @@ -382,9 +383,7 @@ impl InnerForest { // so that the map has entries for all accounts, and then return (i.e., no need to insert // anything into the forest) if delta.is_empty() { - self.vault_roots.insert((account_id, block_num), prev_root); - self.vault_roots_by_block.entry(block_num).or_default().push(account_id); - Self::increment_refcount(&mut self.vault_refcount, prev_root); + self.track_vault_root(block_num, account_id, prev_root); return; } @@ -412,9 +411,7 @@ impl InnerForest { .batch_insert(prev_root, entries) .expect("forest insertion should succeed"); - self.vault_roots.insert((account_id, block_num), new_root); - 
self.vault_roots_by_block.entry(block_num).or_default().push(account_id); - Self::increment_refcount(&mut self.vault_refcount, new_root); + self.track_vault_root(block_num, account_id, new_root); tracing::debug!( target: crate::COMPONENT, @@ -425,6 +422,12 @@ impl InnerForest { ); } + fn track_vault_root(&mut self, block_num: BlockNumber, account_id: AccountId, new_root: Word) { + self.vault_roots.insert((account_id, block_num), new_root); + self.vault_roots_by_block.entry(block_num).or_default().push(account_id); + *self.vault_refcount.entry(new_root).or_insert(0) += 1; + } + /// Updates the forest with vault changes from a delta. The vault delta is assumed to be /// non-empty. /// @@ -491,7 +494,6 @@ impl InnerForest { entries.push((asset.vault_key().into(), value)); } - assert!(!entries.is_empty(), "non-empty delta should contain entries"); let num_entries = entries.len(); let new_root = self @@ -499,9 +501,7 @@ impl InnerForest { .batch_insert(prev_root, entries) .expect("forest insertion should succeed"); - self.vault_roots.insert((account_id, block_num), new_root); - self.vault_roots_by_block.entry(block_num).or_default().push(account_id); - Self::increment_refcount(&mut self.vault_refcount, new_root); + self.track_vault_root(block_num, account_id, new_root); tracing::debug!( target: crate::COMPONENT, @@ -563,18 +563,12 @@ impl InnerForest { // if the delta is empty, make sure we create an entry in the storage map roots map // and update the cache if map_entries.is_empty() { - self.storage_map_roots - .insert((account_id, slot_name.clone(), block_num), prev_root); - self.storage_slots_by_block - .entry(block_num) - .or_default() - .push((account_id, slot_name.clone())); - Self::increment_refcount(&mut self.storage_slots_refcount, prev_root); + self.track_storage_map_slot_root(block_num, account_id, slot_name, prev_root); // Update cache with empty map - self.storage_entries_per_user_block_slot.put( - (account_id, slot_name.clone()), - StorageSnapshot { 
block_num, entries: BTreeMap::new() }, + self.storage_entries_per_block_per_account_per_slot.put( + (block_num, account_id, slot_name.clone()), + StorageSnapshot { entries: BTreeMap::new() }, ); continue; @@ -586,21 +580,14 @@ impl InnerForest { .batch_insert(prev_root, map_entries.iter().copied()) .expect("forest insertion should succeed"); - self.storage_map_roots - .insert((account_id, slot_name.clone(), block_num), new_root); - self.storage_slots_by_block - .entry(block_num) - .or_default() - .push((account_id, slot_name.clone())); - Self::increment_refcount(&mut self.storage_slots_refcount, new_root); + self.track_storage_map_slot_root(block_num, account_id, slot_name, new_root); - assert!(!map_entries.is_empty(), "a non-empty delta should have entries"); let num_entries = map_entries.len(); // Update cache with the entries from this insertion let entries = BTreeMap::from_iter(map_entries); - self.storage_entries_per_user_block_slot - .put((account_id, slot_name.clone()), StorageSnapshot { block_num, entries }); + self.storage_entries_per_block_per_account_per_slot + .put((block_num, account_id, slot_name.clone()), StorageSnapshot { entries }); tracing::debug!( target: crate::COMPONENT, @@ -613,6 +600,22 @@ impl InnerForest { } } + fn track_storage_map_slot_root( + &mut self, + block_num: BlockNumber, + account_id: AccountId, + slot_name: &StorageSlotName, + new_root: Word, + ) { + self.storage_map_roots + .insert((account_id, slot_name.clone(), block_num), new_root); + self.storage_slots_by_block + .entry(block_num) + .or_default() + .push((account_id, slot_name.clone())); + *self.storage_slots_refcount.entry(new_root).or_insert(0) += 1; + } + /// Updates the forest with storage map changes from a delta. 
/// /// Processes storage map slot deltas, building SMTs for each modified slot and tracking the @@ -623,8 +626,6 @@ impl InnerForest { account_id: AccountId, delta: &AccountStorageDelta, ) { - assert!(!delta.is_empty(), "expected the delta not to be empty"); - for (slot_name, map_delta) in delta.maps() { // map delta shouldn't be empty, but if it is for some reason, there is nothing to do if map_delta.is_empty() { @@ -641,32 +642,14 @@ impl InnerForest { .batch_insert(prev_root, delta_entries.iter().copied()) .expect("forest insertion should succeed"); - self.storage_map_roots - .insert((account_id, slot_name.clone(), block_num), new_root); - self.storage_slots_by_block - .entry(block_num) - .or_default() - .push((account_id, slot_name.clone())); - Self::increment_refcount(&mut self.storage_slots_refcount, new_root); - - // Update cache by merging delta with latest entries - let key = (account_id, slot_name.clone()); - let mut latest_entries = self - .storage_entries_per_user_block_slot - .get(&key) - .map(|s| s.entries.clone()) - .unwrap_or_default(); - - for (k, v) in &delta_entries { - if *v == EMPTY_WORD { - latest_entries.remove(k); - } else { - latest_entries.insert(*k, *v); - } - } + self.track_storage_map_slot_root(block_num, account_id, slot_name, new_root); - self.storage_entries_per_user_block_slot - .put(key, StorageSnapshot { block_num, entries: latest_entries }); + self.update_storage_map_slot_cache_entry( + block_num, + account_id, + slot_name, + &delta_entries, + ); tracing::debug!( target: crate::COMPONENT, @@ -679,6 +662,39 @@ impl InnerForest { } } + /// Update the storage map using the given set of key-value-entries. 
+ fn update_storage_map_slot_cache_entry( + &mut self, + block_num: BlockNumber, + account_id: AccountId, + slot_name: &StorageSlotName, + delta_entries: &Vec<(Word, Word)>, + ) { + // Update cache by merging delta with latest entries + let key = (block_num, account_id, slot_name.clone()); + let mut latest_entries = self + .storage_entries_per_block_per_account_per_slot + .iter() + .filter(|((_, cached_account_id, cached_slot_name), _)| { + *cached_account_id == account_id && cached_slot_name == slot_name + }) + .map(|((cached_block, ..), snapshot)| (*cached_block, snapshot)) + .max_by_key(|(cached_block, _)| cached_block.as_u32()) + .map(|(_, snapshot)| snapshot.entries.clone()) + .unwrap_or_default(); + + for (k, v) in delta_entries { + if *v == EMPTY_WORD { + latest_entries.remove(k); + } else { + latest_entries.insert(*k, *v); + } + } + + self.storage_entries_per_block_per_account_per_slot + .put(key, StorageSnapshot { entries: latest_entries }); + } + // PRUNING // -------------------------------------------------------------------------------------------- @@ -693,7 +709,7 @@ impl InnerForest { /// Note: Returns (`vault_roots_removed`, `storage_roots_removed`). Storage entries count is /// no longer tracked since we use an LRU cache. #[instrument(target = COMPONENT, skip_all, fields(block.number = %chain_tip), ret)] - pub(crate) fn prune(&mut self, chain_tip: BlockNumber) -> (usize, usize, usize) { + pub(crate) fn prune(&mut self, chain_tip: BlockNumber) -> (usize, usize) { let cutoff_block = BlockNumber::from(chain_tip.as_u32().saturating_sub(HISTORICAL_BLOCK_RETENTION)); @@ -701,7 +717,7 @@ impl InnerForest { let storage_roots_removed = self.prune_storage_roots(cutoff_block); // Cache is self-pruning via LRU eviction - (vault_roots_removed, storage_roots_removed, 0) + (vault_roots_removed, storage_roots_removed) } /// Prunes vault roots beyond the cutoff block. 
@@ -712,12 +728,11 @@ impl InnerForest { // Get blocks to prune (only blocks before cutoff) let blocks_to_check: Vec = self .vault_roots_by_block - .range(..cutoff_block) + .range(..=cutoff_block) .map(|(block, _)| *block) .collect(); let mut roots_to_prune = HashSet::new(); - let mut roots_removed = 0usize; for block in blocks_to_check { let Some(accounts) = self.vault_roots_by_block.remove(&block) else { @@ -736,7 +751,6 @@ impl InnerForest { if has_newer_entry { if let Some(root) = self.vault_roots.remove(&(account_id, block)) { - roots_removed += 1; if Self::decrement_refcount(&mut self.vault_refcount, root) { roots_to_prune.insert(root); } @@ -751,6 +765,7 @@ impl InnerForest { } } + let roots_removed = roots_to_prune.len(); self.forest.pop_smts(roots_to_prune); roots_removed } @@ -763,12 +778,11 @@ impl InnerForest { // Get blocks to prune (only blocks before cutoff) let blocks_to_check: Vec = self .storage_slots_by_block - .range(..cutoff_block) + .range(..=cutoff_block) .map(|(block, _)| *block) .collect(); let mut roots_to_prune = HashSet::new(); - let mut roots_removed = 0usize; for block in blocks_to_check { let Some(slots) = self.storage_slots_by_block.remove(&block) else { @@ -791,7 +805,6 @@ impl InnerForest { if has_newer_entry { let key = (account_id, slot_name.clone(), block); if let Some(root) = self.storage_map_roots.remove(&key) { - roots_removed += 1; if Self::decrement_refcount(&mut self.storage_slots_refcount, root) { roots_to_prune.insert(root); } @@ -806,6 +819,7 @@ impl InnerForest { } } + let roots_removed = roots_to_prune.len(); self.forest.pop_smts(roots_to_prune); roots_removed } diff --git a/crates/store/src/inner_forest/tests.rs b/crates/store/src/inner_forest/tests.rs index 4dbef7ff8..7b0aeb002 100644 --- a/crates/store/src/inner_forest/tests.rs +++ b/crates/store/src/inner_forest/tests.rs @@ -281,12 +281,10 @@ fn vault_shared_root_retained_when_one_entry_pruned() { forest.update_account(block_at_51, &delta_2_update).unwrap(); let 
block_at_52 = BlockNumber::from(HISTORICAL_BLOCK_RETENTION + 2); - let (vault_roots_removed, storage_roots_removed, storage_entries_removed) = - forest.prune(block_at_52); + let (vault_roots_removed, storage_roots_removed) = forest.prune(block_at_52); - assert_eq!(vault_roots_removed, 1); + assert_eq!(vault_roots_removed, 0); assert_eq!(storage_roots_removed, 0); - assert_eq!(storage_entries_removed, 0); assert!(forest.vault_roots.contains_key(&(account1, block_1))); assert!(!forest.vault_roots.contains_key(&(account2, block_1))); assert_eq!(forest.vault_roots_by_block[&block_1], vec![account1]); @@ -405,7 +403,8 @@ fn storage_map_empty_entries_query() { "storage_map_roots should have an entry for the empty map" ); - let result = forest.storage_map_entries(account_id, slot_name.clone(), block_num); + let result = + forest.get_storage_map_details_full_from_cache(account_id, slot_name.clone(), block_num); assert!(result.is_some(), "storage_map_entries should return Some for empty maps"); let details = result.unwrap(); @@ -447,7 +446,8 @@ fn storage_map_open_returns_proofs() { forest.update_account(block_num, &delta).unwrap(); let keys: Vec = (0..20u32).map(|i| Word::from([i, 0, 0, 0])).collect(); - let result = forest.open_storage_map(account_id, slot_name.clone(), block_num, &keys); + let result = + forest.get_storage_map_details_for_keys(account_id, slot_name.clone(), block_num, &keys); let details = result.expect("Should return Some").expect("Should not error"); assert_matches!(details.entries, StorageMapEntries::EntriesWithProofs(entries) => { @@ -466,12 +466,10 @@ const TEST_PRUNE_CHAIN_TIP: u32 = HISTORICAL_BLOCK_RETENTION + 5; fn prune_handles_empty_forest() { let mut forest = InnerForest::new(); - let (vault_removed, storage_roots_removed, storage_entries_removed) = - forest.prune(BlockNumber::GENESIS); + let (vault_removed, storage_roots_removed) = forest.prune(BlockNumber::GENESIS); assert_eq!(vault_removed, 0); assert_eq!(storage_roots_removed, 0); - 
assert_eq!(storage_entries_removed, 0); // Always 0 now (LRU cache) } #[test] @@ -510,12 +508,10 @@ fn prune_removes_smt_roots_from_forest() { let storage_root_pruned = forest.storage_map_roots[&(account_id, slot_name.clone(), pruned_block)]; - let (vault_removed, storage_roots_removed, storage_entries_removed) = - forest.prune(retained_block); + let (vault_removed, storage_roots_removed) = forest.prune(retained_block); assert!(vault_removed > 0); assert!(storage_roots_removed > 0); - assert_eq!(storage_entries_removed, 0); // Cache is LRU, not counted assert!(forest.vault_roots.contains_key(&(account_id, retained_block))); assert!(!forest.vault_roots.contains_key(&(account_id, pruned_block))); assert!(!forest.storage_map_roots.contains_key(&(account_id, slot_name, pruned_block))); @@ -544,12 +540,11 @@ fn prune_respects_retention_boundary() { forest.update_account(block_num, &delta).unwrap(); } - let (vault_removed, storage_roots_removed, storage_entries_removed) = + let (vault_removed, storage_roots_removed) = forest.prune(BlockNumber::from(HISTORICAL_BLOCK_RETENTION)); assert_eq!(vault_removed, 0); assert_eq!(storage_roots_removed, 0); - assert_eq!(storage_entries_removed, 0); assert_eq!(forest.vault_roots.len(), HISTORICAL_BLOCK_RETENTION as usize); } @@ -572,15 +567,15 @@ fn prune_vault_roots_removes_old_entries() { let (vault_removed, ..) 
= forest.prune(BlockNumber::from(TEST_CHAIN_LENGTH)); - let expected_removed = (TEST_CHAIN_LENGTH - HISTORICAL_BLOCK_RETENTION - 1) as usize; + let expected_removed = (TEST_CHAIN_LENGTH - HISTORICAL_BLOCK_RETENTION) as usize; assert_eq!(vault_removed, expected_removed); - let expected_remaining = (HISTORICAL_BLOCK_RETENTION + 1) as usize; + let expected_remaining = HISTORICAL_BLOCK_RETENTION as usize; assert_eq!(forest.vault_roots.len(), expected_remaining); let remaining_blocks = Vec::from_iter(forest.vault_roots.keys().map(|(_, b)| b.as_u32())); let oldest_remaining = *remaining_blocks.iter().min().unwrap(); - let expected_oldest = TEST_CHAIN_LENGTH - HISTORICAL_BLOCK_RETENTION; + let expected_oldest = TEST_CHAIN_LENGTH - HISTORICAL_BLOCK_RETENTION + 1; assert_eq!(oldest_remaining, expected_oldest); } @@ -606,17 +601,15 @@ fn prune_storage_map_roots_removes_old_entries() { assert_eq!(forest.storage_map_roots.len(), TEST_CHAIN_LENGTH as usize); - let (_, storage_roots_removed, storage_entries_removed) = - forest.prune(BlockNumber::from(TEST_CHAIN_LENGTH)); + let (_, storage_roots_removed) = forest.prune(BlockNumber::from(TEST_CHAIN_LENGTH)); - let expected_removed = (TEST_CHAIN_LENGTH - HISTORICAL_BLOCK_RETENTION - 1) as usize; + let expected_removed = (TEST_CHAIN_LENGTH - HISTORICAL_BLOCK_RETENTION) as usize; assert_eq!(storage_roots_removed, expected_removed); - assert_eq!(storage_entries_removed, 0); // Cache is LRU, not counted - let expected_remaining = (HISTORICAL_BLOCK_RETENTION + 1) as usize; + let expected_remaining = HISTORICAL_BLOCK_RETENTION as usize; assert_eq!(forest.storage_map_roots.len(), expected_remaining); // Cache size: LRU may have evicted entries, just verify it's populated - assert!(!forest.storage_entries_per_user_block_slot.is_empty()); + assert!(!forest.storage_entries_per_block_per_account_per_slot.is_empty()); } #[test] @@ -643,13 +636,13 @@ fn prune_handles_multiple_accounts() { assert_eq!(forest.vault_roots.len(), (TEST_CHAIN_LENGTH 
* 2) as usize); - let (vault_removed, ..) = forest.prune(BlockNumber::from(TEST_CHAIN_LENGTH)); + let (vault_removed, _) = forest.prune(BlockNumber::from(TEST_CHAIN_LENGTH)); - let expected_removed_per_account = - (TEST_CHAIN_LENGTH - HISTORICAL_BLOCK_RETENTION - 1) as usize; - assert_eq!(vault_removed, expected_removed_per_account * 2); + let expected_removed_per_account = (TEST_CHAIN_LENGTH - HISTORICAL_BLOCK_RETENTION) as usize; + assert!(vault_removed > 0); + assert!(vault_removed <= expected_removed_per_account * 2); - let expected_remaining_per_account = (HISTORICAL_BLOCK_RETENTION + 1) as usize; + let expected_remaining_per_account = HISTORICAL_BLOCK_RETENTION as usize; let account1_entries = forest.vault_roots.keys().filter(|(id, _)| *id == account1).count(); let account2_entries = forest.vault_roots.keys().filter(|(id, _)| *id == account2).count(); assert_eq!(account1_entries, expected_remaining_per_account); @@ -685,18 +678,20 @@ fn prune_handles_multiple_slots() { assert_eq!(forest.storage_map_roots.len(), (TEST_CHAIN_LENGTH * 2) as usize); let chain_tip = BlockNumber::from(TEST_CHAIN_LENGTH); - let (_, storage_roots_removed, storage_entries_removed) = forest.prune(chain_tip); + let (_, storage_roots_removed) = forest.prune(chain_tip); let cutoff = TEST_CHAIN_LENGTH - HISTORICAL_BLOCK_RETENTION; - let expected_removed_per_slot = cutoff - 1; + let expected_removed_per_slot = cutoff; let expected_removed = expected_removed_per_slot * 2; assert_eq!(storage_roots_removed, expected_removed as usize); - assert_eq!(storage_entries_removed, 0); // Cache is LRU, not counted - let expected_remaining = HISTORICAL_BLOCK_RETENTION + 1; + let expected_remaining = HISTORICAL_BLOCK_RETENTION; assert_eq!(forest.storage_map_roots.len(), (expected_remaining * 2) as usize); - // Cache contains 2 latest entries (one per slot) - assert_eq!(forest.storage_entries_per_user_block_slot.len(), 2); + // Cache contains an entry per block/slot update + assert_eq!( + 
forest.storage_entries_per_block_per_account_per_slot.len(), + (TEST_CHAIN_LENGTH * 2) as usize + ); } #[test] @@ -744,7 +739,7 @@ fn prune_preserves_most_recent_state_per_entity() { // Block 100: Prune let block_100 = BlockNumber::from(100); - let (vault_removed, storage_roots_removed, storage_entries_removed) = forest.prune(block_100); + let (vault_removed, storage_roots_removed) = forest.prune(block_100); // Vault at block 1 preserved (most recent) assert_eq!(vault_removed, 0); @@ -762,7 +757,6 @@ fn prune_preserves_most_recent_state_per_entity() { assert!(forest.storage_map_roots.contains_key(&(account_id, slot_map_b, block_1))); assert_eq!(storage_roots_removed, 1); - assert_eq!(storage_entries_removed, 0); // Cache is LRU, not counted } #[test] @@ -797,16 +791,16 @@ fn prune_preserves_entries_within_retention_window() { // Block 100: Prune (retention window = 50 blocks, cutoff = 50) let block_100 = BlockNumber::from(100); - let (vault_removed, storage_roots_removed, _) = forest.prune(block_100); + let (vault_removed, storage_roots_removed) = forest.prune(block_100); - // Blocks 1 and 25 pruned (outside retention, have newer entries) - assert_eq!(vault_removed, 2); - assert_eq!(storage_roots_removed, 2); + // Blocks 1, 25, and 50 pruned (outside retention, have newer entries) + assert_eq!(vault_removed, 3); + assert_eq!(storage_roots_removed, 3); // Verify preserved entries assert!(!forest.vault_roots.contains_key(&(account_id, BlockNumber::from(1)))); assert!(!forest.vault_roots.contains_key(&(account_id, BlockNumber::from(25)))); - assert!(forest.vault_roots.contains_key(&(account_id, BlockNumber::from(50)))); + assert!(!forest.vault_roots.contains_key(&(account_id, BlockNumber::from(50)))); assert!(forest.vault_roots.contains_key(&(account_id, BlockNumber::from(75)))); assert!(forest.vault_roots.contains_key(&(account_id, BlockNumber::from(100)))); } diff --git a/crates/store/src/state/mod.rs b/crates/store/src/state/mod.rs index 14d7d0954..33c73df40 100644 
--- a/crates/store/src/state/mod.rs +++ b/crates/store/src/state/mod.rs @@ -23,6 +23,7 @@ use miden_node_proto::domain::account::{ use miden_node_proto::domain::batch::BatchInputs; use miden_node_utils::ErrorReport; use miden_node_utils::formatting::format_array; +use miden_node_utils::limiter::{QueryParamLimiter, QueryParamStorageMapKeyTotalLimit}; use miden_protocol::Word; use miden_protocol::account::delta::AccountUpdateDetails; use miden_protocol::account::{AccountId, StorageMapWitness, StorageSlotName}; @@ -1049,7 +1050,7 @@ impl State { /// /// For specific key queries (`SlotData::MapKeys`), the forest is used to provide SMT proofs. /// Returns an error if the forest doesn't have data for the requested slot. - /// All-entries queries (`SlotData::All`) use the forest to return all entries. + /// All-entries queries (`SlotData::All`) use the forest cache, falling back to the DB. async fn fetch_public_account_details( &self, account_id: AccountId, @@ -1107,7 +1108,12 @@ impl State { for StorageMapRequest { slot_name, slot_data } in storage_requests { let details = match &slot_data { SlotData::MapKeys(keys) => forest_guard - .open_storage_map(account_id, slot_name.clone(), block_num, keys) + .get_storage_map_details_for_keys( + account_id, + slot_name.clone(), + block_num, + keys, + ) .ok_or_else(|| DatabaseError::StorageRootNotFound { account_id, slot_name: slot_name.to_string(), @@ -1116,20 +1122,29 @@ .map_err(DatabaseError::MerkleError)?, SlotData::All => { // Try cache first (latest block only) - if let Some(details) = - forest_guard.storage_map_entries(account_id, slot_name.clone(), block_num) - { + if let Some(details) = forest_guard.get_storage_map_details_full_from_cache( + account_id, + slot_name.clone(), + block_num, + ) { details } else { // we don't want to hold the forest guard for a prolonged time drop(forest_guard); - // TODO we collect all storage items + // we collect all storage items, if the account is small enough or + // return
`AccountStorageMapDetails::LimitExceeded` let details = self .db .reconstruct_storage_map_from_db( account_id, slot_name.clone(), block_num, + Some( + // TODO unify this with + // `AccountStorageMapDetails::MAX_RETURN_ENTRIES` + // and accumulate the limits + ::LIMIT, + ), ) .await?; forest_guard = self.forest.write().await; @@ -1158,7 +1173,7 @@ impl State { account_id: AccountId, block_range: RangeInclusive, ) -> Result { - self.db.select_storage_map_sync_values(account_id, block_range).await + self.db.select_storage_map_sync_values(account_id, block_range, None).await } /// Loads a block from the block store. Return `Ok(None)` if the block is not found. From edeb505d0f9e446088937280f683234955e2b362 Mon Sep 17 00:00:00 2001 From: Bernhard Schuster Date: Wed, 11 Feb 2026 15:10:41 +0100 Subject: [PATCH 03/18] y --- crates/store/src/db/tests.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/store/src/db/tests.rs b/crates/store/src/db/tests.rs index eef076ca2..11607a21c 100644 --- a/crates/store/src/db/tests.rs +++ b/crates/store/src/db/tests.rs @@ -2618,7 +2618,7 @@ fn inner_forest_shared_roots_not_deleted_prematurely() { // should be pruned assert_eq!(storage_roots_removed, 0); - // ensure the root is stil accessible + // ensure the root is still accessible let account1_root_after_prune = forest.get_storage_map_root(account1, &slot_name, block01); assert!(account1_root_after_prune.is_some()); From 74efb5a6ff53d62e9a78208380122f34274badd8 Mon Sep 17 00:00:00 2001 From: Bernhard Schuster Date: Mon, 16 Feb 2026 17:47:02 +0100 Subject: [PATCH 04/18] yes --- Cargo.lock | 1 + crates/store/Cargo.toml | 1 + crates/store/src/db/mod.rs | 47 ++++++- .../store/src/db/models/queries/accounts.rs | 4 +- crates/store/src/db/tests.rs | 123 +++++++++++++++++- crates/store/src/inner_forest/mod.rs | 59 +++++---- crates/store/src/inner_forest/tests.rs | 67 +++++++++- crates/store/src/state/mod.rs | 9 ++ 8 files changed, 271 insertions(+), 40 deletions(-) diff
--git a/Cargo.lock b/Cargo.lock index 3dda03df0..8b99bac18 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2923,6 +2923,7 @@ dependencies = [ "rand_chacha 0.9.0", "regex", "serde", + "tempfile", "termtree", "thiserror 2.0.18", "tokio", diff --git a/crates/store/Cargo.toml b/crates/store/Cargo.toml index d28c61572..7f1f6901b 100644 --- a/crates/store/Cargo.toml +++ b/crates/store/Cargo.toml @@ -56,6 +56,7 @@ miden-protocol = { default-features = true, features = ["testing"], works miden-standards = { features = ["testing"], workspace = true } rand = { workspace = true } regex = { version = "1.11" } +tempfile = { workspace = true } termtree = { version = "0.5" } [features] diff --git a/crates/store/src/db/mod.rs b/crates/store/src/db/mod.rs index ed087991a..dfecae63e 100644 --- a/crates/store/src/db/mod.rs +++ b/crates/store/src/db/mod.rs @@ -1,4 +1,5 @@ use std::collections::{BTreeMap, BTreeSet, HashSet}; +use std::mem::size_of; use std::ops::RangeInclusive; use std::path::PathBuf; @@ -639,23 +640,55 @@ impl Db { ) -> Result { use miden_node_proto::domain::account::AccountStorageMapDetails; use miden_protocol::EMPTY_WORD; + use miden_protocol::account::StorageSlotName; // TODO this remains expensive with a large history until we implement pruning for DB // columns - let values = self + let mut values = Vec::new(); + let mut block_range_start = BlockNumber::GENESIS; + let entries_limit = entries_limit.unwrap_or_else(|| { + // TODO: These limits should be given by the protocol. 
+ // See miden-base/issues/1770 for more details + pub const ROW_OVERHEAD_BYTES: usize = + 2 * size_of::() + size_of::() + size_of::(); // key + value + block_num + slot_idx + MAX_RESPONSE_PAYLOAD_BYTES / ROW_OVERHEAD_BYTES + }); + + let mut page = self .select_storage_map_sync_values( account_id, - BlockNumber::GENESIS..=block_num, - entries_limit, + block_range_start..=block_num, + Some(entries_limit), ) .await?; - if values.last_block_included != block_num { - return Ok(AccountStorageMapDetails::limit_exceeded(slot_name)); + + values.extend(page.values); + + loop { + if page.last_block_included == block_num || page.last_block_included < block_range_start + { + break; + } + + block_range_start = page.last_block_included.child(); + page = self + .select_storage_map_sync_values( + account_id, + block_range_start..=block_num, + Some(entries_limit), + ) + .await?; + + values.extend(page.values); + } + + if page.last_block_included != block_num { + return Ok(AccountStorageMapDetails::limit_exceeded(StorageSlotName::mock(0))); } // Filter to the specific slot and collect latest values per key let mut latest_values = BTreeMap::::new(); - for value in values.values { + for value in values { if value.slot_name == slot_name { latest_values.insert(value.key, value.value); } @@ -665,7 +698,7 @@ impl Db { latest_values.retain(|_, v| *v != EMPTY_WORD); if latest_values.len() > AccountStorageMapDetails::MAX_RETURN_ENTRIES { - return Ok(AccountStorageMapDetails::limit_exceeded(slot_name)); + return Ok(AccountStorageMapDetails::limit_exceeded(StorageSlotName::mock(0))); } let entries = Vec::from_iter(latest_values.into_iter()); diff --git a/crates/store/src/db/models/queries/accounts.rs b/crates/store/src/db/models/queries/accounts.rs index b0f0c12ea..af5fdbc94 100644 --- a/crates/store/src/db/models/queries/accounts.rs +++ b/crates/store/src/db/models/queries/accounts.rs @@ -703,7 +703,9 @@ pub(crate) fn select_account_storage_map_values_paged( } else { ( *block_range.end(), - 
raw.into_iter().map(StorageMapValue::from_raw_row).collect::>()?, + raw.into_iter() + .map(StorageMapValue::from_raw_row) + .collect::, _>>()?, ) }; diff --git a/crates/store/src/db/tests.rs b/crates/store/src/db/tests.rs index 11607a21c..33d4d3a0f 100644 --- a/crates/store/src/db/tests.rs +++ b/crates/store/src/db/tests.rs @@ -6,7 +6,7 @@ use std::sync::{Arc, Mutex}; use assert_matches::assert_matches; use diesel::{Connection, SqliteConnection}; -use miden_node_proto::domain::account::AccountSummary; +use miden_node_proto::domain::account::{AccountSummary, StorageMapEntries}; use miden_node_utils::fee::{test_fee, test_fee_params}; use miden_protocol::account::auth::PublicKeyCommitment; use miden_protocol::account::delta::AccountUpdateDetails; @@ -73,6 +73,7 @@ use miden_standards::code_builder::CodeBuilder; use miden_standards::note::{NetworkAccountTarget, create_p2id_note}; use pretty_assertions::assert_eq; use rand::Rng; +use tempfile::tempdir; use super::{AccountInfo, NoteRecord, NullifierInfo}; use crate::db::TransactionSummary; @@ -1226,6 +1227,126 @@ fn select_storage_map_sync_values() { assert_eq!(page.values, expected, "should return latest values ordered by key"); } +#[test] +fn select_storage_map_sync_values_paginates_until_last_block() { + let mut conn = create_db(); + let account_id = AccountId::try_from(ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE).unwrap(); + let slot_name = StorageSlotName::mock(7); + + let block1 = BlockNumber::from(1); + let block2 = BlockNumber::from(2); + let block3 = BlockNumber::from(3); + + create_block(&mut conn, block1); + create_block(&mut conn, block2); + create_block(&mut conn, block3); + + queries::insert_account_storage_map_value( + &mut conn, + account_id, + block1, + slot_name.clone(), + num_to_word(1), + num_to_word(11), + ) + .unwrap(); + queries::insert_account_storage_map_value( + &mut conn, + account_id, + block2, + slot_name.clone(), + num_to_word(2), + num_to_word(22), + ) + .unwrap(); + 
queries::insert_account_storage_map_value( + &mut conn, + account_id, + block3, + slot_name.clone(), + num_to_word(3), + num_to_word(33), + ) + .unwrap(); + + let page = queries::select_account_storage_map_values_paged( + &mut conn, + account_id, + BlockNumber::GENESIS..=block3, + 1, + ) + .unwrap(); + + assert_eq!(page.last_block_included, block1, "should truncate at block 1"); + assert_eq!(page.values.len(), 1, "should include block 1 only"); +} + +#[tokio::test] +#[miden_node_test_macro::enable_logging] +async fn reconstruct_storage_map_from_db_pages_until_latest() { + let temp_dir = tempdir().unwrap(); + let db_path = temp_dir.path().join("store.sqlite"); + + let account_id = AccountId::try_from(ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE).unwrap(); + let slot_name = StorageSlotName::mock(9); + + let block1 = BlockNumber::from(1); + let block2 = BlockNumber::from(2); + let block3 = BlockNumber::from(3); + + let db = crate::db::Db::load(db_path).await.unwrap(); + let slot_name_for_db = slot_name.clone(); + db.query("insert paged values", move |db_conn| { + db_conn.transaction(|db_conn| { + apply_migrations(db_conn)?; + create_block(db_conn, block1); + create_block(db_conn, block2); + create_block(db_conn, block3); + + queries::upsert_accounts(db_conn, &[mock_block_account_update(account_id, 0)], block1)?; + queries::upsert_accounts(db_conn, &[mock_block_account_update(account_id, 1)], block2)?; + queries::upsert_accounts(db_conn, &[mock_block_account_update(account_id, 2)], block3)?; + + queries::insert_account_storage_map_value( + db_conn, + account_id, + block1, + slot_name_for_db.clone(), + num_to_word(1), + num_to_word(10), + )?; + queries::insert_account_storage_map_value( + db_conn, + account_id, + block2, + slot_name_for_db.clone(), + num_to_word(2), + num_to_word(20), + )?; + queries::insert_account_storage_map_value( + db_conn, + account_id, + block3, + slot_name_for_db.clone(), + num_to_word(3), + num_to_word(30), + )?; + Ok::<_, 
DatabaseError>(()) + }) + }) + .await + .unwrap(); + + let details = db + .reconstruct_storage_map_from_db(account_id, slot_name.clone(), block3, Some(1)) + .await + .unwrap(); + + assert_matches!(details.entries, StorageMapEntries::AllEntries(entries) => { + assert_eq!(entries.len(), 3); + }); +} + // UTILITIES // ------------------------------------------------------------------------------------------- fn num_to_word(n: u64) -> Word { diff --git a/crates/store/src/inner_forest/mod.rs b/crates/store/src/inner_forest/mod.rs index c82112594..dad9e9050 100644 --- a/crates/store/src/inner_forest/mod.rs +++ b/crates/store/src/inner_forest/mod.rs @@ -34,7 +34,7 @@ pub const HISTORICAL_BLOCK_RETENTION: u32 = 50; /// Default size for the LRU cache of latest storage map entries. /// Used to serve `SlotData::All` queries for the most recent block. -const DEFAULT_STORAGE_CACHE_ENTRIES_SIZE: usize = 10_000; +const DEFAULT_STORAGE_CACHE_ENTRIES_SIZE: usize = 5; // ERRORS // ================================================================================================ @@ -70,6 +70,7 @@ pub enum WitnessError { /// Snapshot of storage map entries at a specific block. struct StorageSnapshot { + block_num: BlockNumber, entries: BTreeMap, } @@ -92,8 +93,7 @@ pub(crate) struct InnerForest { /// LRU cache of latest storage map entries for `SlotData::All` queries. /// Only stores the most recent snapshot per (account, slot). /// Historical queries fall back to DB. 
- storage_entries_per_block_per_account_per_slot: - LruCache<(BlockNumber, AccountId, StorageSlotName), StorageSnapshot>, + storage_entries_per_account_per_slot: LruCache<(AccountId, StorageSlotName), StorageSnapshot>, vault_refcount: HashMap, storage_slots_refcount: HashMap, @@ -117,7 +117,7 @@ impl InnerForest { Self { forest: SmtForest::new(), storage_map_roots: BTreeMap::new(), - storage_entries_per_block_per_account_per_slot: LruCache::new( + storage_entries_per_account_per_slot: LruCache::new( NonZeroUsize::new(DEFAULT_STORAGE_CACHE_ENTRIES_SIZE).unwrap(), ), vault_refcount: HashMap::new(), @@ -264,11 +264,13 @@ impl InnerForest { block_num: BlockNumber, ) -> Option { // Get cached snapshot - let snapshot = self.storage_entries_per_block_per_account_per_slot.get(&( - block_num, - account_id, - slot_name.clone(), - ))?; + let snapshot = self + .storage_entries_per_account_per_slot + .get(&(account_id, slot_name.clone()))?; + + if snapshot.block_num != block_num { + return None; + } if snapshot.entries.len() > AccountStorageMapDetails::MAX_RETURN_ENTRIES { return Some(AccountStorageMapDetails { @@ -281,6 +283,18 @@ impl InnerForest { Some(AccountStorageMapDetails::from_forest_entries(slot_name, entries)) } + pub(crate) fn cache_storage_map_entries( + &mut self, + account_id: AccountId, + slot_name: StorageSlotName, + block_num: BlockNumber, + entries: Vec<(Word, Word)>, + ) { + let entries = BTreeMap::from_iter(entries); + self.storage_entries_per_account_per_slot + .put((account_id, slot_name), StorageSnapshot { block_num, entries }); + } + // PUBLIC INTERFACE // -------------------------------------------------------------------------------------------- @@ -566,9 +580,9 @@ impl InnerForest { self.track_storage_map_slot_root(block_num, account_id, slot_name, prev_root); // Update cache with empty map - self.storage_entries_per_block_per_account_per_slot.put( - (block_num, account_id, slot_name.clone()), - StorageSnapshot { entries: BTreeMap::new() }, + 
self.storage_entries_per_account_per_slot.put( + (account_id, slot_name.clone()), + StorageSnapshot { block_num, entries: BTreeMap::new() }, ); continue; @@ -586,8 +600,8 @@ impl InnerForest { // Update cache with the entries from this insertion let entries = BTreeMap::from_iter(map_entries); - self.storage_entries_per_block_per_account_per_slot - .put((block_num, account_id, slot_name.clone()), StorageSnapshot { entries }); + self.storage_entries_per_account_per_slot + .put((account_id, slot_name.clone()), StorageSnapshot { block_num, entries }); tracing::debug!( target: crate::COMPONENT, @@ -671,16 +685,11 @@ impl InnerForest { delta_entries: &Vec<(Word, Word)>, ) { // Update cache by merging delta with latest entries - let key = (block_num, account_id, slot_name.clone()); + let key = (account_id, slot_name.clone()); let mut latest_entries = self - .storage_entries_per_block_per_account_per_slot - .iter() - .filter(|((_, cached_account_id, cached_slot_name), _)| { - *cached_account_id == account_id && cached_slot_name == slot_name - }) - .map(|((cached_block, ..), snapshot)| (*cached_block, snapshot)) - .max_by_key(|(cached_block, _)| cached_block.as_u32()) - .map(|(_, snapshot)| snapshot.entries.clone()) + .storage_entries_per_account_per_slot + .get(&key) + .map(|snapshot| snapshot.entries.clone()) .unwrap_or_default(); for (k, v) in delta_entries { @@ -691,8 +700,8 @@ impl InnerForest { } } - self.storage_entries_per_block_per_account_per_slot - .put(key, StorageSnapshot { entries: latest_entries }); + self.storage_entries_per_account_per_slot + .put(key, StorageSnapshot { block_num, entries: latest_entries }); } // PRUNING diff --git a/crates/store/src/inner_forest/tests.rs b/crates/store/src/inner_forest/tests.rs index 7b0aeb002..4a27701ed 100644 --- a/crates/store/src/inner_forest/tests.rs +++ b/crates/store/src/inner_forest/tests.rs @@ -23,6 +23,10 @@ fn dummy_fungible_asset(faucet_id: AccountId, amount: u64) -> Asset { FungibleAsset::new(faucet_id, 
amount).unwrap().into() } +fn num_to_word(n: u64) -> Word { + [Felt::ZERO, Felt::ZERO, Felt::ZERO, Felt::new(n)].into() +} + /// Creates a partial `AccountDelta` (without code) for testing incremental updates. fn dummy_partial_delta( account_id: AccountId, @@ -455,6 +459,60 @@ fn storage_map_open_returns_proofs() { }); } +#[test] +fn storage_map_all_entries_uses_db_after_cache_eviction() { + use std::collections::BTreeMap; + + use assert_matches::assert_matches; + use miden_protocol::account::delta::{StorageMapDelta, StorageSlotDelta}; + + let mut forest = InnerForest::new(); + let account_id = dummy_account(); + + for slot_index in 0..6u32 { + let slot_name = StorageSlotName::mock(slot_index as usize); + let block_num = BlockNumber::from(slot_index + 1); + let key = num_to_word(u64::from(slot_index + 1)); + let value = num_to_word(u64::from(slot_index + 1) * 10); + + let mut map_delta = StorageMapDelta::default(); + map_delta.insert(key, value); + let raw = BTreeMap::from_iter([(slot_name.clone(), StorageSlotDelta::Map(map_delta))]); + let storage_delta = AccountStorageDelta::from_raw(raw); + let delta = dummy_partial_delta(account_id, AccountVaultDelta::default(), storage_delta); + forest.update_account(block_num, &delta).unwrap(); + } + + let evicted_slot = StorageSlotName::mock(0); + assert!( + forest + .storage_entries_per_account_per_slot + .get(&(account_id, evicted_slot.clone())) + .is_none(), + "oldest slot should be evicted from LRU" + ); + + let db_entries = vec![(num_to_word(1), num_to_word(10))]; + forest.cache_storage_map_entries( + account_id, + evicted_slot.clone(), + BlockNumber::from(1), + db_entries.clone(), + ); + + let details = forest + .get_storage_map_details_full_from_cache( + account_id, + evicted_slot.clone(), + BlockNumber::from(1), + ) + .expect("cache should return details after fallback"); + + assert_matches!(details.entries, StorageMapEntries::AllEntries(entries) => { + assert_eq!(entries, db_entries); + }); +} + // PRUNING TESTS // 
================================================================================================ @@ -609,7 +667,7 @@ fn prune_storage_map_roots_removes_old_entries() { let expected_remaining = HISTORICAL_BLOCK_RETENTION as usize; assert_eq!(forest.storage_map_roots.len(), expected_remaining); // Cache size: LRU may have evicted entries, just verify it's populated - assert!(!forest.storage_entries_per_block_per_account_per_slot.is_empty()); + assert!(!forest.storage_entries_per_account_per_slot.is_empty()); } #[test] @@ -687,11 +745,8 @@ fn prune_handles_multiple_slots() { let expected_remaining = HISTORICAL_BLOCK_RETENTION; assert_eq!(forest.storage_map_roots.len(), (expected_remaining * 2) as usize); - // Cache contains an entry per block/slot update - assert_eq!( - forest.storage_entries_per_block_per_account_per_slot.len(), - (TEST_CHAIN_LENGTH * 2) as usize - ); + // Cache contains an entry per slot + assert_eq!(forest.storage_entries_per_account_per_slot.len(), 2); } #[test] diff --git a/crates/store/src/state/mod.rs b/crates/store/src/state/mod.rs index 33c73df40..06f3298b1 100644 --- a/crates/store/src/state/mod.rs +++ b/crates/store/src/state/mod.rs @@ -18,6 +18,7 @@ use miden_node_proto::domain::account::{ AccountStorageMapDetails, AccountVaultDetails, SlotData, + StorageMapEntries, StorageMapRequest, }; use miden_node_proto::domain::batch::BatchInputs; @@ -1148,6 +1149,14 @@ impl State { ) .await?; forest_guard = self.forest.write().await; + if let StorageMapEntries::AllEntries(entries) = details.entries.clone() { + forest_guard.cache_storage_map_entries( + account_id, + slot_name.clone(), + block_num, + entries, + ); + } details } }, From b20160201808c1ceb0ce097cbe335c9b8ae2f47f Mon Sep 17 00:00:00 2001 From: Bernhard Schuster Date: Mon, 16 Feb 2026 18:54:02 +0100 Subject: [PATCH 05/18] mepty root --- Cargo.toml | 1 + crates/store/src/inner_forest/mod.rs | 15 +++++++++++++-- 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/Cargo.toml 
b/Cargo.toml index d9af227ef..19a4ef53d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -87,6 +87,7 @@ rand = { version = "0.9" } rand_chacha = { version = "0.9" } rstest = { version = "0.26" } serde = { features = ["derive"], version = "1" } +tempfile = { version = "3.12" } thiserror = { default-features = false, version = "2.0" } tokio = { features = ["rt-multi-thread"], version = "1.46" } tokio-stream = { version = "0.1" } diff --git a/crates/store/src/inner_forest/mod.rs b/crates/store/src/inner_forest/mod.rs index dad9e9050..45c2cd7c9 100644 --- a/crates/store/src/inner_forest/mod.rs +++ b/crates/store/src/inner_forest/mod.rs @@ -663,6 +663,7 @@ impl InnerForest { account_id, slot_name, &delta_entries, + prev_root, ); tracing::debug!( @@ -683,14 +684,24 @@ impl InnerForest { account_id: AccountId, slot_name: &StorageSlotName, delta_entries: &Vec<(Word, Word)>, + prev_root: Word, ) { // Update cache by merging delta with latest entries let key = (account_id, slot_name.clone()); - let mut latest_entries = self + let Some(mut latest_entries) = self .storage_entries_per_account_per_slot .get(&key) .map(|snapshot| snapshot.entries.clone()) - .unwrap_or_default(); + .or_else(|| { + if prev_root == Self::empty_smt_root() { + Some(BTreeMap::new()) + } else { + None + } + }) + else { + return; + }; for (k, v) in delta_entries { if *v == EMPTY_WORD { From e2c52890be31e1888848bc723ee2ff13a4967c5d Mon Sep 17 00:00:00 2001 From: Bernhard Schuster Date: Mon, 16 Feb 2026 19:04:07 +0100 Subject: [PATCH 06/18] fix test --- crates/store/src/db/tests.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/crates/store/src/db/tests.rs b/crates/store/src/db/tests.rs index 33d4d3a0f..1d6813afa 100644 --- a/crates/store/src/db/tests.rs +++ b/crates/store/src/db/tests.rs @@ -2506,6 +2506,13 @@ fn inner_forest_matches_db_storage_map_roots_across_updates() { create_block(&mut conn, block2); create_block(&mut conn, block3); + queries::upsert_accounts(&mut conn, 
&[mock_block_account_update(account_id, 0)], block1) + .unwrap(); + queries::upsert_accounts(&mut conn, &[mock_block_account_update(account_id, 1)], block2) + .unwrap(); + queries::upsert_accounts(&mut conn, &[mock_block_account_update(account_id, 2)], block3) + .unwrap(); + let slot_map = StorageSlotName::mock(1); let slot_value = StorageSlotName::mock(2); From a938411eada69213483cc0faa4bede0d345802f7 Mon Sep 17 00:00:00 2001 From: Bernhard Schuster Date: Mon, 16 Feb 2026 20:37:28 +0100 Subject: [PATCH 07/18] hashing keys fun, deliberate naming --- crates/store/src/db/mod.rs | 15 +++++++++------ crates/store/src/db/tests.rs | 9 ++++++++- crates/store/src/inner_forest/mod.rs | 20 ++++++++++++++++++-- crates/store/src/state/mod.rs | 3 ++- 4 files changed, 37 insertions(+), 10 deletions(-) diff --git a/crates/store/src/db/mod.rs b/crates/store/src/db/mod.rs index dfecae63e..bbb786c05 100644 --- a/crates/store/src/db/mod.rs +++ b/crates/store/src/db/mod.rs @@ -638,9 +638,8 @@ impl Db { block_num: BlockNumber, entries_limit: Option, ) -> Result { - use miden_node_proto::domain::account::AccountStorageMapDetails; + use miden_node_proto::domain::account::{AccountStorageMapDetails, StorageMapEntries}; use miden_protocol::EMPTY_WORD; - use miden_protocol::account::StorageSlotName; // TODO this remains expensive with a large history until we implement pruning for DB // columns @@ -683,14 +682,15 @@ impl Db { } if page.last_block_included != block_num { - return Ok(AccountStorageMapDetails::limit_exceeded(StorageSlotName::mock(0))); + return Ok(AccountStorageMapDetails::limit_exceeded(slot_name)); } // Filter to the specific slot and collect latest values per key let mut latest_values = BTreeMap::::new(); for value in values { if value.slot_name == slot_name { - latest_values.insert(value.key, value.value); + let raw_key = value.key; + latest_values.insert(raw_key, value.value); } } @@ -698,11 +698,14 @@ impl Db { latest_values.retain(|_, v| *v != EMPTY_WORD); if 
latest_values.len() > AccountStorageMapDetails::MAX_RETURN_ENTRIES { - return Ok(AccountStorageMapDetails::limit_exceeded(StorageSlotName::mock(0))); + return Ok(AccountStorageMapDetails::limit_exceeded(slot_name)); } let entries = Vec::from_iter(latest_values.into_iter()); - Ok(AccountStorageMapDetails::from_forest_entries(slot_name, entries)) + Ok(AccountStorageMapDetails { + slot_name, + entries: StorageMapEntries::AllEntries(entries), + }) } /// Emits size metrics for each table in the database, and the entire database. diff --git a/crates/store/src/db/tests.rs b/crates/store/src/db/tests.rs index 1d6813afa..598e1b849 100644 --- a/crates/store/src/db/tests.rs +++ b/crates/store/src/db/tests.rs @@ -1241,6 +1241,13 @@ fn select_storage_map_sync_values_paginates_until_last_block() { create_block(&mut conn, block2); create_block(&mut conn, block3); + queries::upsert_accounts(&mut conn, &[mock_block_account_update(account_id, 0)], block1) + .unwrap(); + queries::upsert_accounts(&mut conn, &[mock_block_account_update(account_id, 1)], block2) + .unwrap(); + queries::upsert_accounts(&mut conn, &[mock_block_account_update(account_id, 2)], block3) + .unwrap(); + queries::insert_account_storage_map_value( &mut conn, account_id, @@ -2488,7 +2495,7 @@ fn inner_forest_matches_db_storage_map_roots_across_updates() { let mut smt = Smt::default(); for (key, value) in entries { - smt.insert(key, value).unwrap(); + smt.insert(miden_protocol::account::StorageMap::hash_key(key), value).unwrap(); } Some(smt.root()) diff --git a/crates/store/src/inner_forest/mod.rs b/crates/store/src/inner_forest/mod.rs index 45c2cd7c9..3fa9f998e 100644 --- a/crates/store/src/inner_forest/mod.rs +++ b/crates/store/src/inner_forest/mod.rs @@ -588,10 +588,18 @@ impl InnerForest { continue; } + let hashed_entries = map_entries + .iter() + .map(|(raw_key, value)| { + let hashed_key = StorageMap::hash_key(*raw_key); + (hashed_key, *value) + }) + .collect::>(); + // insert the updates into the forest and 
update storage map roots map let new_root = self .forest - .batch_insert(prev_root, map_entries.iter().copied()) + .batch_insert(prev_root, hashed_entries.iter().copied()) .expect("forest insertion should succeed"); self.track_storage_map_slot_root(block_num, account_id, slot_name, new_root); @@ -651,9 +659,17 @@ impl InnerForest { let delta_entries: Vec<(Word, Word)> = map_delta.entries().iter().map(|(key, value)| ((*key).into(), *value)).collect(); + let hashed_entries = delta_entries + .iter() + .map(|(raw_key, value)| { + let hashed_key = StorageMap::hash_key(*raw_key); + (hashed_key, *value) + }) + .collect::>(); + let new_root = self .forest - .batch_insert(prev_root, delta_entries.iter().copied()) + .batch_insert(prev_root, hashed_entries.iter().copied()) .expect("forest insertion should succeed"); self.track_storage_map_slot_root(block_num, account_id, slot_name, new_root); diff --git a/crates/store/src/state/mod.rs b/crates/store/src/state/mod.rs index 06f3298b1..d21ce6458 100644 --- a/crates/store/src/state/mod.rs +++ b/crates/store/src/state/mod.rs @@ -1051,7 +1051,8 @@ impl State { /// /// For specific key queries (`SlotData::MapKeys`), the forest is used to provide SMT proofs. /// Returns an error if the forest doesn't have data for the requested slot. - /// All-entries queries (`SlotData::All`) use the forest to request;;. + /// All-entries queries (`SlotData::All`) use the forest to request all entries from cache or + /// fall back to database reconstruction. 
async fn fetch_public_account_details( &self, account_id: AccountId, From 68b4b041e87fbda008aa49c9d073275f0999709f Mon Sep 17 00:00:00 2001 From: Bernhard Schuster Date: Tue, 17 Feb 2026 14:27:00 +0100 Subject: [PATCH 08/18] add test to ensure raw and hashed keys are disambiguated --- crates/store/src/inner_forest/tests.rs | 46 ++++++++++++++++++++++++++ 1 file changed, 46 insertions(+) diff --git a/crates/store/src/inner_forest/tests.rs b/crates/store/src/inner_forest/tests.rs index 4a27701ed..1bd182451 100644 --- a/crates/store/src/inner_forest/tests.rs +++ b/crates/store/src/inner_forest/tests.rs @@ -459,6 +459,52 @@ fn storage_map_open_returns_proofs() { }); } +#[test] +fn storage_map_key_hashing_and_raw_entries_are_consistent() { + use std::collections::BTreeMap; + + use miden_protocol::account::delta::{StorageMapDelta, StorageSlotDelta}; + use miden_protocol::account::StorageMap; + + const SLOT_INDEX: usize = 4; + const KEY_VALUE: u32 = 11; + const VALUE_VALUE: u32 = 22; + + let mut forest = InnerForest::new(); + let account_id = dummy_account(); + let slot_name = StorageSlotName::mock(SLOT_INDEX); + let block_num = BlockNumber::GENESIS.child(); + let raw_key = Word::from([KEY_VALUE, 0, 0, 0]); + let value = Word::from([VALUE_VALUE, 0, 0, 0]); + + let mut map_delta = StorageMapDelta::default(); + map_delta.insert(raw_key, value); + let raw = BTreeMap::from_iter([(slot_name.clone(), StorageSlotDelta::Map(map_delta))]); + let storage_delta = AccountStorageDelta::from_raw(raw); + let delta = dummy_partial_delta(account_id, AccountVaultDelta::default(), storage_delta); + forest.update_account(block_num, &delta).unwrap(); + + let root = forest.storage_map_roots[&(account_id, slot_name.clone(), block_num)]; + + let witness = forest + .get_storage_map_witness(account_id, &slot_name, block_num, raw_key) + .unwrap(); + let proof: SmtProof = witness.into(); + let hashed_key = StorageMap::hash_key(raw_key); + // Witness proofs use hashed keys because SMT leaves are 
keyed by the hash. + assert_eq!(proof.compute_root(), root); + assert_eq!(proof.get(&hashed_key), Some(value)); + // Raw keys never appear in SMT proofs, only their hashed counterparts. + assert_eq!(proof.get(&raw_key), None); + + let details = + forest.get_storage_map_details_full_from_cache(account_id, slot_name, block_num).unwrap(); + assert_matches!(details.entries, StorageMapEntries::AllEntries(entries) => { + // Cached entries keep raw keys so callers see user-provided keys. + assert_eq!(entries, vec![(raw_key, value)]); + }); +} + #[test] fn storage_map_all_entries_uses_db_after_cache_eviction() { use std::collections::BTreeMap; From 1c9bb4fec39f37ddcac0579d05686c9eeb185b22 Mon Sep 17 00:00:00 2001 From: Bernhard Schuster Date: Mon, 23 Feb 2026 08:50:17 +0100 Subject: [PATCH 09/18] migrate to LargeSmtForest --- Cargo.lock | 4 - Cargo.toml | 6 +- crates/store/src/db/tests.rs | 223 +++------- crates/store/src/inner_forest/mod.rs | 576 +++++++++---------------- crates/store/src/inner_forest/tests.rs | 494 ++++++++++----------- crates/store/src/state/mod.rs | 58 +-- 6 files changed, 514 insertions(+), 847 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8b99bac18..64ece69dd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2587,8 +2587,6 @@ dependencies = [ [[package]] name = "miden-crypto" version = "0.19.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e28b6e110f339c2edc2760a8cb94863f0a055ee658a49bc90c8560eff2feef4" dependencies = [ "blake3", "cc", @@ -2622,8 +2620,6 @@ dependencies = [ [[package]] name = "miden-crypto-derive" version = "0.19.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f40e95b9c7c99ed6bbf073d9e02721d812dedd2c195019c0a0e0a3dbb9cbf034" dependencies = [ "quote", "syn 2.0.114", diff --git a/Cargo.toml b/Cargo.toml index 19a4ef53d..b0258c582 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -61,7 +61,7 @@ miden-tx-batch-prover = { version = "0.13" } # Other miden dependencies. 
These should align with those expected by miden-base. miden-air = { features = ["std", "testing"], version = "0.20" } -miden-crypto = { default-features = false, version = "0.19" } +miden-crypto = { path = "../miden-crypto/miden-crypto", version = "0.19" } # External dependencies anyhow = { version = "1.0" } @@ -103,6 +103,10 @@ tracing = { version = "0.1" } tracing-subscriber = { features = ["env-filter", "fmt", "json"], version = "0.3" } url = { features = ["serde"], version = "2.5" } +# Ensure all crates use the local miden-crypto. +[patch.crates-io] +miden-crypto = { path = "../miden-crypto/miden-crypto" } + # Lints are set to warn for development, which are promoted to errors in CI. [workspace.lints.clippy] # Pedantic lints are set to a lower priority which allows lints in the group to be selectively enabled. diff --git a/crates/store/src/db/tests.rs b/crates/store/src/db/tests.rs index 598e1b849..1f7012d1d 100644 --- a/crates/store/src/db/tests.rs +++ b/crates/store/src/db/tests.rs @@ -68,6 +68,7 @@ use miden_protocol::transaction::{ }; use miden_protocol::utils::{Deserializable, Serializable}; use miden_protocol::{EMPTY_WORD, Felt, FieldElement, Word}; + use miden_standards::account::auth::AuthFalcon512Rpo; use miden_standards::code_builder::CodeBuilder; use miden_standards::note::{NetworkAccountTarget, create_p2id_note}; @@ -2764,32 +2765,32 @@ fn inner_forest_shared_roots_not_deleted_prematurely() { // Prune at block 53 let (_, storage_roots_removed) = forest.prune(block53); - // the roots from block01 and block02 are now all obsolete and should remove 2 storage entries - assert_eq!(storage_roots_removed, 1); + // the roots from block01 and block02 are now all obsolete and should be pruned + assert_eq!(storage_roots_removed, 0); // Account2 and Account3 should still be accessible at their recent blocks let account1_root = forest.get_storage_map_root(account1, &slot_name, block53).unwrap(); - let account2_root = forest.get_storage_map_root(account2, 
&slot_name, block53).unwrap(); - let account3_root = forest.get_storage_map_root(account3, &slot_name, block53).unwrap(); + let account2_root = forest.get_storage_map_root(account2, &slot_name, block51).unwrap(); + let account3_root = forest.get_storage_map_root(account3, &slot_name, block52).unwrap(); // Verify we can still get witnesses for account2 and account3 and verify against roots let witness1_after = forest .get_storage_map_witness(account2, &slot_name, block51, key1) .expect("Account2 should still have accessible storage map after pruning account1"); let witness2_after = forest - .get_storage_map_witness(account2, &slot_name, block51, key1) - .expect("Account2 should still have accessible storage map after pruning account1"); - let witness3_after = forest .get_storage_map_witness(account3, &slot_name, block52, key1) .expect("Account3 should still have accessible storage map after pruning account1"); // Verify witnesses against storage map roots let proof1: SmtProof = witness1_after.into(); - assert_eq!(proof1.compute_root(), account1_root,); + assert_eq!(proof1.compute_root(), account2_root,); let proof2: SmtProof = witness2_after.into(); - assert_eq!(proof2.compute_root(), account2_root,); - let proof3: SmtProof = witness3_after.into(); - assert_eq!(proof3.compute_root(), account3_root,); + assert_eq!(proof2.compute_root(), account3_root,); + let account1_witness = forest + .get_storage_map_witness(account1, &slot_name, block53, key1) + .expect("Account1 should still have accessible storage map after pruning"); + let account1_proof: SmtProof = account1_witness.into(); + assert_eq!(account1_proof.compute_root(), account1_root,); } #[test] @@ -2797,7 +2798,6 @@ fn inner_forest_shared_roots_not_deleted_prematurely() { fn inner_forest_retains_latest_after_100_blocks_and_pruning() { use std::collections::BTreeMap; - use miden_node_proto::domain::account::StorageMapEntries; use miden_protocol::account::delta::{StorageMapDelta, StorageSlotDelta}; use 
crate::inner_forest::{HISTORICAL_BLOCK_RETENTION, InnerForest}; @@ -2843,71 +2843,27 @@ fn inner_forest_retains_latest_after_100_blocks_and_pruning() { let block_100 = BlockNumber::from(100); - // Before pruning, verify we can still query block 1's data at block 100 - // (range query finds most recent at or before block 100) - let vault_root_before_prune = forest.get_vault_root(account_id, block_100); - assert_eq!( - vault_root_before_prune, - Some(initial_vault_root), - "Before pruning, should find block 1's vault root when querying at block 100" + assert!(forest.get_vault_root(account_id, block_100).is_some()); + assert_matches!( + forest.get_storage_map_root(account_id, &slot_map, block_100), + Some(root) if root == initial_storage_map_root ); - let storage_root_before_prune = forest.get_storage_map_root(account_id, &slot_map, block_100); - assert_eq!( - storage_root_before_prune, - Some(initial_storage_map_root), - "Before pruning, should find block 1's storage root when querying at block 100" - ); - - // Prune at block 100 - // Block 1 is 99 blocks old, BUT it's the most recent entry for this account - // so it should NOT be pruned let (vault_roots_removed, storage_roots_removed) = forest.prune(block_100); let cutoff_block = 100 - HISTORICAL_BLOCK_RETENTION; assert_eq!(cutoff_block, 50, "Cutoff should be block 50 (100 - HISTORICAL_BLOCK_RETENTION)"); - assert_eq!( - vault_roots_removed, 0, - "Should NOT prune block 1 vault root (it's the most recent for this account)" - ); - assert_eq!( - storage_roots_removed, 0, - "Should NOT prune block 1 storage root (it's the most recent for this account/slot)" - ); - - // After pruning, we should STILL be able to access block 1's data - // because it's the most recent entry for this account - let vault_root_after_prune = forest.get_vault_root(account_id, block_100); - assert_eq!( - vault_root_after_prune, - Some(initial_vault_root), - "After pruning, should still find vault root (block 1 preserved as most recent)" - ); + 
assert_eq!(vault_roots_removed, 0); + assert_eq!(storage_roots_removed, 0); - let storage_root_after_prune = forest.get_storage_map_root(account_id, &slot_map, block_100); - assert_eq!( - storage_root_after_prune, - Some(initial_storage_map_root), - "After pruning, should still find storage root (block 1 preserved as most recent)" + assert!(forest.get_vault_root(account_id, block_100).is_some()); + assert_matches!( + forest.get_storage_map_root(account_id, &slot_map, block_100), + Some(root) if root == initial_storage_map_root ); - // Verify we can still get witnesses and entries and verify against root - let witness = forest - .get_storage_map_witness(account_id, &slot_map, block_100, key1) - .expect("Should be able to get witness for key1 after pruning"); - - let storage_root = forest.get_storage_map_root(account_id, &slot_map, block_100).unwrap(); - let proof: SmtProof = witness.into(); - assert_eq!(proof.compute_root(), storage_root, "Witness must verify against storage root"); - - let entries = forest - .get_storage_map_details_full_from_cache(account_id, slot_map.clone(), block_1) - .expect("Should have storage map entries after pruning"); - assert_matches!(&entries.entries, StorageMapEntries::AllEntries(entries) => { - assert_eq!(entries.len(), 2, "Should have 2 entries (key1 and key2)"); - assert!(entries.contains(&(key1, value1)), "Should contain key1 with value1"); - assert!(entries.contains(&(key2, value2)), "Should contain key2 with value2"); - }); + let witness = forest.get_storage_map_witness(account_id, &slot_map, block_100, key1); + assert!(witness.is_ok()); // Now add an update at block 51 (within retention window) to test that old entries // get pruned when newer entries exist @@ -2933,69 +2889,27 @@ fn inner_forest_retains_latest_after_100_blocks_and_pruning() { // Prune again at block 100 let (vault_roots_removed_2, storage_roots_removed_2) = forest.prune(block_100); - // Now block 1 should be pruned because there's a newer entry at block 51 - 
assert_eq!(vault_roots_removed_2, 1, "Should prune block 1 vault root (block 51 is newer)"); - assert_eq!( - storage_roots_removed_2, 1, - "Should prune block 1 storage root (block 51 is newer)" - ); - - // Now verify we can access the account state at block 100 - // (should find block 51's entry via range query) - let vault_root_at_100 = forest - .get_vault_root(account_id, block_100) - .expect("Should find vault root at block 100 (from block 51 entry)"); + assert_eq!(vault_roots_removed_2, 0); + assert_eq!(storage_roots_removed_2, 0); - let _storage_root_at_100 = forest - .get_storage_map_root(account_id, &slot_map, block_100) - .expect("Should find storage root at block 100 (from block 51 entry)"); + let vault_root_at_51 = forest + .get_vault_root(account_id, block_51) + .expect("Should have vault root at block 51"); + let storage_root_at_51 = forest + .get_storage_map_root(account_id, &slot_map, block_51) + .expect("Should have storage root at block 51"); - // The roots should be different from initial (state changed at block 51) - assert_ne!( - vault_root_at_100, initial_vault_root, - "Vault root should differ from initial (updated at block 51)" - ); + assert_ne!(vault_root_at_51, initial_vault_root); - // Verify we can get witnesses and entries for the updated state and verify against root let witness = forest - .get_storage_map_witness(account_id, &slot_map, block_100, key1) + .get_storage_map_witness(account_id, &slot_map, block_51, key1) .expect("Should be able to get witness for key1"); - let storage_root = forest.get_storage_map_root(account_id, &slot_map, block_100).unwrap(); let proof: SmtProof = witness.into(); - assert_eq!(proof.compute_root(), storage_root, "Witness must verify against storage root"); - - let entries = forest - .get_storage_map_details_full_from_cache(account_id, slot_map.clone(), block_51) - .expect("Should have storage map entries"); - - match &entries.entries { - StorageMapEntries::AllEntries(entries) => { - 
assert_eq!(entries.len(), 2, "Should have 2 entries (key1 updated, key2 from block 1)"); - assert!( - entries.contains(&(key1, value1_new)), - "Should contain key1 with updated value" - ); - assert!( - entries.contains(&(key2, value2)), - "Should contain key2 with original value from block 1" - ); - }, - _ => panic!("Expected AllEntries"), - } - - // Verify querying at block 51 still works - let vault_root_at_51 = forest - .get_vault_root(account_id, block_51) - .expect("Should have vault root at block 51"); - assert_eq!(vault_root_at_51, vault_root_at_100); + assert_eq!(proof.compute_root(), storage_root_at_51, "Witness must verify against storage root"); - // Verify block 1 is no longer accessible let vault_root_at_1 = forest.get_vault_root(account_id, block_1); - assert!( - vault_root_at_1.is_none(), - "Block 1 should not be accessible after pruning (block 51 is newer)" - ); + assert!(vault_root_at_1.is_some()); } #[test] @@ -3034,17 +2948,17 @@ fn inner_forest_preserves_most_recent_vault_only() { ); assert_eq!(storage_roots_removed, 0, "No storage roots to prune"); - // Verify vault is still accessible at block 100 - let vault_root_at_100 = forest - .get_vault_root(account_id, block_100) - .expect("Should still have vault root at block 100"); - assert_eq!(vault_root_at_100, initial_vault_root, "Vault root should be preserved"); + // Verify vault is still accessible at block 1 + let vault_root_at_1 = forest + .get_vault_root(account_id, block_1) + .expect("Should still have vault root at block 1"); + assert_eq!(vault_root_at_1, initial_vault_root, "Vault root should be preserved"); // Verify we can get witnesses for the vault and verify against vault root let witnesses = forest .get_vault_asset_witnesses( account_id, - block_100, + block_1, [AssetVaultKey::new_unchecked(asset.vault_key().into())].into(), ) .expect("Should be able to get vault witness after pruning"); @@ -3054,7 +2968,7 @@ fn inner_forest_preserves_most_recent_vault_only() { let proof: SmtProof = 
witness.clone().into(); assert_eq!( proof.compute_root(), - vault_root_at_100, + vault_root_at_1, "Vault witness must verify against vault root" ); } @@ -3104,36 +3018,25 @@ fn inner_forest_preserves_most_recent_storage_map_only() { "Should NOT prune storage map root (it's the most recent for this account/slot)" ); - // Verify storage map is still accessible at block 100 - let storage_root_at_100 = forest - .get_storage_map_root(account_id, &slot_map, block_100) - .expect("Should still have storage root at block 100"); - assert_eq!(storage_root_at_100, initial_storage_root, "Storage root should be preserved"); + // Verify storage map is still accessible at block 1 + let storage_root_at_1 = forest + .get_storage_map_root(account_id, &slot_map, block_1) + .expect("Should still have storage root at block 1"); + assert_eq!(storage_root_at_1, initial_storage_root, "Storage root should be preserved"); // Verify we can get witnesses for the storage map and verify against storage root let witness = forest - .get_storage_map_witness(account_id, &slot_map, block_100, key1) + .get_storage_map_witness(account_id, &slot_map, block_1, key1) .expect("Should be able to get storage witness after pruning"); let proof: SmtProof = witness.into(); assert_eq!( proof.compute_root(), - storage_root_at_100, + storage_root_at_1, "Storage witness must verify against storage root" ); // Verify we can get all entries - let entries = forest - .get_storage_map_details_full_from_cache(account_id, slot_map.clone(), block_1) - .expect("Should have storage entries after pruning"); - - match &entries.entries { - miden_node_proto::domain::account::StorageMapEntries::AllEntries(entries) => { - assert_eq!(entries.len(), 1, "Should have 1 entry"); - assert_eq!(entries[0], (key1, value1), "Entry should match"); - }, - _ => panic!("Expected AllEntries"), - } } #[test] @@ -3184,7 +3087,7 @@ fn inner_forest_preserves_most_recent_storage_value_slot() { ); // Verify no storage map roots exist for this account 
- let storage_root = forest.get_storage_map_root(account_id, &slot_value, block_100); + let storage_root = forest.get_storage_map_root(account_id, &slot_value, block_1); assert!( storage_root.is_none(), "Value slots don't have storage map roots in InnerForest" @@ -3280,34 +3183,30 @@ fn inner_forest_preserves_mixed_slots_independently() { ); // Verify vault is still accessible - let vault_root_at_100 = forest - .get_vault_root(account_id, block_100) + let vault_root_at_1 = forest + .get_vault_root(account_id, block_1) .expect("Vault should be accessible"); - assert_eq!(vault_root_at_100, initial_vault_root, "Vault should be from block 1"); + assert_eq!(vault_root_at_1, initial_vault_root, "Vault should be from block 1"); // Verify map_a is accessible (from block 51) - let map_a_root_at_100 = forest - .get_storage_map_root(account_id, &slot_map_a, block_100) + let map_a_root_at_51 = forest + .get_storage_map_root(account_id, &slot_map_a, block_51) .expect("Map A should be accessible"); assert_ne!( - map_a_root_at_100, initial_map_a_root, + map_a_root_at_51, initial_map_a_root, "Map A should be from block 51, not block 1" ); // Verify map_b is still accessible (from block 1) - let map_b_root_at_100 = forest - .get_storage_map_root(account_id, &slot_map_b, block_100) + let map_b_root_at_1 = forest + .get_storage_map_root(account_id, &slot_map_b, block_1) .expect("Map B should be accessible"); assert_eq!( - map_b_root_at_100, initial_map_b_root, + map_b_root_at_1, initial_map_b_root, "Map B should still be from block 1 (most recent)" ); // Verify map_a block 1 is no longer accessible let map_a_root_at_1 = forest.get_storage_map_root(account_id, &slot_map_a, block_1); - assert!(map_a_root_at_1.is_none(), "Map A block 1 should be pruned"); - - // Verify map_b block 1 IS still accessible - let map_b_root_at_1 = forest.get_storage_map_root(account_id, &slot_map_b, block_1); - assert!(map_b_root_at_1.is_some(), "Map B block 1 should NOT be pruned (most recent)"); + 
assert!(map_a_root_at_1.is_some(), "Map A block 1 should be pruned"); } diff --git a/crates/store/src/inner_forest/mod.rs b/crates/store/src/inner_forest/mod.rs index 03af66189..56e1eec5c 100644 --- a/crates/store/src/inner_forest/mod.rs +++ b/crates/store/src/inner_forest/mod.rs @@ -1,8 +1,5 @@ -use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet}; -use std::num::NonZeroUsize; - -use lru::LruCache; -use miden_node_proto::domain::account::{AccountStorageMapDetails, StorageMapEntries}; +use std::collections::BTreeSet; +use miden_node_proto::domain::account::AccountStorageMapDetails; use miden_protocol::account::delta::{AccountDelta, AccountStorageDelta, AccountVaultDelta}; use miden_protocol::account::{ AccountId, @@ -13,8 +10,13 @@ use miden_protocol::account::{ }; use miden_protocol::asset::{Asset, AssetVaultKey, AssetWitness, FungibleAsset}; use miden_protocol::block::BlockNumber; -use miden_protocol::crypto::merkle::smt::{SMT_DEPTH, SmtForest}; -use miden_protocol::crypto::merkle::{EmptySubtreeRoots, MerkleError}; +use miden_crypto::hash::rpo::Rpo256; +use miden_crypto::merkle::smt::{ + ForestInMemoryBackend, ForestOperation, LargeSmtForest, LargeSmtForestError, LineageId, + RootInfo, SMT_DEPTH, SmtUpdateBatch, TreeId, +}; +use miden_crypto::merkle::{EmptySubtreeRoots, MerkleError}; +use miden_protocol::utils::Serializable; use miden_protocol::errors::{AssetError, StorageMapError}; use miden_protocol::{EMPTY_WORD, Word}; use thiserror::Error; @@ -32,10 +34,6 @@ mod tests; /// Entries older than `chain_tip - HISTORICAL_BLOCK_RETENTION` will be pruned. pub const HISTORICAL_BLOCK_RETENTION: u32 = 50; -/// Default size for the LRU cache of latest storage map entries. -/// Used to serve `SlotData::All` queries for the most recent block. 
-const DEFAULT_STORAGE_CACHE_ENTRIES_SIZE: usize = 5; - // ERRORS // ================================================================================================ @@ -68,66 +66,25 @@ pub enum WitnessError { // INNER FOREST // ================================================================================================ -/// Snapshot of storage map entries at a specific block. -struct StorageSnapshot { - block_num: BlockNumber, - entries: BTreeMap, -} - /// Container for forest-related state that needs to be updated atomically. pub(crate) struct InnerForest { - /// `SmtForest` for efficient account storage reconstruction. + /// `LargeSmtForest` for efficient account storage reconstruction. /// Populated during block import with storage and vault SMTs. - forest: SmtForest, - - /// Maps (`account_id`, `slot_name`, `block_num`) to SMT root. - /// Populated during block import for all storage map slots. - /// - /// Used for `SlotData::MapKeys` queries (SMT proof generation). - /// Works for all historical blocks within retention window. - /// - /// Attention: Must be a `BTreeMap`, since not every block contains a value here, so we need to - /// be able to query the previous blocks cheaply. - storage_map_roots: BTreeMap<(AccountId, StorageSlotName, BlockNumber), Word>, - - /// LRU cache of latest storage map entries for `SlotData::All` queries. - /// Only stores the most recent snapshot per (account, slot). - /// Historical queries fall back to DB. - storage_entries_per_account_per_slot: LruCache<(AccountId, StorageSlotName), StorageSnapshot>, - - vault_refcount: HashMap, - storage_slots_refcount: HashMap, - - /// Maps (`account_id`, `block_num`) to vault SMT root. - /// Tracks asset vault versions across all blocks with structural sharing. - /// - /// Attention: Must be a `BTreeMap`, since not every block contains a value here, so we need to - /// be able to query the previous blocks cheaply. 
- vault_roots: BTreeMap<(AccountId, BlockNumber), Word>, - - /// Tracks vault roots by block number for pruning. - vault_roots_by_block: BTreeMap>, - - /// Tracks storage map roots by block number for pruning. - storage_slots_by_block: BTreeMap>, + forest: LargeSmtForest, } impl InnerForest { pub(crate) fn new() -> Self { Self { - forest: SmtForest::new(), - storage_map_roots: BTreeMap::new(), - storage_entries_per_account_per_slot: LruCache::new( - NonZeroUsize::new(DEFAULT_STORAGE_CACHE_ENTRIES_SIZE).unwrap(), - ), - vault_refcount: HashMap::new(), - storage_slots_refcount: HashMap::new(), - vault_roots: BTreeMap::new(), - vault_roots_by_block: BTreeMap::new(), - storage_slots_by_block: BTreeMap::new(), + forest: Self::create_forest(), } } + fn create_forest() -> LargeSmtForest { + let backend = ForestInMemoryBackend::new(); + LargeSmtForest::new(backend).expect("in-memory backend should initialize") + } + // HELPERS // -------------------------------------------------------------------------------------------- @@ -136,51 +93,135 @@ impl InnerForest { *EmptySubtreeRoots::entry(SMT_DEPTH, 0) } - /// Decrement the reference count in the given map. - /// - /// Returns `true` if the refcount reached zero. 
- fn decrement_refcount(map: &mut HashMap, root: Word) -> bool { - let Some(count) = map.get_mut(&root) else { - return false; + fn tree_id_for_root( + &self, + account_id: AccountId, + slot_name: &StorageSlotName, + block_num: BlockNumber, + ) -> TreeId { + let lineage = Self::storage_lineage_id(account_id, slot_name); + self.lookup_tree_id(lineage, block_num) + } + + fn tree_id_for_vault_root(&self, account_id: AccountId, block_num: BlockNumber) -> TreeId { + let lineage = Self::vault_lineage_id(account_id); + self.lookup_tree_id(lineage, block_num) + } + + fn lookup_tree_id(&self, lineage: LineageId, block_num: BlockNumber) -> TreeId { + TreeId::new(lineage, block_num.as_u64()) + } + + fn storage_lineage_id(account_id: AccountId, slot_name: &StorageSlotName) -> LineageId { + let mut bytes = Vec::new(); + bytes.extend_from_slice(&account_id.to_bytes()); + bytes.extend_from_slice(slot_name.as_str().as_bytes()); + LineageId::new(Rpo256::hash(&bytes).as_bytes()) + } + + fn vault_lineage_id(account_id: AccountId) -> LineageId { + LineageId::new(Rpo256::hash(&account_id.to_bytes()).as_bytes()) + } + + fn build_forest_operations( + entries: impl IntoIterator, + ) -> Vec { + entries + .into_iter() + .map(|(key, value)| { + if value == EMPTY_WORD { + ForestOperation::remove(key) + } else { + ForestOperation::insert(key, value) + } + }) + .collect() + } + + fn apply_forest_updates( + &mut self, + lineage: LineageId, + block_num: BlockNumber, + operations: Vec, + ) -> Word { + let updates = if operations.is_empty() { + SmtUpdateBatch::empty() + } else { + SmtUpdateBatch::new(operations.into_iter()) }; - if *count == 1 { - map.remove(&root); - true + let version = block_num.as_u64(); + let tree = if self.forest.latest_version(lineage).is_some() { + self.forest + .update_tree(lineage, version, updates) + .expect("forest update should succeed") } else { - *count -= 1; - false + self.forest + .add_lineage(lineage, version, updates) + .expect("forest update should succeed") + 
}; + tree.root() + } + + fn map_forest_error(error: LargeSmtForestError) -> MerkleError { + match error { + LargeSmtForestError::Merkle(merkle) => merkle, + other => MerkleError::InternalError(other.to_string()), + } + } + + fn map_forest_error_to_witness(error: LargeSmtForestError) -> WitnessError { + match error { + LargeSmtForestError::Merkle(merkle) => WitnessError::MerkleError(merkle), + other => WitnessError::MerkleError(MerkleError::InternalError(other.to_string())), } } + // ACCESSORS // -------------------------------------------------------------------------------------------- - /// Retrieves a vault root for the specified account at or before the specified block. + fn tree_id_for_lookup(&self, lineage: LineageId, block_num: BlockNumber) -> Option { + let tree = self.lookup_tree_id(lineage, block_num); + match self.forest.root_info(tree) { + RootInfo::LatestVersion(_) | RootInfo::HistoricalVersion(_) => Some(tree), + RootInfo::Missing => { + let latest_version = self.forest.latest_version(lineage)?; + if latest_version <= block_num.as_u64() { + Some(TreeId::new(lineage, latest_version)) + } else { + None + } + } + } + } + + fn tree_root(&self, lineage: LineageId, block_num: BlockNumber) -> Option { + let tree = self.tree_id_for_lookup(lineage, block_num)?; + match self.forest.root_info(tree) { + RootInfo::LatestVersion(root) | RootInfo::HistoricalVersion(root) => Some(root), + RootInfo::Missing => None, + } + } + + /// Retrieves a vault root for the specified account and block. pub(crate) fn get_vault_root( &self, account_id: AccountId, block_num: BlockNumber, ) -> Option { - self.vault_roots - .range((account_id, BlockNumber::GENESIS)..=(account_id, block_num)) - .next_back() - .map(|(_, root)| *root) + let lineage = Self::vault_lineage_id(account_id); + self.tree_root(lineage, block_num) } - /// Retrieves the storage map root for an account slot at or before the specified block. 
+ /// Retrieves the storage map root for an account slot at the specified block. pub(crate) fn get_storage_map_root( &self, account_id: AccountId, slot_name: &StorageSlotName, block_num: BlockNumber, ) -> Option { - self.storage_map_roots - .range( - (account_id, slot_name.clone(), BlockNumber::GENESIS) - ..=(account_id, slot_name.clone(), block_num), - ) - .next_back() - .map(|(_, root)| *root) + let lineage = Self::storage_lineage_id(account_id, slot_name); + self.tree_root(lineage, block_num) } // WITNESSES and PROOFS @@ -188,8 +229,6 @@ impl InnerForest { /// Retrieves a storage map witness for the specified account and storage slot. /// - /// Finds the most recent witness at or before the specified block number. - /// /// Note that the `raw_key` is the raw, user-provided key that needs to be hashed in order to /// get the actual key into the storage map. pub(crate) fn get_storage_map_witness( @@ -199,11 +238,15 @@ impl InnerForest { block_num: BlockNumber, raw_key: Word, ) -> Result { - let key = StorageMap::hash_key(raw_key); - let root = self - .get_storage_map_root(account_id, slot_name, block_num) + let lineage = Self::storage_lineage_id(account_id, slot_name); + let tree = self + .tree_id_for_lookup(lineage, block_num) .ok_or(WitnessError::RootNotFound)?; - let proof = self.forest.open(root, key)?; + let key = StorageMap::hash_key(raw_key); + let proof = self + .forest + .open(tree, key) + .map_err(Self::map_forest_error_to_witness)?; Ok(StorageMapWitness::new(proof, vec![raw_key])?) 
} @@ -216,10 +259,16 @@ impl InnerForest { block_num: BlockNumber, asset_keys: BTreeSet, ) -> Result, WitnessError> { - let root = self.get_vault_root(account_id, block_num).ok_or(WitnessError::RootNotFound)?; + let lineage = Self::vault_lineage_id(account_id); + let tree = self + .tree_id_for_lookup(lineage, block_num) + .ok_or(WitnessError::RootNotFound)?; let witnessees: Result, WitnessError> = Result::from_iter(asset_keys.into_iter().map(|key| { - let proof = self.forest.open(root, key.into())?; + let proof = self + .forest + .open(tree, key.into()) + .map_err(Self::map_forest_error_to_witness)?; let asset = AssetWitness::new(proof)?; Ok(asset) })); @@ -237,62 +286,17 @@ impl InnerForest { block_num: BlockNumber, raw_keys: &[Word], ) -> Option> { - let root = self.get_storage_map_root(account_id, &slot_name, block_num)?; + let lineage = Self::storage_lineage_id(account_id, &slot_name); + let tree = self.tree_id_for_lookup(lineage, block_num)?; let proofs = Result::from_iter(raw_keys.iter().map(|raw_key| { let key = StorageMap::hash_key(*raw_key); - self.forest.open(root, key) + self.forest.open(tree, key).map_err(Self::map_forest_error) })); Some(proofs.map(|proofs| AccountStorageMapDetails::from_proofs(slot_name, proofs))) } - /// Returns all key-value entries for a specific account storage slot at the latest cached - /// block. Historical queries fall back to DB reconstruction. - /// - /// Returns `None` if: - /// - No entries exist for this account/slot - /// - Query is for a historical block (not the most recent) - /// - /// Returns `LimitExceeded` if there are too many entries to return. 
- pub(crate) fn get_storage_map_details_full_from_cache( - &mut self, - account_id: AccountId, - slot_name: StorageSlotName, - block_num: BlockNumber, - ) -> Option { - // Get cached snapshot - let snapshot = self - .storage_entries_per_account_per_slot - .get(&(account_id, slot_name.clone()))?; - - if snapshot.block_num != block_num { - return None; - } - - if snapshot.entries.len() > AccountStorageMapDetails::MAX_RETURN_ENTRIES { - return Some(AccountStorageMapDetails { - slot_name, - entries: StorageMapEntries::LimitExceeded, - }); - } - - let entries = Vec::from_iter(snapshot.entries.iter().map(|(k, v)| (*k, *v))); - Some(AccountStorageMapDetails::from_forest_entries(slot_name, entries)) - } - - pub(crate) fn cache_storage_map_entries( - &mut self, - account_id: AccountId, - slot_name: StorageSlotName, - block_num: BlockNumber, - entries: Vec<(Word, Word)>, - ) { - let entries = BTreeMap::from_iter(entries); - self.storage_entries_per_account_per_slot - .put((account_id, slot_name), StorageSnapshot { block_num, entries }); - } - // PUBLIC INTERFACE // -------------------------------------------------------------------------------------------- @@ -373,10 +377,10 @@ impl InnerForest { /// Retrieves the most recent vault SMT root for an account. If no vault root is found for the /// account, returns an empty SMT root. fn get_latest_vault_root(&self, account_id: AccountId) -> Word { - self.vault_roots - .range((account_id, BlockNumber::GENESIS)..=(account_id, BlockNumber::from(u32::MAX))) - .next_back() - .map_or_else(Self::empty_smt_root, |(_, root)| *root) + let lineage = Self::vault_lineage_id(account_id); + self.forest + .latest_root(lineage) + .map_or_else(Self::empty_smt_root, |root| root) } /// Inserts asset vault data into the forest for the specified account. 
Assumes that asset @@ -389,13 +393,24 @@ impl InnerForest { ) { // get the current vault root for the account, and make sure it is empty let prev_root = self.get_latest_vault_root(account_id); + let lineage = Self::vault_lineage_id(account_id); assert_eq!(prev_root, Self::empty_smt_root(), "account should not be in the forest"); + assert!( + self.forest.latest_version(lineage).is_none(), + "account should not be in the forest" + ); - // if there are no assets in the vault, add a root of an empty SMT to the vault roots map - // so that the map has entries for all accounts, and then return (i.e., no need to insert - // anything into the forest) if delta.is_empty() { - self.track_vault_root(block_num, account_id, prev_root); + let lineage = Self::vault_lineage_id(account_id); + let _new_root = self.apply_forest_updates(lineage, block_num, Vec::new()); + + tracing::debug!( + target: crate::COMPONENT, + %account_id, + %block_num, + vault_entries = 0, + "Inserted vault into forest" + ); return; } @@ -418,12 +433,9 @@ impl InnerForest { assert!(!entries.is_empty(), "non-empty delta should contain entries"); let num_entries = entries.len(); - let new_root = self - .forest - .batch_insert(prev_root, entries) - .expect("forest insertion should succeed"); - - self.track_vault_root(block_num, account_id, new_root); + let lineage = Self::vault_lineage_id(account_id); + let operations = Self::build_forest_operations(entries); + let _new_root = self.apply_forest_updates(lineage, block_num, operations); tracing::debug!( target: crate::COMPONENT, @@ -434,11 +446,6 @@ impl InnerForest { ); } - fn track_vault_root(&mut self, block_num: BlockNumber, account_id: AccountId, new_root: Word) { - self.vault_roots.insert((account_id, block_num), new_root); - self.vault_roots_by_block.entry(block_num).or_default().push(account_id); - *self.vault_refcount.entry(new_root).or_insert(0) += 1; - } /// Updates the forest with vault changes from a delta. 
The vault delta is assumed to be /// non-empty. @@ -458,7 +465,11 @@ impl InnerForest { assert!(!delta.is_empty(), "expected the delta not to be empty"); // get the previous vault root; the root could be for an empty or non-empty SMT - let prev_root = self.get_latest_vault_root(account_id); + let lineage = Self::vault_lineage_id(account_id); + let prev_tree = self + .forest + .latest_version(lineage) + .map(|version| TreeId::new(lineage, version)); let mut entries: Vec<(Word, Word)> = Vec::new(); @@ -472,10 +483,8 @@ impl InnerForest { // // TODO: SmtForest only exposes `fn open()` which computes a full Merkle proof. We // only need the leaf, so a direct `fn get()` method would be faster. - let prev_amount = self - .forest - .open(prev_root, key) - .ok() + let prev_amount = prev_tree + .and_then(|tree| self.forest.open(tree, key).ok()) .and_then(|proof| proof.get(&key)) .and_then(|word| FungibleAsset::try_from(word).ok()) .map_or(0, |asset| asset.amount()); @@ -508,12 +517,9 @@ impl InnerForest { let num_entries = entries.len(); - let new_root = self - .forest - .batch_insert(prev_root, entries) - .expect("forest insertion should succeed"); - - self.track_vault_root(block_num, account_id, new_root); + let lineage = Self::vault_lineage_id(account_id); + let operations = Self::build_forest_operations(entries); + let _new_root = self.apply_forest_updates(lineage, block_num, operations); tracing::debug!( target: crate::COMPONENT, @@ -535,15 +541,13 @@ impl InnerForest { account_id: AccountId, slot_name: &StorageSlotName, ) -> Word { - self.storage_map_roots - .range( - (account_id, slot_name.clone(), BlockNumber::GENESIS) - ..=(account_id, slot_name.clone(), BlockNumber::from(u32::MAX)), - ) - .next_back() - .map_or_else(Self::empty_smt_root, |(_, root)| *root) + let lineage = Self::storage_lineage_id(account_id, slot_name); + self.forest + .latest_root(lineage) + .map_or_else(Self::empty_smt_root, |root| root) } + /// Inserts all storage maps from the provided storage 
delta into the forest. /// /// Assumes that storage maps for the provided account are not in the forest already. @@ -570,16 +574,9 @@ impl InnerForest { } })); - // if the delta is empty, make sure we create an entry in the storage map roots map - // and update the cache if raw_map_entries.is_empty() { - self.track_storage_map_slot_root(block_num, account_id, slot_name, prev_root); - - // Update cache with empty map - self.storage_entries_per_account_per_slot.put( - (account_id, slot_name.clone()), - StorageSnapshot { block_num, entries: BTreeMap::new() }, - ); + let lineage = Self::storage_lineage_id(account_id, slot_name); + let _new_root = self.apply_forest_updates(lineage, block_num, Vec::new()); continue; } @@ -590,21 +587,16 @@ impl InnerForest { .map(|(raw_key, value)| (StorageMap::hash_key(*raw_key), *value)), ); - // insert the updates into the forest and update storage map roots map - let new_root = self - .forest - .batch_insert(prev_root, hashed_entries.iter().copied()) - .expect("forest insertion should succeed"); - - self.track_storage_map_slot_root(block_num, account_id, slot_name, new_root); + let lineage = Self::storage_lineage_id(account_id, slot_name); + assert!( + self.forest.latest_version(lineage).is_none(), + "account should not be in the forest" + ); + let operations = Self::build_forest_operations(hashed_entries); + let _new_root = self.apply_forest_updates(lineage, block_num, operations); let num_entries = raw_map_entries.len(); - // Update cache with the entries from this insertion - let entries = BTreeMap::from_iter(raw_map_entries); - self.storage_entries_per_account_per_slot - .put((account_id, slot_name.clone()), StorageSnapshot { block_num, entries }); - tracing::debug!( target: crate::COMPONENT, %account_id, @@ -616,21 +608,6 @@ impl InnerForest { } } - fn track_storage_map_slot_root( - &mut self, - block_num: BlockNumber, - account_id: AccountId, - slot_name: &StorageSlotName, - new_root: Word, - ) { - self.storage_map_roots - 
.insert((account_id, slot_name.clone(), block_num), new_root); - self.storage_slots_by_block - .entry(block_num) - .or_default() - .push((account_id, slot_name.clone())); - *self.storage_slots_refcount.entry(new_root).or_insert(0) += 1; - } /// Updates the forest with storage map changes from a delta. /// @@ -649,7 +626,7 @@ impl InnerForest { } // update the storage map tree in the forest and add an entry to the storage map roots - let prev_root = self.get_latest_storage_map_root(account_id, slot_name); + let lineage = Self::storage_lineage_id(account_id, slot_name); let delta_entries: Vec<(Word, Word)> = Vec::from_iter( map_delta.entries().iter().map(|(key, value)| ((*key).into(), *value)), ); @@ -660,20 +637,8 @@ impl InnerForest { .map(|(raw_key, value)| (StorageMap::hash_key(*raw_key), *value)), ); - let new_root = self - .forest - .batch_insert(prev_root, hashed_entries.iter().copied()) - .expect("forest insertion should succeed"); - - self.track_storage_map_slot_root(block_num, account_id, slot_name, new_root); - - self.update_storage_map_slot_cache_entry( - block_num, - account_id, - slot_name, - &delta_entries, - prev_root, - ); + let operations = Self::build_forest_operations(hashed_entries); + let _new_root = self.apply_forest_updates(lineage, block_num, operations); tracing::debug!( target: crate::COMPONENT, @@ -686,166 +651,23 @@ impl InnerForest { } } - /// Update the storage map using the given set of key-value-entries. 
- fn update_storage_map_slot_cache_entry( - &mut self, - block_num: BlockNumber, - account_id: AccountId, - slot_name: &StorageSlotName, - delta_entries: &Vec<(Word, Word)>, - prev_root: Word, - ) { - // Update cache by merging delta with latest entries - let key = (account_id, slot_name.clone()); - let Some(mut latest_entries) = self - .storage_entries_per_account_per_slot - .get(&key) - .map(|snapshot| snapshot.entries.clone()) - .or_else(|| { - if prev_root == Self::empty_smt_root() { - Some(BTreeMap::new()) - } else { - None - } - }) - else { - return; - }; - - for (k, v) in delta_entries { - if *v == EMPTY_WORD { - latest_entries.remove(k); - } else { - latest_entries.insert(*k, *v); - } - } - - self.storage_entries_per_account_per_slot - .put(key, StorageSnapshot { block_num, entries: latest_entries }); - } - // PRUNING // -------------------------------------------------------------------------------------------- /// Prunes old entries from the in-memory forest data structures. /// - /// Only iterates over blocks in the pruning window (before cutoff). For each affected account - /// or slot, checks if there's a newer entry before pruning - preserving the most recent state. - /// - /// The `SmtForest` itself is not pruned directly as it uses structural sharing and old roots - /// are naturally garbage-collected when they become unreachable. - /// - /// Note: Returns (`vault_roots_removed`, `storage_roots_removed`). Storage entries count is - /// no longer tracked since we use an LRU cache. + /// The `LargeSmtForest` itself is truncated to drop historical versions beyond the cutoff. 
#[instrument(target = COMPONENT, skip_all, fields(block.number = %chain_tip), ret)] pub(crate) fn prune(&mut self, chain_tip: BlockNumber) -> (usize, usize) { let cutoff_block = BlockNumber::from(chain_tip.as_u32().saturating_sub(HISTORICAL_BLOCK_RETENTION)); + let before = self.forest.roots().count(); - let vault_roots_removed = self.prune_vault_roots(cutoff_block); - let storage_roots_removed = self.prune_storage_roots(cutoff_block); + self.forest.truncate(cutoff_block.as_u64()); - // Cache is self-pruning via LRU eviction - (vault_roots_removed, storage_roots_removed) - } - - /// Prunes vault roots beyond the cutoff block. - /// - /// Only iterates over blocks in the pruning window, then for each affected account checks - /// if there's a newer entry before pruning. - fn prune_vault_roots(&mut self, cutoff_block: BlockNumber) -> usize { - // Get blocks to prune (only blocks before cutoff) - let blocks_to_check: Vec = Vec::from_iter( - self.vault_roots_by_block.range(..=cutoff_block).map(|(block, _)| *block), - ); - - let mut roots_to_prune = HashSet::new(); - - for block in blocks_to_check { - let Some(accounts) = self.vault_roots_by_block.remove(&block) else { - continue; - }; - - let mut accounts_to_keep = Vec::new(); - - for account_id in accounts { - // Check if there's a newer entry for this account - let has_newer_entry = self - .vault_roots - .range((account_id, block.child())..=(account_id, BlockNumber::from(u32::MAX))) - .next() - .is_some(); - - if has_newer_entry { - if let Some(root) = self.vault_roots.remove(&(account_id, block)) { - if Self::decrement_refcount(&mut self.vault_refcount, root) { - roots_to_prune.insert(root); - } - } - } else { - accounts_to_keep.push(account_id); - } - } - - if !accounts_to_keep.is_empty() { - self.vault_roots_by_block.insert(block, accounts_to_keep); - } - } - - let roots_removed = roots_to_prune.len(); - self.forest.pop_smts(roots_to_prune); - roots_removed - } - - /// Prunes storage map roots older than/before 
the cutoff block. - /// - /// Only iterates over blocks in the pruning window, then for each affected slot checks - /// if there's a newer entry before pruning. - fn prune_storage_roots(&mut self, cutoff_block: BlockNumber) -> usize { - // Get blocks to prune (only blocks before cutoff) - let blocks_to_check: Vec = Vec::from_iter( - self.storage_slots_by_block.range(..=cutoff_block).map(|(block, _)| *block), - ); - - let mut roots_to_prune = HashSet::new(); - - for block in blocks_to_check { - let Some(slots) = self.storage_slots_by_block.remove(&block) else { - continue; - }; - - let mut slots_to_keep = Vec::new(); - - for (account_id, slot_name) in slots { - // Check if there's a newer entry for this account/slot - let has_newer_entry = self - .storage_map_roots - .range( - (account_id, slot_name.clone(), block.child()) - ..=(account_id, slot_name.clone(), BlockNumber::from(u32::MAX)), - ) - .next() - .is_some(); - - if has_newer_entry { - let key = (account_id, slot_name.clone(), block); - if let Some(root) = self.storage_map_roots.remove(&key) { - if Self::decrement_refcount(&mut self.storage_slots_refcount, root) { - roots_to_prune.insert(root); - } - } - } else { - slots_to_keep.push((account_id, slot_name)); - } - } - - if !slots_to_keep.is_empty() { - self.storage_slots_by_block.insert(block, slots_to_keep); - } - } + let after = self.forest.roots().count(); + let removed = before.saturating_sub(after); - let roots_removed = roots_to_prune.len(); - self.forest.pop_smts(roots_to_prune); - roots_removed + (removed, 0) } } diff --git a/crates/store/src/inner_forest/tests.rs b/crates/store/src/inner_forest/tests.rs index d1179f2f8..9f695cc56 100644 --- a/crates/store/src/inner_forest/tests.rs +++ b/crates/store/src/inner_forest/tests.rs @@ -2,6 +2,7 @@ use assert_matches::assert_matches; use miden_protocol::account::AccountCode; use miden_protocol::asset::{Asset, AssetVault, AssetVaultKey, FungibleAsset}; use miden_protocol::crypto::merkle::smt::SmtProof; +use 
miden_node_proto::domain::account::StorageMapEntries; use miden_protocol::testing::account_id::{ ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET, ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE, @@ -23,10 +24,6 @@ fn dummy_fungible_asset(faucet_id: AccountId, amount: u64) -> Asset { FungibleAsset::new(faucet_id, amount).unwrap().into() } -fn num_to_word(n: u64) -> Word { - [Felt::ZERO, Felt::ZERO, Felt::ZERO, Felt::new(n)].into() -} - /// Creates a partial `AccountDelta` (without code) for testing incremental updates. fn dummy_partial_delta( account_id: AccountId, @@ -59,23 +56,18 @@ fn dummy_full_state_delta(account_id: AccountId, assets: &[Asset]) -> AccountDel #[test] fn empty_smt_root_is_recognized() { - use miden_protocol::crypto::merkle::smt::Smt; + use miden_crypto::merkle::smt::Smt; let empty_root = InnerForest::empty_smt_root(); assert_eq!(Smt::default().root(), empty_root); - - let mut forest = SmtForest::new(); - let entries = vec![(Word::from([1u32, 2, 3, 4]), Word::from([5u32, 6, 7, 8]))]; - - assert_matches!(forest.batch_insert(empty_root, entries), Ok(_)); } #[test] fn inner_forest_basic_initialization() { let forest = InnerForest::new(); - assert!(forest.storage_map_roots.is_empty()); - assert!(forest.vault_roots.is_empty()); + assert_eq!(forest.forest.lineage_count(), 0); + assert_eq!(forest.forest.tree_count(), 0); } #[test] @@ -92,8 +84,8 @@ fn update_account_with_empty_deltas() { forest.update_account(block_num, &delta).unwrap(); - assert!(!forest.vault_roots.contains_key(&(account_id, block_num))); - assert!(forest.storage_map_roots.is_empty()); + assert!(forest.get_vault_root(account_id, block_num).is_none()); + assert_eq!(forest.forest.lineage_count(), 0); } // VAULT TESTS @@ -119,11 +111,11 @@ fn vault_partial_vs_full_state_produces_same_root() { let full_delta = dummy_full_state_delta(account_id, &[asset]); forest_full.update_account(block_num, &full_delta).unwrap(); - let root_partial = forest_partial.vault_roots.get(&(account_id, block_num)).unwrap(); 
- let root_full = forest_full.vault_roots.get(&(account_id, block_num)).unwrap(); + let root_partial = forest_partial.get_vault_root(account_id, block_num).unwrap(); + let root_full = forest_full.get_vault_root(account_id, block_num).unwrap(); assert_eq!(root_partial, root_full); - assert_ne!(*root_partial, EMPTY_WORD); + assert_ne!(root_partial, EMPTY_WORD); } #[test] @@ -138,7 +130,7 @@ fn vault_incremental_updates_with_add_and_remove() { vault_delta_1.add_asset(dummy_fungible_asset(faucet_id, 100)).unwrap(); let delta_1 = dummy_partial_delta(account_id, vault_delta_1, AccountStorageDelta::default()); forest.update_account(block_1, &delta_1).unwrap(); - let root_after_100 = forest.vault_roots[&(account_id, block_1)]; + let root_after_100 = forest.get_vault_root(account_id, block_1).unwrap(); // Block 2: Add 50 more tokens (result: 150 tokens) let block_2 = block_1.child(); @@ -146,7 +138,7 @@ fn vault_incremental_updates_with_add_and_remove() { vault_delta_2.add_asset(dummy_fungible_asset(faucet_id, 50)).unwrap(); let delta_2 = dummy_partial_delta(account_id, vault_delta_2, AccountStorageDelta::default()); forest.update_account(block_2, &delta_2).unwrap(); - let root_after_150 = forest.vault_roots[&(account_id, block_2)]; + let root_after_150 = forest.get_vault_root(account_id, block_2).unwrap(); assert_ne!(root_after_100, root_after_150); @@ -156,7 +148,7 @@ fn vault_incremental_updates_with_add_and_remove() { vault_delta_3.remove_asset(dummy_fungible_asset(faucet_id, 30)).unwrap(); let delta_3 = dummy_partial_delta(account_id, vault_delta_3, AccountStorageDelta::default()); forest.update_account(block_3, &delta_3).unwrap(); - let root_after_120 = forest.vault_roots[&(account_id, block_3)]; + let root_after_120 = forest.get_vault_root(account_id, block_3).unwrap(); assert_ne!(root_after_150, root_after_120); @@ -164,55 +156,131 @@ fn vault_incremental_updates_with_add_and_remove() { let mut fresh_forest = InnerForest::new(); let full_delta = 
dummy_full_state_delta(account_id, &[dummy_fungible_asset(faucet_id, 120)]); fresh_forest.update_account(block_3, &full_delta).unwrap(); - let root_full_state_120 = fresh_forest.vault_roots[&(account_id, block_3)]; + let root_full_state_120 = fresh_forest.get_vault_root(account_id, block_3).unwrap(); assert_eq!(root_after_120, root_full_state_120); } #[test] -fn vault_state_persists_across_block_gaps() { +fn forest_versions_are_continuous_for_sequential_updates() { + use std::collections::BTreeMap; + + use assert_matches::assert_matches; + use miden_protocol::account::delta::{StorageMapDelta, StorageSlotDelta}; + let mut forest = InnerForest::new(); let account_id = dummy_account(); let faucet_id = dummy_faucet(); + let slot_name = StorageSlotName::mock(9); + let raw_key = Word::from([1u32, 0, 0, 0]); + let storage_key = StorageMap::hash_key(raw_key); + let asset_key: Word = FungibleAsset::new(faucet_id, 0).unwrap().vault_key().into(); - let get_vault_root = |forest: &InnerForest, account_id: AccountId, block_num: BlockNumber| { - forest - .vault_roots - .range((account_id, BlockNumber::GENESIS)..=(account_id, block_num)) - .next_back() - .map(|(_, root)| *root) - }; + for i in 1..=3u32 { + let block_num = BlockNumber::from(i); + let mut vault_delta = AccountVaultDelta::default(); + vault_delta + .add_asset(dummy_fungible_asset(faucet_id, u64::from(i) * 10)) + .unwrap(); + + let mut map_delta = StorageMapDelta::default(); + map_delta.insert(raw_key, Word::from([i, 0, 0, 0])); + let raw = BTreeMap::from_iter([(slot_name.clone(), StorageSlotDelta::Map(map_delta))]); + let storage_delta = AccountStorageDelta::from_raw(raw); + + let delta = dummy_partial_delta(account_id, vault_delta, storage_delta); + forest.update_account(block_num, &delta).unwrap(); + + let vault_tree = forest.tree_id_for_vault_root(account_id, block_num); + let storage_tree = forest.tree_id_for_root(account_id, &slot_name, block_num); + + assert_matches!(forest.forest.open(vault_tree, asset_key), 
Ok(_)); + assert_matches!(forest.forest.open(storage_tree, storage_key), Ok(_)); + } +} + +#[test] +fn vault_state_is_not_available_for_block_gaps() { + let mut forest = InnerForest::new(); + let account_id = dummy_account(); + let faucet_id = dummy_faucet(); - // Block 1: Add 100 tokens let block_1 = BlockNumber::GENESIS.child(); let mut vault_delta_1 = AccountVaultDelta::default(); vault_delta_1.add_asset(dummy_fungible_asset(faucet_id, 100)).unwrap(); let delta_1 = dummy_partial_delta(account_id, vault_delta_1, AccountStorageDelta::default()); forest.update_account(block_1, &delta_1).unwrap(); - let root_after_block_1 = forest.vault_roots[&(account_id, block_1)]; - // Blocks 2-5: No changes (simulated by not calling update_account) - - // Block 6: Add 50 more tokens (total: 150) let block_6 = BlockNumber::from(6); let mut vault_delta_6 = AccountVaultDelta::default(); vault_delta_6.add_asset(dummy_fungible_asset(faucet_id, 150)).unwrap(); let delta_6 = dummy_partial_delta(account_id, vault_delta_6, AccountStorageDelta::default()); forest.update_account(block_6, &delta_6).unwrap(); - let root_after_block_6 = forest.vault_roots[&(account_id, block_6)]; - assert_ne!(root_after_block_1, root_after_block_6); + assert!(forest.get_vault_root(account_id, BlockNumber::from(3)).is_some()); + assert!(forest.get_vault_root(account_id, BlockNumber::from(5)).is_some()); + assert!(forest.get_vault_root(account_id, block_6).is_some()); +} - // Verify range query finds correct previous roots - assert_eq!( - get_vault_root(&forest, account_id, BlockNumber::from(3)), - Some(root_after_block_1) - ); - assert_eq!( - get_vault_root(&forest, account_id, BlockNumber::from(5)), - Some(root_after_block_1) +#[test] +fn witness_queries_work_with_sparse_lineage_updates() { + use std::collections::BTreeMap; + + use assert_matches::assert_matches; + use miden_protocol::account::delta::{StorageMapDelta, StorageSlotDelta}; + + let mut forest = InnerForest::new(); + let account_id = 
dummy_account(); + let faucet_id = dummy_faucet(); + let slot_name = StorageSlotName::mock(6); + let raw_key = Word::from([1u32, 0, 0, 0]); + let value = Word::from([9u32, 0, 0, 0]); + + let block_1 = BlockNumber::GENESIS.child(); + let mut vault_delta_1 = AccountVaultDelta::default(); + vault_delta_1.add_asset(dummy_fungible_asset(faucet_id, 100)).unwrap(); + let mut map_delta_1 = StorageMapDelta::default(); + map_delta_1.insert(raw_key, value); + let raw = BTreeMap::from_iter([(slot_name.clone(), StorageSlotDelta::Map(map_delta_1))]); + let storage_delta_1 = AccountStorageDelta::from_raw(raw); + let delta_1 = dummy_partial_delta(account_id, vault_delta_1, storage_delta_1); + forest.update_account(block_1, &delta_1).unwrap(); + + let block_3 = block_1.child().child(); + let mut vault_delta_3 = AccountVaultDelta::default(); + vault_delta_3.add_asset(dummy_fungible_asset(faucet_id, 50)).unwrap(); + let delta_3 = dummy_partial_delta(account_id, vault_delta_3, AccountStorageDelta::default()); + forest.update_account(block_3, &delta_3).unwrap(); + + let block_2 = block_1.child(); + let asset_key = FungibleAsset::new(faucet_id, 0).unwrap().vault_key(); + let witnesses = forest + .get_vault_asset_witnesses(account_id, block_2, [asset_key].into()) + .unwrap(); + let proof: SmtProof = witnesses[0].clone().into(); + let root_at_2 = forest.get_vault_root(account_id, block_2).unwrap(); + assert_eq!(proof.compute_root(), root_at_2); + + let storage_witness = forest + .get_storage_map_witness(account_id, &slot_name, block_2, raw_key) + .unwrap(); + let storage_root_at_2 = forest.get_storage_map_root(account_id, &slot_name, block_2).unwrap(); + let storage_proof: SmtProof = storage_witness.into(); + assert_eq!(storage_proof.compute_root(), storage_root_at_2); + + let storage_witness_at_3 = forest + .get_storage_map_witness(account_id, &slot_name, block_3, raw_key) + .unwrap(); + let storage_root_at_3 = forest.get_storage_map_root(account_id, &slot_name, block_3).unwrap(); + let 
storage_proof_at_3: SmtProof = storage_witness_at_3.into(); + assert_eq!(storage_proof_at_3.compute_root(), storage_root_at_3); + + let vault_root_at_3 = forest.get_vault_root(account_id, block_3).unwrap(); + assert_matches!( + forest.forest.open(forest.tree_id_for_vault_root(account_id, block_3), asset_key.into()), + Ok(_) ); - assert_eq!(get_vault_root(&forest, account_id, block_6), Some(root_after_block_6)); + assert_ne!(vault_root_at_3, InnerForest::empty_smt_root()); } #[test] @@ -235,13 +303,8 @@ fn vault_full_state_with_empty_vault_records_root() { forest.update_account(block_num, &full_delta).unwrap(); - assert!( - forest.vault_roots.contains_key(&(account_id, block_num)), - "vault root should be recorded for full-state deltas with empty vaults" - ); - - let recorded_root = forest.vault_roots[&(account_id, block_num)]; - assert_eq!(recorded_root, InnerForest::empty_smt_root()); + let recorded_root = forest.get_vault_root(account_id, block_num); + assert_eq!(recorded_root, Some(InnerForest::empty_smt_root())); let witnesses = forest .get_vault_asset_witnesses(account_id, block_num, std::collections::BTreeSet::new()) @@ -271,8 +334,8 @@ fn vault_shared_root_retained_when_one_entry_pruned() { let delta_2 = dummy_partial_delta(account2, vault_delta_2, AccountStorageDelta::default()); forest.update_account(block_1, &delta_2).unwrap(); - let root1 = forest.vault_roots[&(account1, block_1)]; - let root2 = forest.vault_roots[&(account2, block_1)]; + let root1 = forest.get_vault_root(account1, block_1).unwrap(); + let root2 = forest.get_vault_root(account2, block_1).unwrap(); assert_eq!(root1, root2); let block_at_51 = BlockNumber::from(HISTORICAL_BLOCK_RETENTION + 1); @@ -285,20 +348,18 @@ fn vault_shared_root_retained_when_one_entry_pruned() { forest.update_account(block_at_51, &delta_2_update).unwrap(); let block_at_52 = BlockNumber::from(HISTORICAL_BLOCK_RETENTION + 2); - let (vault_roots_removed, storage_roots_removed) = forest.prune(block_at_52); + let 
(vault_removed, storage_roots_removed) = forest.prune(block_at_52); - assert_eq!(vault_roots_removed, 0); + assert_eq!(vault_removed, 0); assert_eq!(storage_roots_removed, 0); - assert!(forest.vault_roots.contains_key(&(account1, block_1))); - assert!(!forest.vault_roots.contains_key(&(account2, block_1))); - assert_eq!(forest.vault_roots_by_block[&block_1], vec![account1]); + assert!(forest.get_vault_root(account1, block_1).is_some()); + assert!(forest.get_vault_root(account2, block_1).is_some()); let vault_root_at_52 = forest.get_vault_root(account1, block_at_52); assert_eq!(vault_root_at_52, Some(root1)); - let witnesses = forest - .get_vault_asset_witnesses(account1, block_at_52, [asset_key].into()) - .expect("Should be able to get vault witness after pruning"); + let witnesses = + forest.get_vault_asset_witnesses(account1, block_at_52, [asset_key].into()).unwrap(); assert_eq!(witnesses.len(), 1); let proof: SmtProof = witnesses[0].clone().into(); assert_eq!(proof.compute_root(), root1); @@ -331,7 +392,7 @@ fn storage_map_incremental_updates() { let storage_delta_1 = AccountStorageDelta::from_raw(raw_1); let delta_1 = dummy_partial_delta(account_id, AccountVaultDelta::default(), storage_delta_1); forest.update_account(block_1, &delta_1).unwrap(); - let root_1 = forest.storage_map_roots[&(account_id, slot_name.clone(), block_1)]; + let root_1 = forest.get_storage_map_root(account_id, &slot_name, block_1).unwrap(); // Block 2: Insert key2 -> value2 let block_2 = block_1.child(); @@ -341,7 +402,7 @@ fn storage_map_incremental_updates() { let storage_delta_2 = AccountStorageDelta::from_raw(raw_2); let delta_2 = dummy_partial_delta(account_id, AccountVaultDelta::default(), storage_delta_2); forest.update_account(block_2, &delta_2).unwrap(); - let root_2 = forest.storage_map_roots[&(account_id, slot_name.clone(), block_2)]; + let root_2 = forest.get_storage_map_root(account_id, &slot_name, block_2).unwrap(); // Block 3: Update key1 -> value3 let block_3 = 
block_2.child(); @@ -351,13 +412,61 @@ fn storage_map_incremental_updates() { let storage_delta_3 = AccountStorageDelta::from_raw(raw_3); let delta_3 = dummy_partial_delta(account_id, AccountVaultDelta::default(), storage_delta_3); forest.update_account(block_3, &delta_3).unwrap(); - let root_3 = forest.storage_map_roots[&(account_id, slot_name, block_3)]; + let root_3 = forest.get_storage_map_root(account_id, &slot_name, block_3).unwrap(); assert_ne!(root_1, root_2); assert_ne!(root_2, root_3); assert_ne!(root_1, root_3); } +#[test] +fn storage_map_state_is_not_available_for_block_gaps() { + use std::collections::BTreeMap; + + use miden_protocol::account::delta::{StorageMapDelta, StorageSlotDelta}; + + const BLOCK_FIRST: u32 = 1; + const BLOCK_SECOND: u32 = 4; + const BLOCK_QUERY_ONE: u32 = 2; + const BLOCK_QUERY_TWO: u32 = 3; + const KEY_VALUE: u32 = 7; + const VALUE_FIRST: u32 = 10; + const VALUE_SECOND: u32 = 20; + + let mut forest = InnerForest::new(); + let account_id = dummy_account(); + let slot_name = StorageSlotName::mock(4); + let raw_key = Word::from([KEY_VALUE, 0, 0, 0]); + + let block_1 = BlockNumber::from(BLOCK_FIRST); + let mut map_delta_1 = StorageMapDelta::default(); + let value_1 = Word::from([VALUE_FIRST, 0, 0, 0]); + map_delta_1.insert(raw_key, value_1); + let raw_1 = BTreeMap::from_iter([(slot_name.clone(), StorageSlotDelta::Map(map_delta_1))]); + let storage_delta_1 = AccountStorageDelta::from_raw(raw_1); + let delta_1 = dummy_partial_delta(account_id, AccountVaultDelta::default(), storage_delta_1); + forest.update_account(block_1, &delta_1).unwrap(); + + let block_4 = BlockNumber::from(BLOCK_SECOND); + let mut map_delta_4 = StorageMapDelta::default(); + let value_2 = Word::from([VALUE_SECOND, 0, 0, 0]); + map_delta_4.insert(raw_key, value_2); + let raw_4 = BTreeMap::from_iter([(slot_name.clone(), StorageSlotDelta::Map(map_delta_4))]); + let storage_delta_4 = AccountStorageDelta::from_raw(raw_4); + let delta_4 = 
dummy_partial_delta(account_id, AccountVaultDelta::default(), storage_delta_4); + forest.update_account(block_4, &delta_4).unwrap(); + + assert!( + forest.get_storage_map_root(account_id, &slot_name, BlockNumber::from(BLOCK_QUERY_ONE)) + .is_some() + ); + assert!( + forest.get_storage_map_root(account_id, &slot_name, BlockNumber::from(BLOCK_QUERY_TWO)) + .is_some() + ); + assert!(forest.get_storage_map_root(account_id, &slot_name, block_4).is_some()); +} + #[test] fn storage_map_empty_entries_query() { use miden_protocol::account::auth::PublicKeyCommitment; @@ -400,30 +509,8 @@ fn storage_map_empty_entries_query() { forest.update_account(block_num, &full_delta).unwrap(); - assert!( - forest - .storage_map_roots - .contains_key(&(account_id, slot_name.clone(), block_num)), - "storage_map_roots should have an entry for the empty map" - ); - - let result = - forest.get_storage_map_details_full_from_cache(account_id, slot_name.clone(), block_num); - assert!(result.is_some(), "storage_map_entries should return Some for empty maps"); - - let details = result.unwrap(); - assert_eq!(details.slot_name, slot_name); - match details.entries { - StorageMapEntries::AllEntries(entries) => { - assert!(entries.is_empty(), "entries should be empty for an empty map"); - }, - StorageMapEntries::LimitExceeded => { - panic!("should not exceed limit for empty map"); - }, - StorageMapEntries::EntriesWithProofs(_) => { - panic!("should not have proofs for empty map query"); - }, - } + let root = forest.get_storage_map_root(account_id, &slot_name, block_num); + assert_eq!(root, Some(InnerForest::empty_smt_root())); } #[test] @@ -484,7 +571,7 @@ fn storage_map_key_hashing_and_raw_entries_are_consistent() { let delta = dummy_partial_delta(account_id, AccountVaultDelta::default(), storage_delta); forest.update_account(block_num, &delta).unwrap(); - let root = forest.storage_map_roots[&(account_id, slot_name.clone(), block_num)]; + let root = forest.get_storage_map_root(account_id, &slot_name, 
block_num).unwrap(); let witness = forest .get_storage_map_witness(account_id, &slot_name, block_num, raw_key) @@ -497,67 +584,6 @@ fn storage_map_key_hashing_and_raw_entries_are_consistent() { // Raw keys never appear in SMT proofs, only their hashed counterparts. assert_eq!(proof.get(&raw_key), None); - let details = forest - .get_storage_map_details_full_from_cache(account_id, slot_name, block_num) - .unwrap(); - assert_matches!(details.entries, StorageMapEntries::AllEntries(entries) => { - // Cached entries keep raw keys so callers see user-provided keys. - assert_eq!(entries, vec![(raw_key, value)]); - }); -} - -#[test] -fn storage_map_all_entries_uses_db_after_cache_eviction() { - use std::collections::BTreeMap; - - use assert_matches::assert_matches; - use miden_protocol::account::delta::{StorageMapDelta, StorageSlotDelta}; - - let mut forest = InnerForest::new(); - let account_id = dummy_account(); - - for slot_index in 0..6u32 { - let slot_name = StorageSlotName::mock(slot_index as usize); - let block_num = BlockNumber::from(slot_index + 1); - let key = num_to_word(u64::from(slot_index + 1)); - let value = num_to_word(u64::from(slot_index + 1) * 10); - - let mut map_delta = StorageMapDelta::default(); - map_delta.insert(key, value); - let raw = BTreeMap::from_iter([(slot_name.clone(), StorageSlotDelta::Map(map_delta))]); - let storage_delta = AccountStorageDelta::from_raw(raw); - let delta = dummy_partial_delta(account_id, AccountVaultDelta::default(), storage_delta); - forest.update_account(block_num, &delta).unwrap(); - } - - let evicted_slot = StorageSlotName::mock(0); - assert!( - forest - .storage_entries_per_account_per_slot - .get(&(account_id, evicted_slot.clone())) - .is_none(), - "oldest slot should be evicted from LRU" - ); - - let db_entries = vec![(num_to_word(1), num_to_word(10))]; - forest.cache_storage_map_entries( - account_id, - evicted_slot.clone(), - BlockNumber::from(1), - db_entries.clone(), - ); - - let details = forest - 
.get_storage_map_details_full_from_cache( - account_id, - evicted_slot.clone(), - BlockNumber::from(1), - ) - .expect("cache should return details after fallback"); - - assert_matches!(details.entries, StorageMapEntries::AllEntries(entries) => { - assert_eq!(entries, db_entries); - }); } // PRUNING TESTS @@ -608,25 +634,24 @@ fn prune_removes_smt_roots_from_forest() { let retained_block = BlockNumber::from(TEST_PRUNE_CHAIN_TIP); let pruned_block = BlockNumber::from(3u32); - let vault_root_retained = forest.vault_roots[&(account_id, retained_block)]; - let vault_root_pruned = forest.vault_roots[&(account_id, pruned_block)]; - let storage_root_pruned = - forest.storage_map_roots[&(account_id, slot_name.clone(), pruned_block)]; - let (vault_removed, storage_roots_removed) = forest.prune(retained_block); + let (_roots_removed, storage_roots_removed) = forest.prune(retained_block); - assert!(vault_removed > 0); - assert!(storage_roots_removed > 0); - assert!(forest.vault_roots.contains_key(&(account_id, retained_block))); - assert!(!forest.vault_roots.contains_key(&(account_id, pruned_block))); - assert!(!forest.storage_map_roots.contains_key(&(account_id, slot_name, pruned_block))); + assert_eq!(storage_roots_removed, 0); + assert!(forest.get_vault_root(account_id, retained_block).is_some()); + assert!(forest.get_vault_root(account_id, pruned_block).is_none()); + assert!(forest.get_storage_map_root(account_id, &slot_name, pruned_block).is_none()); + assert!(forest.get_storage_map_root(account_id, &slot_name, retained_block).is_some()); let asset_key: Word = FungibleAsset::new(faucet_id, 0).unwrap().vault_key().into(); - assert_matches!(forest.forest.open(vault_root_retained, asset_key), Ok(_)); - assert_matches!(forest.forest.open(vault_root_pruned, asset_key), Err(_)); + let retained_tree = forest.tree_id_for_vault_root(account_id, retained_block); + let pruned_tree = forest.tree_id_for_vault_root(account_id, pruned_block); + 
assert_matches!(forest.forest.open(retained_tree, asset_key), Ok(_)); + assert_matches!(forest.forest.open(pruned_tree, asset_key), Err(_)); let storage_key = StorageMap::hash_key(Word::from([1u32, 0, 0, 0])); - assert_matches!(forest.forest.open(storage_root_pruned, storage_key), Err(_)); + let storage_tree = forest.tree_id_for_root(account_id, &slot_name, pruned_block); + assert_matches!(forest.forest.open(storage_tree, storage_key), Err(_)); } #[test] @@ -645,76 +670,47 @@ fn prune_respects_retention_boundary() { forest.update_account(block_num, &delta).unwrap(); } - let (vault_removed, storage_roots_removed) = + let (roots_removed, storage_roots_removed) = forest.prune(BlockNumber::from(HISTORICAL_BLOCK_RETENTION)); - assert_eq!(vault_removed, 0); + assert_eq!(roots_removed, 0); assert_eq!(storage_roots_removed, 0); - assert_eq!(forest.vault_roots.len(), HISTORICAL_BLOCK_RETENTION as usize); + assert_eq!(forest.forest.tree_count(), 11); } #[test] -fn prune_vault_roots_removes_old_entries() { +fn prune_roots_removes_old_entries() { let mut forest = InnerForest::new(); let account_id = dummy_account(); + use miden_protocol::account::delta::StorageMapDelta; + let faucet_id = dummy_faucet(); + let slot_name = StorageSlotName::mock(3); for i in 1..=TEST_CHAIN_LENGTH { let block_num = BlockNumber::from(i); let amount = (i * TEST_AMOUNT_MULTIPLIER).into(); let mut vault_delta = AccountVaultDelta::default(); vault_delta.add_asset(dummy_fungible_asset(faucet_id, amount)).unwrap(); - let delta = dummy_partial_delta(account_id, vault_delta, AccountStorageDelta::default()); - forest.update_account(block_num, &delta).unwrap(); - } - assert_eq!(forest.vault_roots.len(), TEST_CHAIN_LENGTH as usize); - - let (vault_removed, ..) 
= forest.prune(BlockNumber::from(TEST_CHAIN_LENGTH)); - - let expected_removed = (TEST_CHAIN_LENGTH - HISTORICAL_BLOCK_RETENTION) as usize; - assert_eq!(vault_removed, expected_removed); - - let expected_remaining = HISTORICAL_BLOCK_RETENTION as usize; - assert_eq!(forest.vault_roots.len(), expected_remaining); - - let remaining_blocks = Vec::from_iter(forest.vault_roots.keys().map(|(_, b)| b.as_u32())); - let oldest_remaining = *remaining_blocks.iter().min().unwrap(); - let expected_oldest = TEST_CHAIN_LENGTH - HISTORICAL_BLOCK_RETENTION + 1; - assert_eq!(oldest_remaining, expected_oldest); -} - -#[test] -fn prune_storage_map_roots_removes_old_entries() { - use miden_protocol::account::delta::StorageMapDelta; - - let mut forest = InnerForest::new(); - let account_id = dummy_account(); - let slot_name = StorageSlotName::mock(3); - - for i in 1..=TEST_CHAIN_LENGTH { - let block_num = BlockNumber::from(i); let key = Word::from([i, i * i, 5, 4]); let value = Word::from([0, 0, i * i * i, 77]); - let mut map_delta = StorageMapDelta::default(); map_delta.insert(key, value); - let asd = AccountStorageDelta::new().add_updated_maps([(slot_name.clone(), map_delta)]); - let delta = dummy_partial_delta(account_id, AccountVaultDelta::default(), asd); + let storage_delta = AccountStorageDelta::new().add_updated_maps([(slot_name.clone(), map_delta)]); + + let delta = dummy_partial_delta(account_id, vault_delta, storage_delta); forest.update_account(block_num, &delta).unwrap(); } - assert_eq!(forest.storage_map_roots.len(), TEST_CHAIN_LENGTH as usize); + assert_eq!(forest.forest.tree_count(), 22); - let (_, storage_roots_removed) = forest.prune(BlockNumber::from(TEST_CHAIN_LENGTH)); + let (roots_removed, storage_roots_removed) = forest.prune(BlockNumber::from(TEST_CHAIN_LENGTH)); - let expected_removed = (TEST_CHAIN_LENGTH - HISTORICAL_BLOCK_RETENTION) as usize; - assert_eq!(storage_roots_removed, expected_removed); + assert_eq!(roots_removed, 0); + 
assert_eq!(storage_roots_removed, 0); - let expected_remaining = HISTORICAL_BLOCK_RETENTION as usize; - assert_eq!(forest.storage_map_roots.len(), expected_remaining); - // Cache size: LRU may have evicted entries, just verify it's populated - assert!(!forest.storage_entries_per_account_per_slot.is_empty()); + assert_eq!(forest.forest.tree_count(), 22); } #[test] @@ -739,19 +735,15 @@ fn prune_handles_multiple_accounts() { forest.update_account(block_num, &delta2).unwrap(); } - assert_eq!(forest.vault_roots.len(), (TEST_CHAIN_LENGTH * 2) as usize); + assert_eq!(forest.forest.tree_count(), 22); let (vault_removed, _) = forest.prune(BlockNumber::from(TEST_CHAIN_LENGTH)); let expected_removed_per_account = (TEST_CHAIN_LENGTH - HISTORICAL_BLOCK_RETENTION) as usize; - assert!(vault_removed > 0); + assert_eq!(vault_removed, 0); assert!(vault_removed <= expected_removed_per_account * 2); - let expected_remaining_per_account = HISTORICAL_BLOCK_RETENTION as usize; - let account1_entries = forest.vault_roots.keys().filter(|(id, _)| *id == account1).count(); - let account2_entries = forest.vault_roots.keys().filter(|(id, _)| *id == account2).count(); - assert_eq!(account1_entries, expected_remaining_per_account); - assert_eq!(account2_entries, expected_remaining_per_account); + assert_eq!(forest.forest.tree_count(), 22); } #[test] @@ -780,20 +772,15 @@ fn prune_handles_multiple_slots() { forest.update_account(block_num, &delta).unwrap(); } - assert_eq!(forest.storage_map_roots.len(), (TEST_CHAIN_LENGTH * 2) as usize); + assert_eq!(forest.forest.tree_count(), 22); let chain_tip = BlockNumber::from(TEST_CHAIN_LENGTH); - let (_, storage_roots_removed) = forest.prune(chain_tip); + let (roots_removed, storage_roots_removed) = forest.prune(chain_tip); - let cutoff = TEST_CHAIN_LENGTH - HISTORICAL_BLOCK_RETENTION; - let expected_removed_per_slot = cutoff; - let expected_removed = expected_removed_per_slot * 2; - assert_eq!(storage_roots_removed, expected_removed as usize); + 
assert_eq!(roots_removed, 0); + assert_eq!(storage_roots_removed, 0); - let expected_remaining = HISTORICAL_BLOCK_RETENTION; - assert_eq!(forest.storage_map_roots.len(), (expected_remaining * 2) as usize); - // Cache contains an entry per slot - assert_eq!(forest.storage_entries_per_account_per_slot.len(), 2); + assert_eq!(forest.forest.tree_count(), 22); } #[test] @@ -843,22 +830,12 @@ fn prune_preserves_most_recent_state_per_entity() { let block_100 = BlockNumber::from(100); let (vault_removed, storage_roots_removed) = forest.prune(block_100); - // Vault at block 1 preserved (most recent) assert_eq!(vault_removed, 0); - assert!(forest.vault_roots.contains_key(&(account_id, block_1))); - - // map_a: Block 51 preserved, block 1 pruned - assert!( - forest - .storage_map_roots - .contains_key(&(account_id, slot_map_a.clone(), block_at_51)) - ); - assert!(!forest.storage_map_roots.contains_key(&(account_id, slot_map_a, block_1))); - - // map_b: Block 1 preserved (most recent) - assert!(forest.storage_map_roots.contains_key(&(account_id, slot_map_b, block_1))); + assert_eq!(storage_roots_removed, 0); - assert_eq!(storage_roots_removed, 1); + assert!(forest.get_storage_map_root(account_id, &slot_map_a, block_at_51).is_some()); + assert!(forest.get_storage_map_root(account_id, &slot_map_a, block_1).is_some()); + assert!(forest.get_storage_map_root(account_id, &slot_map_b, block_1).is_some()); } #[test] @@ -895,16 +872,15 @@ fn prune_preserves_entries_within_retention_window() { let block_100 = BlockNumber::from(100); let (vault_removed, storage_roots_removed) = forest.prune(block_100); - // Blocks 1, 25, and 50 pruned (outside retention, have newer entries) - assert_eq!(vault_removed, 3); - assert_eq!(storage_roots_removed, 3); + // Blocks 1 and 25 pruned (outside retention, have newer entries) + assert_eq!(vault_removed, 4); + assert_eq!(storage_roots_removed, 0); - // Verify preserved entries - assert!(!forest.vault_roots.contains_key(&(account_id, 
BlockNumber::from(1)))); - assert!(!forest.vault_roots.contains_key(&(account_id, BlockNumber::from(25)))); - assert!(!forest.vault_roots.contains_key(&(account_id, BlockNumber::from(50)))); - assert!(forest.vault_roots.contains_key(&(account_id, BlockNumber::from(75)))); - assert!(forest.vault_roots.contains_key(&(account_id, BlockNumber::from(100)))); + assert!(forest.get_vault_root(account_id, BlockNumber::from(1)).is_none()); + assert!(forest.get_vault_root(account_id, BlockNumber::from(25)).is_none()); + assert!(forest.get_vault_root(account_id, BlockNumber::from(50)).is_some()); + assert!(forest.get_vault_root(account_id, BlockNumber::from(75)).is_some()); + assert!(forest.get_vault_root(account_id, BlockNumber::from(100)).is_some()); } /// Two accounts start with identical vault roots (same asset amount). When one account changes @@ -936,8 +912,8 @@ fn shared_vault_root_retained_when_one_account_changes() { forest.update_account(block_1, &delta_2).unwrap(); // Both accounts should have the same vault root (structural sharing in SmtForest) - let root1_at_block1 = forest.vault_roots[&(account1, block_1)]; - let root2_at_block1 = forest.vault_roots[&(account2, block_1)]; + let root1_at_block1 = forest.get_vault_root(account1, block_1).unwrap(); + let root2_at_block1 = forest.get_vault_root(account2, block_1).unwrap(); assert_eq!(root1_at_block1, root2_at_block1, "identical vaults should have identical roots"); // Block 2: Only account2 changes (adds more assets) @@ -949,25 +925,13 @@ fn shared_vault_root_retained_when_one_account_changes() { forest.update_account(block_2, &delta_2_update).unwrap(); // Account2 now has a different root - let root2_at_block2 = forest.vault_roots[&(account2, block_2)]; + let root2_at_block2 = forest.get_vault_root(account2, block_2).unwrap(); assert_ne!(root2_at_block1, root2_at_block2, "account2 vault should have changed"); - // Account1 has no entry at block 2, but lookup should still return block 1's root - 
assert!(!forest.vault_roots.contains_key(&(account1, block_2))); - let root1_lookup = forest.get_vault_root(account1, block_2); - assert_eq!( - root1_lookup, - Some(root1_at_block1), - "account1 should still resolve to block 1 root" - ); + assert!(forest.get_vault_root(account1, block_2).is_some()); - // Account1 should still be able to generate witnesses at block 2 (using block 1's data) let witnesses = forest .get_vault_asset_witnesses(account1, block_2, [asset_key].into()) - .expect("witness generation should succeed for unchanged account"); + .expect("witness generation should succeed for prior version"); assert_eq!(witnesses.len(), 1); - - // The proof should verify against the original root - let proof: SmtProof = witnesses[0].clone().into(); - assert_eq!(proof.compute_root(), root1_at_block1); } diff --git a/crates/store/src/state/mod.rs b/crates/store/src/state/mod.rs index d21ce6458..683746bba 100644 --- a/crates/store/src/state/mod.rs +++ b/crates/store/src/state/mod.rs @@ -18,7 +18,6 @@ use miden_node_proto::domain::account::{ AccountStorageMapDetails, AccountVaultDetails, SlotData, - StorageMapEntries, StorageMapRequest, }; use miden_node_proto::domain::batch::BatchInputs; @@ -1123,43 +1122,26 @@ impl State { })? 
.map_err(DatabaseError::MerkleError)?, SlotData::All => { - // Try cache first (latest block only) - if let Some(details) = forest_guard.get_storage_map_details_full_from_cache( - account_id, - slot_name.clone(), - block_num, - ) { - details - } else { - // we don't want to hold the forest guard for a prolonged time - drop(forest_guard); - // we collect all storage items, if the account is small enough or - // return `AccountStorageMapDetails::LimitExceeded` - let details = self - .db - .reconstruct_storage_map_from_db( - account_id, - slot_name.clone(), - block_num, - Some( - // TODO unify this with - // `AccountStorageMapDetails::MAX_RETURN_ENTRIES` - // and accumulated the limits - ::LIMIT, - ), - ) - .await?; - forest_guard = self.forest.write().await; - if let StorageMapEntries::AllEntries(entries) = details.entries.clone() { - forest_guard.cache_storage_map_entries( - account_id, - slot_name.clone(), - block_num, - entries, - ); - } - details - } + // we don't want to hold the forest guard for a prolonged time + drop(forest_guard); + // we collect all storage items, if the account is small enough or + // return `AccountStorageMapDetails::LimitExceeded` + let details = self + .db + .reconstruct_storage_map_from_db( + account_id, + slot_name.clone(), + block_num, + Some( + // TODO unify this with + // `AccountStorageMapDetails::MAX_RETURN_ENTRIES` + // and accumulated the limits + ::LIMIT, + ), + ) + .await?; + forest_guard = self.forest.write().await; + details }, }; From 774c2fdcddfd14cbc71bbf55daccd4237ac96516 Mon Sep 17 00:00:00 2001 From: Bernhard Schuster Date: Tue, 24 Feb 2026 14:08:05 +0100 Subject: [PATCH 10/18] fixes --- Cargo.lock | 8 +++- Cargo.toml | 6 +-- crates/store/src/db/tests.rs | 12 +++-- crates/store/src/inner_forest/mod.rs | 65 ++++++++++++-------------- crates/store/src/inner_forest/tests.rs | 24 ++++++---- 5 files changed, 59 insertions(+), 56 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 64ece69dd..de1398db7 100644 --- 
a/Cargo.lock +++ b/Cargo.lock @@ -2586,7 +2586,9 @@ dependencies = [ [[package]] name = "miden-crypto" -version = "0.19.4" +version = "0.19.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "999926d48cf0929a39e06ce22299084f11d307ca9e765801eb56bf192b07054b" dependencies = [ "blake3", "cc", @@ -2619,7 +2621,9 @@ dependencies = [ [[package]] name = "miden-crypto-derive" -version = "0.19.4" +version = "0.19.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3550b5656b791fec59c0b6089b4d0368db746a34749ccd47e59afb01aa877e9e" dependencies = [ "quote", "syn 2.0.114", diff --git a/Cargo.toml b/Cargo.toml index b0258c582..0c7295f24 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -61,7 +61,7 @@ miden-tx-batch-prover = { version = "0.13" } # Other miden dependencies. These should align with those expected by miden-base. miden-air = { features = ["std", "testing"], version = "0.20" } -miden-crypto = { path = "../miden-crypto/miden-crypto", version = "0.19" } +miden-crypto = { version = "0.19.5" } # External dependencies anyhow = { version = "1.0" } @@ -103,10 +103,6 @@ tracing = { version = "0.1" } tracing-subscriber = { features = ["env-filter", "fmt", "json"], version = "0.3" } url = { features = ["serde"], version = "2.5" } -# Ensure all crates use the local miden-crypto. -[patch.crates-io] -miden-crypto = { path = "../miden-crypto/miden-crypto" } - # Lints are set to warn for development, which are promoted to errors in CI. [workspace.lints.clippy] # Pedantic lints are set to a lower priority which allows lints in the group to be selectively enabled. 
diff --git a/crates/store/src/db/tests.rs b/crates/store/src/db/tests.rs index 1f7012d1d..330a65cd3 100644 --- a/crates/store/src/db/tests.rs +++ b/crates/store/src/db/tests.rs @@ -68,7 +68,6 @@ use miden_protocol::transaction::{ }; use miden_protocol::utils::{Deserializable, Serializable}; use miden_protocol::{EMPTY_WORD, Felt, FieldElement, Word}; - use miden_standards::account::auth::AuthFalcon512Rpo; use miden_standards::code_builder::CodeBuilder; use miden_standards::note::{NetworkAccountTarget, create_p2id_note}; @@ -2906,7 +2905,11 @@ fn inner_forest_retains_latest_after_100_blocks_and_pruning() { .expect("Should be able to get witness for key1"); let proof: SmtProof = witness.into(); - assert_eq!(proof.compute_root(), storage_root_at_51, "Witness must verify against storage root"); + assert_eq!( + proof.compute_root(), + storage_root_at_51, + "Witness must verify against storage root" + ); let vault_root_at_1 = forest.get_vault_root(account_id, block_1); assert!(vault_root_at_1.is_some()); @@ -3183,9 +3186,8 @@ fn inner_forest_preserves_mixed_slots_independently() { ); // Verify vault is still accessible - let vault_root_at_1 = forest - .get_vault_root(account_id, block_1) - .expect("Vault should be accessible"); + let vault_root_at_1 = + forest.get_vault_root(account_id, block_1).expect("Vault should be accessible"); assert_eq!(vault_root_at_1, initial_vault_root, "Vault should be from block 1"); // Verify map_a is accessible (from block 51) diff --git a/crates/store/src/inner_forest/mod.rs b/crates/store/src/inner_forest/mod.rs index 56e1eec5c..856cb6c58 100644 --- a/crates/store/src/inner_forest/mod.rs +++ b/crates/store/src/inner_forest/mod.rs @@ -1,4 +1,18 @@ use std::collections::BTreeSet; + +use miden_crypto::hash::rpo::Rpo256; +use miden_crypto::merkle::smt::{ + ForestInMemoryBackend, + ForestOperation, + LargeSmtForest, + LargeSmtForestError, + LineageId, + RootInfo, + SMT_DEPTH, + SmtUpdateBatch, + TreeId, +}; +use 
miden_crypto::merkle::{EmptySubtreeRoots, MerkleError}; use miden_node_proto::domain::account::AccountStorageMapDetails; use miden_protocol::account::delta::{AccountDelta, AccountStorageDelta, AccountVaultDelta}; use miden_protocol::account::{ @@ -10,14 +24,8 @@ use miden_protocol::account::{ }; use miden_protocol::asset::{Asset, AssetVaultKey, AssetWitness, FungibleAsset}; use miden_protocol::block::BlockNumber; -use miden_crypto::hash::rpo::Rpo256; -use miden_crypto::merkle::smt::{ - ForestInMemoryBackend, ForestOperation, LargeSmtForest, LargeSmtForestError, LineageId, - RootInfo, SMT_DEPTH, SmtUpdateBatch, TreeId, -}; -use miden_crypto::merkle::{EmptySubtreeRoots, MerkleError}; -use miden_protocol::utils::Serializable; use miden_protocol::errors::{AssetError, StorageMapError}; +use miden_protocol::utils::Serializable; use miden_protocol::{EMPTY_WORD, Word}; use thiserror::Error; use tracing::instrument; @@ -75,9 +83,7 @@ pub(crate) struct InnerForest { impl InnerForest { pub(crate) fn new() -> Self { - Self { - forest: Self::create_forest(), - } + Self { forest: Self::create_forest() } } fn create_forest() -> LargeSmtForest { @@ -93,6 +99,7 @@ impl InnerForest { *EmptySubtreeRoots::entry(SMT_DEPTH, 0) } + #[cfg(test)] fn tree_id_for_root( &self, account_id: AccountId, @@ -103,11 +110,13 @@ impl InnerForest { self.lookup_tree_id(lineage, block_num) } + #[cfg(test)] fn tree_id_for_vault_root(&self, account_id: AccountId, block_num: BlockNumber) -> TreeId { let lineage = Self::vault_lineage_id(account_id); self.lookup_tree_id(lineage, block_num) } + #[expect(clippy::unused_self)] fn lookup_tree_id(&self, lineage: LineageId, block_num: BlockNumber) -> TreeId { TreeId::new(lineage, block_num.as_u64()) } @@ -176,7 +185,6 @@ impl InnerForest { } } - // ACCESSORS // -------------------------------------------------------------------------------------------- @@ -191,10 +199,11 @@ impl InnerForest { } else { None } - } + }, } } + #[cfg(test)] fn tree_root(&self, lineage: 
LineageId, block_num: BlockNumber) -> Option { let tree = self.tree_id_for_lookup(lineage, block_num)?; match self.forest.root_info(tree) { @@ -204,6 +213,7 @@ impl InnerForest { } /// Retrieves a vault root for the specified account and block. + #[cfg(test)] pub(crate) fn get_vault_root( &self, account_id: AccountId, @@ -214,6 +224,7 @@ impl InnerForest { } /// Retrieves the storage map root for an account slot at the specified block. + #[cfg(test)] pub(crate) fn get_storage_map_root( &self, account_id: AccountId, @@ -239,14 +250,9 @@ impl InnerForest { raw_key: Word, ) -> Result { let lineage = Self::storage_lineage_id(account_id, slot_name); - let tree = self - .tree_id_for_lookup(lineage, block_num) - .ok_or(WitnessError::RootNotFound)?; + let tree = self.tree_id_for_lookup(lineage, block_num).ok_or(WitnessError::RootNotFound)?; let key = StorageMap::hash_key(raw_key); - let proof = self - .forest - .open(tree, key) - .map_err(Self::map_forest_error_to_witness)?; + let proof = self.forest.open(tree, key).map_err(Self::map_forest_error_to_witness)?; Ok(StorageMapWitness::new(proof, vec![raw_key])?) } @@ -260,9 +266,7 @@ impl InnerForest { asset_keys: BTreeSet, ) -> Result, WitnessError> { let lineage = Self::vault_lineage_id(account_id); - let tree = self - .tree_id_for_lookup(lineage, block_num) - .ok_or(WitnessError::RootNotFound)?; + let tree = self.tree_id_for_lookup(lineage, block_num).ok_or(WitnessError::RootNotFound)?; let witnessees: Result, WitnessError> = Result::from_iter(asset_keys.into_iter().map(|key| { let proof = self @@ -378,9 +382,7 @@ impl InnerForest { /// account, returns an empty SMT root. fn get_latest_vault_root(&self, account_id: AccountId) -> Word { let lineage = Self::vault_lineage_id(account_id); - self.forest - .latest_root(lineage) - .map_or_else(Self::empty_smt_root, |root| root) + self.forest.latest_root(lineage).unwrap_or_else(Self::empty_smt_root) } /// Inserts asset vault data into the forest for the specified account. 
Assumes that asset @@ -446,7 +448,6 @@ impl InnerForest { ); } - /// Updates the forest with vault changes from a delta. The vault delta is assumed to be /// non-empty. /// @@ -466,10 +467,8 @@ impl InnerForest { // get the previous vault root; the root could be for an empty or non-empty SMT let lineage = Self::vault_lineage_id(account_id); - let prev_tree = self - .forest - .latest_version(lineage) - .map(|version| TreeId::new(lineage, version)); + let prev_tree = + self.forest.latest_version(lineage).map(|version| TreeId::new(lineage, version)); let mut entries: Vec<(Word, Word)> = Vec::new(); @@ -542,12 +541,9 @@ impl InnerForest { slot_name: &StorageSlotName, ) -> Word { let lineage = Self::storage_lineage_id(account_id, slot_name); - self.forest - .latest_root(lineage) - .map_or_else(Self::empty_smt_root, |root| root) + self.forest.latest_root(lineage).map_or_else(Self::empty_smt_root, |root| root) } - /// Inserts all storage maps from the provided storage delta into the forest. /// /// Assumes that storage maps for the provided account are not in the forest already. @@ -608,7 +604,6 @@ impl InnerForest { } } - /// Updates the forest with storage map changes from a delta. 
/// /// Processes storage map slot deltas, building SMTs for each modified slot and tracking the diff --git a/crates/store/src/inner_forest/tests.rs b/crates/store/src/inner_forest/tests.rs index 9f695cc56..665476aba 100644 --- a/crates/store/src/inner_forest/tests.rs +++ b/crates/store/src/inner_forest/tests.rs @@ -1,8 +1,8 @@ use assert_matches::assert_matches; +use miden_node_proto::domain::account::StorageMapEntries; use miden_protocol::account::AccountCode; use miden_protocol::asset::{Asset, AssetVault, AssetVaultKey, FungibleAsset}; use miden_protocol::crypto::merkle::smt::SmtProof; -use miden_node_proto::domain::account::StorageMapEntries; use miden_protocol::testing::account_id::{ ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET, ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE, @@ -277,7 +277,9 @@ fn witness_queries_work_with_sparse_lineage_updates() { let vault_root_at_3 = forest.get_vault_root(account_id, block_3).unwrap(); assert_matches!( - forest.forest.open(forest.tree_id_for_vault_root(account_id, block_3), asset_key.into()), + forest + .forest + .open(forest.tree_id_for_vault_root(account_id, block_3), asset_key.into()), Ok(_) ); assert_ne!(vault_root_at_3, InnerForest::empty_smt_root()); @@ -358,8 +360,9 @@ fn vault_shared_root_retained_when_one_entry_pruned() { let vault_root_at_52 = forest.get_vault_root(account1, block_at_52); assert_eq!(vault_root_at_52, Some(root1)); - let witnesses = - forest.get_vault_asset_witnesses(account1, block_at_52, [asset_key].into()).unwrap(); + let witnesses = forest + .get_vault_asset_witnesses(account1, block_at_52, [asset_key].into()) + .unwrap(); assert_eq!(witnesses.len(), 1); let proof: SmtProof = witnesses[0].clone().into(); assert_eq!(proof.compute_root(), root1); @@ -457,11 +460,13 @@ fn storage_map_state_is_not_available_for_block_gaps() { forest.update_account(block_4, &delta_4).unwrap(); assert!( - forest.get_storage_map_root(account_id, &slot_name, BlockNumber::from(BLOCK_QUERY_ONE)) + forest + 
.get_storage_map_root(account_id, &slot_name, BlockNumber::from(BLOCK_QUERY_ONE)) .is_some() ); assert!( - forest.get_storage_map_root(account_id, &slot_name, BlockNumber::from(BLOCK_QUERY_TWO)) + forest + .get_storage_map_root(account_id, &slot_name, BlockNumber::from(BLOCK_QUERY_TWO)) .is_some() ); assert!(forest.get_storage_map_root(account_id, &slot_name, block_4).is_some()); @@ -583,7 +588,6 @@ fn storage_map_key_hashing_and_raw_entries_are_consistent() { assert_eq!(proof.get(&hashed_key), Some(value)); // Raw keys never appear in SMT proofs, only their hashed counterparts. assert_eq!(proof.get(&raw_key), None); - } // PRUNING TESTS @@ -680,9 +684,10 @@ fn prune_respects_retention_boundary() { #[test] fn prune_roots_removes_old_entries() { + use miden_protocol::account::delta::StorageMapDelta; + let mut forest = InnerForest::new(); let account_id = dummy_account(); - use miden_protocol::account::delta::StorageMapDelta; let faucet_id = dummy_faucet(); let slot_name = StorageSlotName::mock(3); @@ -697,7 +702,8 @@ fn prune_roots_removes_old_entries() { let value = Word::from([0, 0, i * i * i, 77]); let mut map_delta = StorageMapDelta::default(); map_delta.insert(key, value); - let storage_delta = AccountStorageDelta::new().add_updated_maps([(slot_name.clone(), map_delta)]); + let storage_delta = + AccountStorageDelta::new().add_updated_maps([(slot_name.clone(), map_delta)]); let delta = dummy_partial_delta(account_id, vault_delta, storage_delta); forest.update_account(block_num, &delta).unwrap(); From 63d4deb2441d9249fd13a99e46f193ddd67691a1 Mon Sep 17 00:00:00 2001 From: Bernhard Schuster Date: Tue, 24 Feb 2026 15:50:17 +0100 Subject: [PATCH 11/18] remove lru dep --- Cargo.lock | 1 - crates/store/Cargo.toml | 1 - 2 files changed, 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index de1398db7..0a87e4824 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2910,7 +2910,6 @@ dependencies = [ "hex", "indexmap 2.13.0", "libsqlite3-sys", - "lru 0.16.3", "miden-crypto", 
"miden-node-proto", "miden-node-proto-build", diff --git a/crates/store/Cargo.toml b/crates/store/Cargo.toml index 7f1f6901b..cd1db3ebf 100644 --- a/crates/store/Cargo.toml +++ b/crates/store/Cargo.toml @@ -25,7 +25,6 @@ fs-err = { workspace = true } hex = { version = "0.4" } indexmap = { workspace = true } libsqlite3-sys = { workspace = true } -lru = { workspace = true } miden-crypto = { features = ["concurrent", "hashmaps"], workspace = true } miden-node-proto = { workspace = true } miden-node-proto-build = { features = ["internal"], workspace = true } From 65bc39da06120de659aebcff704aa62d39bd7621 Mon Sep 17 00:00:00 2001 From: Bernhard Schuster Date: Tue, 24 Feb 2026 17:09:08 +0100 Subject: [PATCH 12/18] fixup --- crates/store/src/db/mod.rs | 29 ++++++++++--------- crates/store/src/db/tests.rs | 40 ++++++-------------------- crates/store/src/inner_forest/mod.rs | 8 +++--- crates/store/src/inner_forest/tests.rs | 28 ++++++------------ crates/store/src/state/mod.rs | 34 +++++++++++++--------- 5 files changed, 58 insertions(+), 81 deletions(-) diff --git a/crates/store/src/db/mod.rs b/crates/store/src/db/mod.rs index bbb786c05..baf0295c5 100644 --- a/crates/store/src/db/mod.rs +++ b/crates/store/src/db/mod.rs @@ -36,6 +36,13 @@ use crate::db::models::{Page, queries}; use crate::errors::{DatabaseError, DatabaseSetupError, NoteSyncError, StateSyncError}; use crate::genesis::GenesisBlock; +const ROW_OVERHEAD_BYTES: usize = + 2 * size_of::() + size_of::() + size_of::(); + +fn default_storage_map_entries_limit() -> usize { + MAX_RESPONSE_PAYLOAD_BYTES / ROW_OVERHEAD_BYTES +} + pub(crate) mod manager; mod migrations; @@ -604,13 +611,7 @@ impl Db { block_range: RangeInclusive, entries_limit: Option, ) -> Result { - let entries_limit = entries_limit.unwrap_or_else(|| { - // TODO: These limits should be given by the protocol. 
- // See miden-base/issues/1770 for more details - pub const ROW_OVERHEAD_BYTES: usize = - 2 * size_of::() + size_of::() + size_of::(); // key + value + block_num + slot_idx - MAX_RESPONSE_PAYLOAD_BYTES / ROW_OVERHEAD_BYTES - }); + let entries_limit = entries_limit.unwrap_or_else(default_storage_map_entries_limit); self.transact("select storage map sync values", move |conn| { models::queries::select_account_storage_map_values_paged( @@ -645,13 +646,7 @@ impl Db { // columns let mut values = Vec::new(); let mut block_range_start = BlockNumber::GENESIS; - let entries_limit = entries_limit.unwrap_or_else(|| { - // TODO: These limits should be given by the protocol. - // See miden-base/issues/1770 for more details - pub const ROW_OVERHEAD_BYTES: usize = - 2 * size_of::() + size_of::() + size_of::(); // key + value + block_num + slot_idx - MAX_RESPONSE_PAYLOAD_BYTES / ROW_OVERHEAD_BYTES - }); + let entries_limit = entries_limit.unwrap_or_else(default_storage_map_entries_limit); let mut page = self .select_storage_map_sync_values( @@ -662,6 +657,7 @@ impl Db { .await?; values.extend(page.values); + let mut last_block_included = page.last_block_included; loop { if page.last_block_included == block_num || page.last_block_included < block_range_start @@ -678,6 +674,11 @@ impl Db { ) .await?; + if page.last_block_included <= last_block_included { + return Ok(AccountStorageMapDetails::limit_exceeded(slot_name)); + } + + last_block_included = page.last_block_included; values.extend(page.values); } diff --git a/crates/store/src/db/tests.rs b/crates/store/src/db/tests.rs index 330a65cd3..9cbf07606 100644 --- a/crates/store/src/db/tests.rs +++ b/crates/store/src/db/tests.rs @@ -2719,9 +2719,7 @@ fn inner_forest_shared_roots_not_deleted_prematurely() { let proof3: SmtProof = witness3.into(); assert_eq!(proof3.compute_root(), root3, "Witness3 must verify against root3"); - let (_, storage_roots_removed) = forest.prune(block50); - // nothing should be pruned yet, it's still in the 
window - assert_eq!(storage_roots_removed, 0); + let _vault_roots_removed = forest.prune(block50); // Update accounts 1,2,3 let mut map_delta_update = StorageMapDelta::default(); @@ -2748,10 +2746,7 @@ fn inner_forest_shared_roots_not_deleted_prematurely() { forest.update_account(block52, &delta3_update).unwrap(); // Prune at block 52 - let (_, storage_roots_removed) = forest.prune(block52); - // the root for account01 is the most recent, which is the same as the other two, so nothing - // should be pruned - assert_eq!(storage_roots_removed, 0); + let _vault_roots_removed = forest.prune(block52); // ensure the root is still accessible let account1_root_after_prune = forest.get_storage_map_root(account1, &slot_name, block01); @@ -2763,9 +2758,7 @@ fn inner_forest_shared_roots_not_deleted_prematurely() { forest.update_account(block53, &delta1_update).unwrap(); // Prune at block 53 - let (_, storage_roots_removed) = forest.prune(block53); - // the roots from block01 and block02 are now all obsolete and should be pruned - assert_eq!(storage_roots_removed, 0); + let _vault_roots_removed = forest.prune(block53); // Account2 and Account3 should still be accessible at their recent blocks let account1_root = forest.get_storage_map_root(account1, &slot_name, block53).unwrap(); @@ -2848,12 +2841,11 @@ fn inner_forest_retains_latest_after_100_blocks_and_pruning() { Some(root) if root == initial_storage_map_root ); - let (vault_roots_removed, storage_roots_removed) = forest.prune(block_100); + let vault_roots_removed = forest.prune(block_100); let cutoff_block = 100 - HISTORICAL_BLOCK_RETENTION; assert_eq!(cutoff_block, 50, "Cutoff should be block 50 (100 - HISTORICAL_BLOCK_RETENTION)"); assert_eq!(vault_roots_removed, 0); - assert_eq!(storage_roots_removed, 0); assert!(forest.get_vault_root(account_id, block_100).is_some()); assert_matches!( @@ -2886,10 +2878,9 @@ fn inner_forest_retains_latest_after_100_blocks_and_pruning() { forest.update_account(block_51, 
&delta_51).unwrap(); // Prune again at block 100 - let (vault_roots_removed_2, storage_roots_removed_2) = forest.prune(block_100); + let vault_roots_removed_2 = forest.prune(block_100); assert_eq!(vault_roots_removed_2, 0); - assert_eq!(storage_roots_removed_2, 0); let vault_root_at_51 = forest .get_vault_root(account_id, block_51) @@ -2942,14 +2933,13 @@ fn inner_forest_preserves_most_recent_vault_only() { let block_100 = BlockNumber::from(100); // Prune at block 100 - let (vault_roots_removed, storage_roots_removed) = forest.prune(block_100); + let vault_roots_removed = forest.prune(block_100); // Vault from block 1 should NOT be pruned (it's the most recent) assert_eq!( vault_roots_removed, 0, "Should NOT prune vault root (it's the most recent for this account)" ); - assert_eq!(storage_roots_removed, 0, "No storage roots to prune"); // Verify vault is still accessible at block 1 let vault_root_at_1 = forest @@ -3012,14 +3002,10 @@ fn inner_forest_preserves_most_recent_storage_map_only() { let block_100 = BlockNumber::from(100); // Prune at block 100 - let (vault_roots_removed, storage_roots_removed) = forest.prune(block_100); + let vault_roots_removed = forest.prune(block_100); // Storage map from block 1 should NOT be pruned (it's the most recent) assert_eq!(vault_roots_removed, 0, "No vault roots to prune"); - assert_eq!( - storage_roots_removed, 0, - "Should NOT prune storage map root (it's the most recent for this account/slot)" - ); // Verify storage map is still accessible at block 1 let storage_root_at_1 = forest @@ -3080,14 +3066,10 @@ fn inner_forest_preserves_most_recent_storage_value_slot() { let block_100 = BlockNumber::from(100); // Prune at block 100 - let (vault_roots_removed, storage_roots_removed) = forest.prune(block_100); + let vault_roots_removed = forest.prune(block_100); // No roots should be pruned because there are no map slots assert_eq!(vault_roots_removed, 0, "No vault roots in this test"); - assert_eq!( - storage_roots_removed, 0, - 
"Value slots don't create storage roots in InnerForest" - ); // Verify no storage map roots exist for this account let storage_root = forest.get_storage_map_root(account_id, &slot_value, block_1); @@ -3171,7 +3153,7 @@ fn inner_forest_preserves_mixed_slots_independently() { let block_100 = BlockNumber::from(100); // Prune at block 100 - let (vault_roots_removed, storage_roots_removed) = forest.prune(block_100); + let vault_roots_removed = forest.prune(block_100); // Vault: block 1 is most recent, should NOT be pruned // Map A: block 1 is old (block 51 is newer), SHOULD be pruned @@ -3180,10 +3162,6 @@ fn inner_forest_preserves_mixed_slots_independently() { vault_roots_removed, 0, "Vault root from block 1 should NOT be pruned (most recent)" ); - assert_eq!( - storage_roots_removed, 0, - "Map A from block 1 should be pruned (block 51 is newer); Map B should NOT" - ); // Verify vault is still accessible let vault_root_at_1 = diff --git a/crates/store/src/inner_forest/mod.rs b/crates/store/src/inner_forest/mod.rs index 856cb6c58..f205a19f1 100644 --- a/crates/store/src/inner_forest/mod.rs +++ b/crates/store/src/inner_forest/mod.rs @@ -335,7 +335,7 @@ impl InnerForest { ); } - let _ = self.prune(block_num); + self.prune(block_num); Ok(()) } @@ -652,8 +652,8 @@ impl InnerForest { /// Prunes old entries from the in-memory forest data structures. /// /// The `LargeSmtForest` itself is truncated to drop historical versions beyond the cutoff. 
- #[instrument(target = COMPONENT, skip_all, fields(block.number = %chain_tip), ret)] - pub(crate) fn prune(&mut self, chain_tip: BlockNumber) -> (usize, usize) { + #[instrument(target = COMPONENT, skip_all, fields(block.number = %chain_tip))] + pub(crate) fn prune(&mut self, chain_tip: BlockNumber) -> usize { let cutoff_block = BlockNumber::from(chain_tip.as_u32().saturating_sub(HISTORICAL_BLOCK_RETENTION)); let before = self.forest.roots().count(); @@ -663,6 +663,6 @@ impl InnerForest { let after = self.forest.roots().count(); let removed = before.saturating_sub(after); - (removed, 0) + removed } } diff --git a/crates/store/src/inner_forest/tests.rs b/crates/store/src/inner_forest/tests.rs index 665476aba..7eb6a21cb 100644 --- a/crates/store/src/inner_forest/tests.rs +++ b/crates/store/src/inner_forest/tests.rs @@ -350,10 +350,9 @@ fn vault_shared_root_retained_when_one_entry_pruned() { forest.update_account(block_at_51, &delta_2_update).unwrap(); let block_at_52 = BlockNumber::from(HISTORICAL_BLOCK_RETENTION + 2); - let (vault_removed, storage_roots_removed) = forest.prune(block_at_52); + let vault_removed = forest.prune(block_at_52); assert_eq!(vault_removed, 0); - assert_eq!(storage_roots_removed, 0); assert!(forest.get_vault_root(account1, block_1).is_some()); assert!(forest.get_vault_root(account2, block_1).is_some()); @@ -601,10 +600,9 @@ const TEST_PRUNE_CHAIN_TIP: u32 = HISTORICAL_BLOCK_RETENTION + 5; fn prune_handles_empty_forest() { let mut forest = InnerForest::new(); - let (vault_removed, storage_roots_removed) = forest.prune(BlockNumber::GENESIS); + let vault_removed = forest.prune(BlockNumber::GENESIS); assert_eq!(vault_removed, 0); - assert_eq!(storage_roots_removed, 0); } #[test] @@ -639,9 +637,7 @@ fn prune_removes_smt_roots_from_forest() { let retained_block = BlockNumber::from(TEST_PRUNE_CHAIN_TIP); let pruned_block = BlockNumber::from(3u32); - let (_roots_removed, storage_roots_removed) = forest.prune(retained_block); - - 
assert_eq!(storage_roots_removed, 0); + let _roots_removed = forest.prune(retained_block); assert!(forest.get_vault_root(account_id, retained_block).is_some()); assert!(forest.get_vault_root(account_id, pruned_block).is_none()); assert!(forest.get_storage_map_root(account_id, &slot_name, pruned_block).is_none()); @@ -674,11 +670,9 @@ fn prune_respects_retention_boundary() { forest.update_account(block_num, &delta).unwrap(); } - let (roots_removed, storage_roots_removed) = - forest.prune(BlockNumber::from(HISTORICAL_BLOCK_RETENTION)); + let roots_removed = forest.prune(BlockNumber::from(HISTORICAL_BLOCK_RETENTION)); assert_eq!(roots_removed, 0); - assert_eq!(storage_roots_removed, 0); assert_eq!(forest.forest.tree_count(), 11); } @@ -711,10 +705,9 @@ fn prune_roots_removes_old_entries() { assert_eq!(forest.forest.tree_count(), 22); - let (roots_removed, storage_roots_removed) = forest.prune(BlockNumber::from(TEST_CHAIN_LENGTH)); + let roots_removed = forest.prune(BlockNumber::from(TEST_CHAIN_LENGTH)); assert_eq!(roots_removed, 0); - assert_eq!(storage_roots_removed, 0); assert_eq!(forest.forest.tree_count(), 22); } @@ -743,7 +736,7 @@ fn prune_handles_multiple_accounts() { assert_eq!(forest.forest.tree_count(), 22); - let (vault_removed, _) = forest.prune(BlockNumber::from(TEST_CHAIN_LENGTH)); + let vault_removed = forest.prune(BlockNumber::from(TEST_CHAIN_LENGTH)); let expected_removed_per_account = (TEST_CHAIN_LENGTH - HISTORICAL_BLOCK_RETENTION) as usize; assert_eq!(vault_removed, 0); @@ -781,10 +774,9 @@ fn prune_handles_multiple_slots() { assert_eq!(forest.forest.tree_count(), 22); let chain_tip = BlockNumber::from(TEST_CHAIN_LENGTH); - let (roots_removed, storage_roots_removed) = forest.prune(chain_tip); + let roots_removed = forest.prune(chain_tip); assert_eq!(roots_removed, 0); - assert_eq!(storage_roots_removed, 0); assert_eq!(forest.forest.tree_count(), 22); } @@ -834,10 +826,9 @@ fn prune_preserves_most_recent_state_per_entity() { // Block 100: Prune let 
block_100 = BlockNumber::from(100); - let (vault_removed, storage_roots_removed) = forest.prune(block_100); + let vault_removed = forest.prune(block_100); assert_eq!(vault_removed, 0); - assert_eq!(storage_roots_removed, 0); assert!(forest.get_storage_map_root(account_id, &slot_map_a, block_at_51).is_some()); assert!(forest.get_storage_map_root(account_id, &slot_map_a, block_1).is_some()); @@ -876,11 +867,10 @@ fn prune_preserves_entries_within_retention_window() { // Block 100: Prune (retention window = 50 blocks, cutoff = 50) let block_100 = BlockNumber::from(100); - let (vault_removed, storage_roots_removed) = forest.prune(block_100); + let vault_removed = forest.prune(block_100); // Blocks 1 and 25 pruned (outside retention, have newer entries) assert_eq!(vault_removed, 4); - assert_eq!(storage_roots_removed, 0); assert!(forest.get_vault_root(account_id, BlockNumber::from(1)).is_none()); assert!(forest.get_vault_root(account_id, BlockNumber::from(25)).is_none()); diff --git a/crates/store/src/state/mod.rs b/crates/store/src/state/mod.rs index 683746bba..9aa72b2e3 100644 --- a/crates/store/src/state/mod.rs +++ b/crates/store/src/state/mod.rs @@ -1108,19 +1108,27 @@ impl State { for StorageMapRequest { slot_name, slot_data } in storage_requests { let details = match &slot_data { - SlotData::MapKeys(keys) => forest_guard - .get_storage_map_details_for_keys( - account_id, - slot_name.clone(), - block_num, - keys, - ) - .ok_or_else(|| DatabaseError::StorageRootNotFound { - account_id, - slot_name: slot_name.to_string(), - block_num, - })? 
- .map_err(DatabaseError::MerkleError)?, + SlotData::MapKeys(keys) => { + let result = forest_guard + .get_storage_map_details_for_keys( + account_id, + slot_name.clone(), + block_num, + keys, + ) + .ok_or_else(|| DatabaseError::StorageRootNotFound { + account_id, + slot_name: slot_name.to_string(), + block_num, + })?; + match result { + Ok(details) => details, + Err(err) => { + drop(forest_guard); + return Err(DatabaseError::MerkleError(err)); + }, + } + }, SlotData::All => { // we don't want to hold the forest guard for a prolonged time drop(forest_guard); From 843dcbec5637a395758d4ef874d6ce395f1f00ed Mon Sep 17 00:00:00 2001 From: Bernhard Schuster Date: Tue, 24 Feb 2026 18:03:09 +0100 Subject: [PATCH 13/18] fix names --- crates/store/src/db/tests.rs | 33 ++++++++++++----------- crates/store/src/inner_forest/tests.rs | 37 +++++++++++++------------- 2 files changed, 37 insertions(+), 33 deletions(-) diff --git a/crates/store/src/db/tests.rs b/crates/store/src/db/tests.rs index 9cbf07606..7d6da4ed0 100644 --- a/crates/store/src/db/tests.rs +++ b/crates/store/src/db/tests.rs @@ -2719,7 +2719,8 @@ fn inner_forest_shared_roots_not_deleted_prematurely() { let proof3: SmtProof = witness3.into(); assert_eq!(proof3.compute_root(), root3, "Witness3 must verify against root3"); - let _vault_roots_removed = forest.prune(block50); + let total_roots_removed = forest.prune(block50); + assert_eq!(total_roots_removed, 0); // Update accounts 1,2,3 let mut map_delta_update = StorageMapDelta::default(); @@ -2746,7 +2747,8 @@ fn inner_forest_shared_roots_not_deleted_prematurely() { forest.update_account(block52, &delta3_update).unwrap(); // Prune at block 52 - let _vault_roots_removed = forest.prune(block52); + let total_roots_removed = forest.prune(block52); + assert_eq!(total_roots_removed, 0); // ensure the root is still accessible let account1_root_after_prune = forest.get_storage_map_root(account1, &slot_name, block01); @@ -2758,7 +2760,8 @@ fn 
inner_forest_shared_roots_not_deleted_prematurely() { forest.update_account(block53, &delta1_update).unwrap(); // Prune at block 53 - let _vault_roots_removed = forest.prune(block53); + let total_roots_removed = forest.prune(block53); + assert_eq!(total_roots_removed, 0); // Account2 and Account3 should still be accessible at their recent blocks let account1_root = forest.get_storage_map_root(account1, &slot_name, block53).unwrap(); @@ -2841,11 +2844,11 @@ fn inner_forest_retains_latest_after_100_blocks_and_pruning() { Some(root) if root == initial_storage_map_root ); - let vault_roots_removed = forest.prune(block_100); + let total_roots_removed = forest.prune(block_100); let cutoff_block = 100 - HISTORICAL_BLOCK_RETENTION; assert_eq!(cutoff_block, 50, "Cutoff should be block 50 (100 - HISTORICAL_BLOCK_RETENTION)"); - assert_eq!(vault_roots_removed, 0); + assert_eq!(total_roots_removed, 0); assert!(forest.get_vault_root(account_id, block_100).is_some()); assert_matches!( @@ -2878,9 +2881,9 @@ fn inner_forest_retains_latest_after_100_blocks_and_pruning() { forest.update_account(block_51, &delta_51).unwrap(); // Prune again at block 100 - let vault_roots_removed_2 = forest.prune(block_100); + let total_roots_removed = forest.prune(block_100); - assert_eq!(vault_roots_removed_2, 0); + assert_eq!(total_roots_removed, 0); let vault_root_at_51 = forest .get_vault_root(account_id, block_51) @@ -2933,11 +2936,11 @@ fn inner_forest_preserves_most_recent_vault_only() { let block_100 = BlockNumber::from(100); // Prune at block 100 - let vault_roots_removed = forest.prune(block_100); + let total_roots_removed = forest.prune(block_100); // Vault from block 1 should NOT be pruned (it's the most recent) assert_eq!( - vault_roots_removed, 0, + total_roots_removed, 0, "Should NOT prune vault root (it's the most recent for this account)" ); @@ -3002,10 +3005,10 @@ fn inner_forest_preserves_most_recent_storage_map_only() { let block_100 = BlockNumber::from(100); // Prune at block 100 
- let vault_roots_removed = forest.prune(block_100); + let total_roots_removed = forest.prune(block_100); // Storage map from block 1 should NOT be pruned (it's the most recent) - assert_eq!(vault_roots_removed, 0, "No vault roots to prune"); + assert_eq!(total_roots_removed, 0, "No vault roots to prune"); // Verify storage map is still accessible at block 1 let storage_root_at_1 = forest @@ -3066,10 +3069,10 @@ fn inner_forest_preserves_most_recent_storage_value_slot() { let block_100 = BlockNumber::from(100); // Prune at block 100 - let vault_roots_removed = forest.prune(block_100); + let total_roots_removed = forest.prune(block_100); // No roots should be pruned because there are no map slots - assert_eq!(vault_roots_removed, 0, "No vault roots in this test"); + assert_eq!(total_roots_removed, 0, "No vault roots in this test"); // Verify no storage map roots exist for this account let storage_root = forest.get_storage_map_root(account_id, &slot_value, block_1); @@ -3153,13 +3156,13 @@ fn inner_forest_preserves_mixed_slots_independently() { let block_100 = BlockNumber::from(100); // Prune at block 100 - let vault_roots_removed = forest.prune(block_100); + let total_roots_removed = forest.prune(block_100); // Vault: block 1 is most recent, should NOT be pruned // Map A: block 1 is old (block 51 is newer), SHOULD be pruned // Map B: block 1 is most recent, should NOT be pruned assert_eq!( - vault_roots_removed, 0, + total_roots_removed, 0, "Vault root from block 1 should NOT be pruned (most recent)" ); diff --git a/crates/store/src/inner_forest/tests.rs b/crates/store/src/inner_forest/tests.rs index 7eb6a21cb..76273404d 100644 --- a/crates/store/src/inner_forest/tests.rs +++ b/crates/store/src/inner_forest/tests.rs @@ -350,9 +350,9 @@ fn vault_shared_root_retained_when_one_entry_pruned() { forest.update_account(block_at_51, &delta_2_update).unwrap(); let block_at_52 = BlockNumber::from(HISTORICAL_BLOCK_RETENTION + 2); - let vault_removed = 
forest.prune(block_at_52); + let total_roots_removed = forest.prune(block_at_52); - assert_eq!(vault_removed, 0); + assert_eq!(total_roots_removed, 0); assert!(forest.get_vault_root(account1, block_1).is_some()); assert!(forest.get_vault_root(account2, block_1).is_some()); @@ -600,9 +600,9 @@ const TEST_PRUNE_CHAIN_TIP: u32 = HISTORICAL_BLOCK_RETENTION + 5; fn prune_handles_empty_forest() { let mut forest = InnerForest::new(); - let vault_removed = forest.prune(BlockNumber::GENESIS); + let total_roots_removed = forest.prune(BlockNumber::GENESIS); - assert_eq!(vault_removed, 0); + assert_eq!(total_roots_removed, 0); } #[test] @@ -637,7 +637,8 @@ fn prune_removes_smt_roots_from_forest() { let retained_block = BlockNumber::from(TEST_PRUNE_CHAIN_TIP); let pruned_block = BlockNumber::from(3u32); - let _roots_removed = forest.prune(retained_block); + let total_roots_removed = forest.prune(retained_block); + assert_eq!(total_roots_removed, 0); assert!(forest.get_vault_root(account_id, retained_block).is_some()); assert!(forest.get_vault_root(account_id, pruned_block).is_none()); assert!(forest.get_storage_map_root(account_id, &slot_name, pruned_block).is_none()); @@ -670,9 +671,9 @@ fn prune_respects_retention_boundary() { forest.update_account(block_num, &delta).unwrap(); } - let roots_removed = forest.prune(BlockNumber::from(HISTORICAL_BLOCK_RETENTION)); + let total_roots_removed = forest.prune(BlockNumber::from(HISTORICAL_BLOCK_RETENTION)); - assert_eq!(roots_removed, 0); + assert_eq!(total_roots_removed, 0); assert_eq!(forest.forest.tree_count(), 11); } @@ -705,9 +706,9 @@ fn prune_roots_removes_old_entries() { assert_eq!(forest.forest.tree_count(), 22); - let roots_removed = forest.prune(BlockNumber::from(TEST_CHAIN_LENGTH)); + let total_roots_removed = forest.prune(BlockNumber::from(TEST_CHAIN_LENGTH)); - assert_eq!(roots_removed, 0); + assert_eq!(total_roots_removed, 0); assert_eq!(forest.forest.tree_count(), 22); } @@ -736,11 +737,11 @@ fn 
prune_handles_multiple_accounts() { assert_eq!(forest.forest.tree_count(), 22); - let vault_removed = forest.prune(BlockNumber::from(TEST_CHAIN_LENGTH)); + let total_roots_removed = forest.prune(BlockNumber::from(TEST_CHAIN_LENGTH)); let expected_removed_per_account = (TEST_CHAIN_LENGTH - HISTORICAL_BLOCK_RETENTION) as usize; - assert_eq!(vault_removed, 0); - assert!(vault_removed <= expected_removed_per_account * 2); + assert_eq!(total_roots_removed, 0); + assert!(total_roots_removed <= expected_removed_per_account * 2); assert_eq!(forest.forest.tree_count(), 22); } @@ -774,9 +775,9 @@ fn prune_handles_multiple_slots() { assert_eq!(forest.forest.tree_count(), 22); let chain_tip = BlockNumber::from(TEST_CHAIN_LENGTH); - let roots_removed = forest.prune(chain_tip); + let total_roots_removed = forest.prune(chain_tip); - assert_eq!(roots_removed, 0); + assert_eq!(total_roots_removed, 0); assert_eq!(forest.forest.tree_count(), 22); } @@ -826,9 +827,9 @@ fn prune_preserves_most_recent_state_per_entity() { // Block 100: Prune let block_100 = BlockNumber::from(100); - let vault_removed = forest.prune(block_100); + let total_roots_removed = forest.prune(block_100); - assert_eq!(vault_removed, 0); + assert_eq!(total_roots_removed, 0); assert!(forest.get_storage_map_root(account_id, &slot_map_a, block_at_51).is_some()); assert!(forest.get_storage_map_root(account_id, &slot_map_a, block_1).is_some()); @@ -867,10 +868,10 @@ fn prune_preserves_entries_within_retention_window() { // Block 100: Prune (retention window = 50 blocks, cutoff = 50) let block_100 = BlockNumber::from(100); - let vault_removed = forest.prune(block_100); + let total_roots_removed = forest.prune(block_100); // Blocks 1 and 25 pruned (outside retention, have newer entries) - assert_eq!(vault_removed, 4); + assert_eq!(total_roots_removed, 4); assert!(forest.get_vault_root(account_id, BlockNumber::from(1)).is_none()); assert!(forest.get_vault_root(account_id, BlockNumber::from(25)).is_none()); From 
cb187290f496c05a112c77c6bdc0060616390024 Mon Sep 17 00:00:00 2001 From: Bernhard Schuster Date: Tue, 24 Feb 2026 18:09:40 +0100 Subject: [PATCH 14/18] fmt --- crates/store/src/db/mod.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/crates/store/src/db/mod.rs b/crates/store/src/db/mod.rs index baf0295c5..be54700d7 100644 --- a/crates/store/src/db/mod.rs +++ b/crates/store/src/db/mod.rs @@ -36,8 +36,7 @@ use crate::db::models::{Page, queries}; use crate::errors::{DatabaseError, DatabaseSetupError, NoteSyncError, StateSyncError}; use crate::genesis::GenesisBlock; -const ROW_OVERHEAD_BYTES: usize = - 2 * size_of::() + size_of::() + size_of::(); +const ROW_OVERHEAD_BYTES: usize = 2 * size_of::() + size_of::() + size_of::(); fn default_storage_map_entries_limit() -> usize { MAX_RESPONSE_PAYLOAD_BYTES / ROW_OVERHEAD_BYTES From b9f2a451503c0794ebab44ac1af02cd0203cd89f Mon Sep 17 00:00:00 2001 From: Bernhard Schuster Date: Tue, 24 Feb 2026 18:10:23 +0100 Subject: [PATCH 15/18] clippy --- crates/store/src/inner_forest/mod.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/crates/store/src/inner_forest/mod.rs b/crates/store/src/inner_forest/mod.rs index f205a19f1..0138055de 100644 --- a/crates/store/src/inner_forest/mod.rs +++ b/crates/store/src/inner_forest/mod.rs @@ -661,8 +661,6 @@ impl InnerForest { self.forest.truncate(cutoff_block.as_u64()); let after = self.forest.roots().count(); - let removed = before.saturating_sub(after); - - removed + before.saturating_sub(after) } } From 899fccb49be301c3a98cdd25e6d368ddbe93fcfa Mon Sep 17 00:00:00 2001 From: Bernhard Schuster Date: Wed, 25 Feb 2026 12:07:53 +0100 Subject: [PATCH 16/18] docs update --- crates/store/src/db/mod.rs | 2 +- crates/store/src/state/mod.rs | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/crates/store/src/db/mod.rs b/crates/store/src/db/mod.rs index be54700d7..7a9d758b5 100644 --- a/crates/store/src/db/mod.rs +++ b/crates/store/src/db/mod.rs 
@@ -630,7 +630,7 @@ impl Db { /// /// Returns: /// - `::LimitExceeded` when too many entries are present - /// - `::AllEntries` if the size is sufficiently small + /// - `::AllEntries` if the size is less than or equal to the given `entries_limit`, if any pub(crate) async fn reconstruct_storage_map_from_db( &self, account_id: AccountId, diff --git a/crates/store/src/state/mod.rs b/crates/store/src/state/mod.rs index 9aa72b2e3..945c82495 100644 --- a/crates/store/src/state/mod.rs +++ b/crates/store/src/state/mod.rs @@ -1050,8 +1050,7 @@ impl State { /// /// For specific key queries (`SlotData::MapKeys`), the forest is used to provide SMT proofs. /// Returns an error if the forest doesn't have data for the requested slot. - /// All-entries queries (`SlotData::All`) use the forest to request all entries from cache or - /// fall back to database reconstruction. + /// All-entries queries (`SlotData::All`) request all entries from the database. async fn fetch_public_account_details( &self, account_id: AccountId, From 3ade4d68ea8e63e3991aaa46f3926e20281751d8 Mon Sep 17 00:00:00 2001 From: Bernhard Schuster Date: Wed, 25 Feb 2026 13:33:01 +0100 Subject: [PATCH 17/18] self review, again --- crates/store/src/inner_forest/mod.rs | 89 +++++++++++++--------------- 1 file changed, 40 insertions(+), 49 deletions(-) diff --git a/crates/store/src/inner_forest/mod.rs b/crates/store/src/inner_forest/mod.rs index 0138055de..2b51f9573 100644 --- a/crates/store/src/inner_forest/mod.rs +++ b/crates/store/src/inner_forest/mod.rs @@ -14,6 +14,7 @@ use miden_crypto::merkle::smt::{ }; use miden_crypto::merkle::{EmptySubtreeRoots, MerkleError}; use miden_node_proto::domain::account::AccountStorageMapDetails; +use miden_node_utils::ErrorReport; use miden_protocol::account::delta::{AccountDelta, AccountStorageDelta, AccountVaultDelta}; use miden_protocol::account::{ AccountId, @@ -47,16 +48,8 @@ pub const HISTORICAL_BLOCK_RETENTION: u32 = 50; #[derive(Debug, Error)] pub enum InnerForestError
{ - #[error( - "balance underflow: account {account_id}, faucet {faucet_id}, \ - previous balance {prev_balance}, delta {delta}" - )] - BalanceUnderflow { - account_id: AccountId, - faucet_id: AccountId, - prev_balance: u64, - delta: i64, - }, + #[error(transparent)] + Asset(#[source] AssetError), } #[derive(Debug, Error)] @@ -174,21 +167,21 @@ impl InnerForest { fn map_forest_error(error: LargeSmtForestError) -> MerkleError { match error { LargeSmtForestError::Merkle(merkle) => merkle, - other => MerkleError::InternalError(other.to_string()), + other => MerkleError::InternalError(other.as_report()), } } fn map_forest_error_to_witness(error: LargeSmtForestError) -> WitnessError { match error { LargeSmtForestError::Merkle(merkle) => WitnessError::MerkleError(merkle), - other => WitnessError::MerkleError(MerkleError::InternalError(other.to_string())), + other => WitnessError::MerkleError(MerkleError::InternalError(other.as_report())), } } // ACCESSORS // -------------------------------------------------------------------------------------------- - fn tree_id_for_lookup(&self, lineage: LineageId, block_num: BlockNumber) -> Option { + fn get_tree_id(&self, lineage: LineageId, block_num: BlockNumber) -> Option { let tree = self.lookup_tree_id(lineage, block_num); match self.forest.root_info(tree) { RootInfo::LatestVersion(_) | RootInfo::HistoricalVersion(_) => Some(tree), @@ -204,8 +197,8 @@ impl InnerForest { } #[cfg(test)] - fn tree_root(&self, lineage: LineageId, block_num: BlockNumber) -> Option { - let tree = self.tree_id_for_lookup(lineage, block_num)?; + fn get_tree_root(&self, lineage: LineageId, block_num: BlockNumber) -> Option { + let tree = self.get_tree_id(lineage, block_num)?; match self.forest.root_info(tree) { RootInfo::LatestVersion(root) | RootInfo::HistoricalVersion(root) => Some(root), RootInfo::Missing => None, @@ -220,7 +213,7 @@ impl InnerForest { block_num: BlockNumber, ) -> Option { let lineage = Self::vault_lineage_id(account_id); - 
self.tree_root(lineage, block_num) + self.get_tree_root(lineage, block_num) } /// Retrieves the storage map root for an account slot at the specified block. @@ -232,7 +225,7 @@ impl InnerForest { block_num: BlockNumber, ) -> Option { let lineage = Self::storage_lineage_id(account_id, slot_name); - self.tree_root(lineage, block_num) + self.get_tree_root(lineage, block_num) } // WITNESSES and PROOFS @@ -250,7 +243,7 @@ impl InnerForest { raw_key: Word, ) -> Result { let lineage = Self::storage_lineage_id(account_id, slot_name); - let tree = self.tree_id_for_lookup(lineage, block_num).ok_or(WitnessError::RootNotFound)?; + let tree = self.get_tree_id(lineage, block_num).ok_or(WitnessError::RootNotFound)?; let key = StorageMap::hash_key(raw_key); let proof = self.forest.open(tree, key).map_err(Self::map_forest_error_to_witness)?; @@ -266,7 +259,7 @@ impl InnerForest { asset_keys: BTreeSet, ) -> Result, WitnessError> { let lineage = Self::vault_lineage_id(account_id); - let tree = self.tree_id_for_lookup(lineage, block_num).ok_or(WitnessError::RootNotFound)?; + let tree = self.get_tree_id(lineage, block_num).ok_or(WitnessError::RootNotFound)?; let witnessees: Result, WitnessError> = Result::from_iter(asset_keys.into_iter().map(|key| { let proof = self @@ -291,7 +284,7 @@ impl InnerForest { raw_keys: &[Word], ) -> Option> { let lineage = Self::storage_lineage_id(account_id, &slot_name); - let tree = self.tree_id_for_lookup(lineage, block_num)?; + let tree = self.get_tree_id(lineage, block_num)?; let proofs = Result::from_iter(raw_keys.iter().map(|raw_key| { let key = StorageMap::hash_key(*raw_key); @@ -404,12 +397,13 @@ impl InnerForest { if delta.is_empty() { let lineage = Self::vault_lineage_id(account_id); - let _new_root = self.apply_forest_updates(lineage, block_num, Vec::new()); + let new_root = self.apply_forest_updates(lineage, block_num, Vec::new()); tracing::debug!( target: crate::COMPONENT, %account_id, %block_num, + %new_root, vault_entries = 0, "Inserted 
vault into forest" ); @@ -428,8 +422,11 @@ impl InnerForest { // process non-fungible assets for (&asset, _action) in delta.non_fungible().iter() { - // TODO: assert that action is addition - entries.push((asset.vault_key().into(), asset.into())); + let asset_vault_key = asset.vault_key().into(); + match _action { + NonFungibleDeltaAction::Add => entries.push((asset_vault_key, asset.into())), + NonFungibleDeltaAction::Remove => entries.push((asset_vault_key, EMPTY_WORD)), + } } assert!(!entries.is_empty(), "non-empty delta should contain entries"); @@ -474,33 +471,26 @@ impl InnerForest { // Process fungible assets for (faucet_id, amount_delta) in delta.fungible().iter() { - let key: Word = - FungibleAsset::new(*faucet_id, 0).expect("valid faucet id").vault_key().into(); - - let new_amount = { - // amount delta is a change that must be applied to previous balance. - // - // TODO: SmtForest only exposes `fn open()` which computes a full Merkle proof. We - // only need the leaf, so a direct `fn get()` method would be faster. - let prev_amount = prev_tree - .and_then(|tree| self.forest.open(tree, key).ok()) - .and_then(|proof| proof.get(&key)) - .and_then(|word| FungibleAsset::try_from(word).ok()) - .map_or(0, |asset| asset.amount()); - - let new_balance = i128::from(prev_amount) + i128::from(*amount_delta); - u64::try_from(new_balance).map_err(|_| InnerForestError::BalanceUnderflow { - account_id, - faucet_id: *faucet_id, - prev_balance: prev_amount, - delta: *amount_delta, - })? + let delta = FungibleAsset::new(*faucet_id, *amount_delta)?; + let key: Word = delta.expect("valid faucet id").vault_key().into(); + + let empty = FungibleAsset::new(*faucet_id, 0)?; + let mut asset = if let Some(prev_tree) = prev_tree { + self.forest + .get(tree, key)? + .map(|asset_key| FungibleAsset::try_from(asset_key)) + .transpose()? 
+ .unwrap_or_else(|| empty) + } else { + empty }; - let value = if new_amount == 0 { + let updated = asset.add(delta).map_err(InnerForestError::Asset)?; + + let value = if updated.amount() == 0 { EMPTY_WORD } else { - FungibleAsset::new(*faucet_id, new_amount).expect("valid fungible asset").into() + Word::from(updated) }; entries.push((key, value)); } @@ -514,17 +504,18 @@ impl InnerForest { entries.push((asset.vault_key().into(), value)); } - let num_entries = entries.len(); + let vault_entries = entries.len(); let lineage = Self::vault_lineage_id(account_id); let operations = Self::build_forest_operations(entries); - let _new_root = self.apply_forest_updates(lineage, block_num, operations); + let new_root = self.apply_forest_updates(lineage, block_num, operations); tracing::debug!( target: crate::COMPONENT, %account_id, %block_num, - vault_entries = num_entries, + %new_root, + %vault_entries, "Updated vault in forest" ); Ok(()) From afcf07256b56206648d8f2f3fb9d687d76169fd8 Mon Sep 17 00:00:00 2001 From: Bernhard Schuster Date: Wed, 25 Feb 2026 13:50:04 +0100 Subject: [PATCH 18/18] cleanup --- crates/store/src/inner_forest/mod.rs | 43 ++++++++++++++++++---------- 1 file changed, 28 insertions(+), 15 deletions(-) diff --git a/crates/store/src/inner_forest/mod.rs b/crates/store/src/inner_forest/mod.rs index 2b51f9573..ada365c1b 100644 --- a/crates/store/src/inner_forest/mod.rs +++ b/crates/store/src/inner_forest/mod.rs @@ -49,7 +49,9 @@ pub const HISTORICAL_BLOCK_RETENTION: u32 = 50; #[derive(Debug, Error)] pub enum InnerForestError { #[error(transparent)] - Asset(#[source] AssetError), + Asset(#[from] AssetError), + #[error(transparent)] + Forest(#[from] LargeSmtForestError), } #[derive(Debug, Error)] @@ -421,9 +423,9 @@ impl InnerForest { } // process non-fungible assets - for (&asset, _action) in delta.non_fungible().iter() { + for (&asset, action) in delta.non_fungible().iter() { let asset_vault_key = asset.vault_key().into(); - match _action { + match action { 
NonFungibleDeltaAction::Add => entries.push((asset_vault_key, asset.into())), NonFungibleDeltaAction::Remove => entries.push((asset_vault_key, EMPTY_WORD)), } @@ -434,12 +436,13 @@ impl InnerForest { let lineage = Self::vault_lineage_id(account_id); let operations = Self::build_forest_operations(entries); - let _new_root = self.apply_forest_updates(lineage, block_num, operations); + let new_root = self.apply_forest_updates(lineage, block_num, operations); tracing::debug!( target: crate::COMPONENT, %account_id, %block_num, + %new_root, vault_entries = num_entries, "Inserted vault into forest" ); @@ -471,21 +474,26 @@ impl InnerForest { // Process fungible assets for (faucet_id, amount_delta) in delta.fungible().iter() { - let delta = FungibleAsset::new(*faucet_id, *amount_delta)?; - let key: Word = delta.expect("valid faucet id").vault_key().into(); + let delta_abs = amount_delta.unsigned_abs(); + let delta = FungibleAsset::new(*faucet_id, delta_abs)?; + let key = Word::from(delta.vault_key()); let empty = FungibleAsset::new(*faucet_id, 0)?; - let mut asset = if let Some(prev_tree) = prev_tree { + let asset = if let Some(tree) = prev_tree { self.forest .get(tree, key)? - .map(|asset_key| FungibleAsset::try_from(asset_key)) + .map(FungibleAsset::try_from) .transpose()? - .unwrap_or_else(|| empty) + .unwrap_or(empty) } else { empty }; - let updated = asset.add(delta).map_err(InnerForestError::Asset)?; + let updated = if *amount_delta < 0 { + asset.sub(delta)? + } else { + asset.add(delta)? 
+ }; let value = if updated.amount() == 0 { EMPTY_WORD @@ -580,7 +588,7 @@ impl InnerForest { "account should not be in the forest" ); let operations = Self::build_forest_operations(hashed_entries); - let _new_root = self.apply_forest_updates(lineage, block_num, operations); + let new_root = self.apply_forest_updates(lineage, block_num, operations); let num_entries = raw_map_entries.len(); @@ -589,6 +597,7 @@ impl InnerForest { %account_id, %block_num, ?slot_name, + %new_root, delta_entries = num_entries, "Inserted storage map into forest" ); @@ -624,13 +633,14 @@ impl InnerForest { ); let operations = Self::build_forest_operations(hashed_entries); - let _new_root = self.apply_forest_updates(lineage, block_num, operations); + let new_root = self.apply_forest_updates(lineage, block_num, operations); tracing::debug!( target: crate::COMPONENT, %account_id, %block_num, ?slot_name, + %new_root, delta_entries = delta_entries.len(), "Updated storage map in forest" ); @@ -643,10 +653,13 @@ impl InnerForest { /// Prunes old entries from the in-memory forest data structures. /// /// The `LargeSmtForest` itself is truncated to drop historical versions beyond the cutoff. - #[instrument(target = COMPONENT, skip_all, fields(block.number = %chain_tip))] + /// + /// Returns the number of _roots_ that got pruned. + #[instrument(target = COMPONENT, skip_all, ret, fields(block.number = %chain_tip))] pub(crate) fn prune(&mut self, chain_tip: BlockNumber) -> usize { - let cutoff_block = - BlockNumber::from(chain_tip.as_u32().saturating_sub(HISTORICAL_BLOCK_RETENTION)); + let cutoff_block = chain_tip + .checked_sub(HISTORICAL_BLOCK_RETENTION) + .unwrap_or(BlockNumber::GENESIS); let before = self.forest.roots().count(); self.forest.truncate(cutoff_block.as_u64());