diff --git a/Cargo.lock b/Cargo.lock index 9e293fd2b8..b5710c3afe 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2439,6 +2439,7 @@ dependencies = [ "async-channel 2.5.0", "async-trait", "base64 0.13.1", + "bincode", "borsh 0.10.4", "bs58", "clap 4.5.60", @@ -2472,6 +2473,7 @@ dependencies = [ "light-token", "light-token-client", "light-token-interface", + "mwmatching", "photon-api", "prometheus", "rand 0.8.5", @@ -4836,6 +4838,12 @@ dependencies = [ "thiserror 1.0.69", ] +[[package]] +name = "mwmatching" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f13b50448d988736cc2c938a76ae336241fcb31a225017c0e3121bd349e7dc06" + [[package]] name = "native-tls" version = "0.2.18" diff --git a/forester/Cargo.toml b/forester/Cargo.toml index 32d1df4e0d..8443848391 100644 --- a/forester/Cargo.toml +++ b/forester/Cargo.toml @@ -43,6 +43,7 @@ reqwest = { workspace = true, features = ["json", "rustls-tls", "blocking"] } futures = { workspace = true } thiserror = { workspace = true } borsh = { workspace = true } +bincode = "1.3" bs58 = { workspace = true } hex = { workspace = true } env_logger = { workspace = true } @@ -61,6 +62,7 @@ itertools = "0.14" async-channel = "2.5" solana-pubkey = { workspace = true } dotenvy = "0.15" +mwmatching = "0.1.1" [dev-dependencies] serial_test = { workspace = true } diff --git a/forester/docs/v1_forester_flows.md b/forester/docs/v1_forester_flows.md new file mode 100644 index 0000000000..8f1c77489f --- /dev/null +++ b/forester/docs/v1_forester_flows.md @@ -0,0 +1,164 @@ +# Forester V1 Flows (PR: v2 Nullify + Blockhash) + +## 1. 
Transaction Send Flow (Blockhash) + +``` +┌─────────────────────────────────────────────────────────────────────────────────┐ +│ send_batched_transactions │ +└─────────────────────────────────────────────────────────────────────────────────┘ + + ┌──────────────────────────────────┐ + │ prepare_batch_prerequisites │ + │ - fetch queue items │ + │ - single RPC: blockhash + │ + │ priority_fee (same connection) │ + │ - PreparedBatchData: │ + │ recent_blockhash │ + │ last_valid_block_height │ + └──────────────┬───────────────────┘ + │ + ▼ + ┌──────────────────────────────────┐ + │ for each work_chunk (100 items) │ + └──────────────┬───────────────────┘ + │ + ┌────────────┴────────────┐ + │ elapsed > 30s? │ + │ YES → refresh blockhash│ + │ (pool.get_connection │ + │ → rpc.get_latest_ │ + │ blockhash) │ + │ NO → keep current │ + └────────────┬────────────┘ + │ + ▼ + ┌──────────────────────────────────┐ + │ build_signed_transaction_batch │ + │ (recent_blockhash, │ + │ last_valid_block_height) │ + │ → (txs, chunk_last_valid_ │ + │ block_height) │ + └──────────────┬───────────────────┘ + │ + ▼ + ┌──────────────────────────────────┐ + │ execute_transaction_chunk_sending │ + │ PreparedTransaction::legacy( │ + │ tx, chunk_last_valid_block_ │ + │ height) │ + │ - send + confirm │ + │ - blockhash expiry check via │ + │ last_valid_block_height │ + └──────────────────────────────────┘ + + No refetch-before-send. No re-sign. +``` + +## 2. 
State Nullify Instruction Flow (Legacy vs v2) + +``` +┌─────────────────────────────────────────────────────────────────────────────────┐ +│ Registry: nullify instruction paths │ +└─────────────────────────────────────────────────────────────────────────────────┘ + + LEGACY (proof in ix data) v2 (proof in remaining_accounts) + ─────────────────────── ──────────────────────────────────── + + create_nullify_instruction() create_nullify_with_proof_accounts_instruction() + │ │ + │ ix data: [change_log, queue_idx, │ ix data: [change_log, queue_idx, + │ leaf_idx, proofs[16][32]] │ leaf_idx] (no proofs) + │ │ + │ remaining_accounts: standard │ remaining_accounts: 16 proof + │ (authority, merkle_tree, queue...) │ account pubkeys (key = node bytes) + │ │ + ▼ ▼ + process_nullify() nullify_2 instruction + (proofs from ix data) - validate: 1 change, 1 queue, 1 index + - validate: exactly 16 proof accounts + - extract_proof_nodes_from_remaining_accounts + - process_nullify(..., vec![proof_nodes]) + + Forester V1 uses nullify_2 only (create_nullify_2_instruction). +``` + +## 3. Forester V1 State Nullify Pairing Flow + +``` +┌─────────────────────────────────────────────────────────────────────────────────┐ +│ build_instruction_batches (state nullify path) │ +└─────────────────────────────────────────────────────────────────────────────────┘ + + fetch_proofs_and_create_instructions + │ + │ For each state item: + │ create_nullify_with_proof_accounts_instruction (v2) + │ → StateNullifyInstruction { instruction, proof_nodes, leaf_index } + │ + ▼ + ┌─────────────────────────────────────────────────────────────────────────────┐ + │ allow_pairing? 
│ + │      batch_size >= 2  AND  should_attempt_pairing()                          │ + └─────────────────────────────────────────────────────────────────────────────┘ + │ + │ should_attempt_pairing checks: + │  - pair_candidates = n*(n-1)/2 <= 4950 (MAX_PAIR_CANDIDATES) + │  - state_nullify_count <= 100 (MAX_PAIRING_INSTRUCTIONS) + │  - remaining_blocks = last_valid - current > 25 (MIN_REMAINING_BLOCKS_FOR_PAIRING) + │ + ├── NO → each nullify → 1 tx (no pairing) + │ + └── YES → pair_state_nullify_batches + │ + │  For each pair (i,j): + │  - pair_fits_transaction_size(ix_i, ix_j)? (serialized <= 1232) + │  - weight = 10000 + proof_overlap_count + │ + │  Max-cardinality matching (mwmatching) + │  - prioritize number of pairs + │  - then maximize proof overlap (fewer unique accounts) + │ + ▼ + Output: Vec> + - paired: [ix_a, ix_b] in one tx + - unpaired: [ix] in one tx + + Address updates: no pairing, chunked by batch_size only. +``` + +## 4. End-to-End Forester V1 State Tree Flow + +``` + Queue (state nullifier) Indexer (proofs) + │ │ + └──────────┬─────────────────┘ + │ + ▼ + prepare_batch_prerequisites + - queue items + - blockhash + last_valid_block_height + - priority_fee + │ + ▼ + for chunk in work_items.chunks(100): + refresh blockhash if 30s elapsed + │ + ▼ + build_signed_transaction_batch + │ + ├─ fetch_proofs_and_create_instructions + │ - state: v2 nullify ix (proof in remaining_accounts) + │ - address: update ix + │ + ├─ build_instruction_batches + │ - address: chunk by batch_size + │ - state nullify: pair if allow_pairing else 1-per-tx + │ + └─ create_smart_transaction per batch + │ + ▼ + execute_transaction_chunk_sending + - PreparedTransaction::legacy(tx, chunk_last_valid_block_height) + - send + confirm with blockhash expiry check +``` + diff --git a/forester/src/processor/v1/helpers.rs b/forester/src/processor/v1/helpers.rs index d980b02e32..6b76eea8ba 100644 --- a/forester/src/processor/v1/helpers.rs +++ b/forester/src/processor/v1/helpers.rs @@ -11,8 +11,8 @@ use 
forester_utils::{rpc_pool::SolanaRpcPool, utils::wait_for_indexer}; use light_client::{indexer::Indexer, rpc::Rpc}; use light_compressed_account::TreeType; use light_registry::account_compression_cpi::sdk::{ - create_nullify_instruction, create_update_address_merkle_tree_instruction, - CreateNullifyInstructionInputs, UpdateAddressMerkleTreeInstructionInputs, + create_nullify_2_instruction, create_update_address_merkle_tree_instruction, + CreateNullify2InstructionInputs, UpdateAddressMerkleTreeInstructionInputs, }; use solana_program::instruction::Instruction; use tokio::time::Instant; @@ -32,6 +32,19 @@ use crate::{ errors::ForesterError, }; +#[derive(Clone, Debug)] +pub enum PreparedV1Instruction { + AddressUpdate(Instruction), + StateNullify(StateNullifyInstruction), +} + +#[derive(Clone, Debug)] +pub struct StateNullifyInstruction { + pub instruction: Instruction, + pub proof_nodes: Vec<[u8; 32]>, + pub leaf_index: u64, +} + /// Work items should be of only one type and tree pub async fn fetch_proofs_and_create_instructions( authority: Pubkey, @@ -39,7 +52,7 @@ pub async fn fetch_proofs_and_create_instructions( pool: Arc>, epoch: u64, work_items: &[WorkItem], -) -> crate::Result<(Vec, Vec)> { +) -> crate::Result<(Vec, Vec)> { let mut proofs = Vec::new(); let mut instructions = vec![]; @@ -360,7 +373,7 @@ pub async fn fetch_proofs_and_create_instructions( }, epoch, ); - instructions.push(instruction); + instructions.push(PreparedV1Instruction::AddressUpdate(instruction)); } // Process state proofs and create instructions @@ -375,21 +388,33 @@ pub async fn fetch_proofs_and_create_instructions( for (item, proof) in state_items.iter().zip(state_proofs.into_iter()) { proofs.push(MerkleProofType::StateProof(proof.clone())); - let instruction = create_nullify_instruction( - CreateNullifyInstructionInputs { + let instruction = create_nullify_2_instruction( + CreateNullify2InstructionInputs { nullifier_queue: item.tree_account.queue, merkle_tree: 
item.tree_account.merkle_tree, - change_log_indices: vec![proof.root_seq % STATE_MERKLE_TREE_CHANGELOG], - leaves_queue_indices: vec![item.queue_item_data.index as u16], - indices: vec![proof.leaf_index], - proofs: vec![proof.proof.clone()], + change_log_index: proof.root_seq % STATE_MERKLE_TREE_CHANGELOG, + leaves_queue_index: item.queue_item_data.index as u16, + index: proof.leaf_index, + proof: proof + .proof + .clone() + .try_into() + .map_err(|_| ForesterError::General { + error: "Failed to convert state proof to fixed array".to_string(), + })?, authority, derivation, is_metadata_forester: false, }, epoch, ); - instructions.push(instruction); + instructions.push(PreparedV1Instruction::StateNullify( + StateNullifyInstruction { + instruction, + proof_nodes: proof.proof, + leaf_index: proof.leaf_index, + }, + )); } Ok((proofs, instructions)) diff --git a/forester/src/processor/v1/tx_builder.rs b/forester/src/processor/v1/tx_builder.rs index 463cc0b2bf..b654c2a259 100644 --- a/forester/src/processor/v1/tx_builder.rs +++ b/forester/src/processor/v1/tx_builder.rs @@ -2,10 +2,13 @@ use std::{sync::Arc, time::Duration}; use account_compression::processor::initialize_address_merkle_tree::Pubkey; use async_trait::async_trait; +use bincode::serialized_size; use forester_utils::rpc_pool::SolanaRpcPool; use light_client::rpc::Rpc; +use mwmatching::{Matching, SENTINEL}; use solana_program::hash::Hash; use solana_sdk::{ + compute_budget::ComputeBudgetInstruction, signature::{Keypair, Signer}, transaction::Transaction, }; @@ -16,12 +19,22 @@ use crate::{ epoch_manager::WorkItem, processor::{ tx_cache::ProcessedHashCache, - v1::{config::BuildTransactionBatchConfig, helpers::fetch_proofs_and_create_instructions}, + v1::{ + config::BuildTransactionBatchConfig, + helpers::{ + fetch_proofs_and_create_instructions, PreparedV1Instruction, + StateNullifyInstruction, + }, + }, }, smart_transaction::{create_smart_transaction, CreateSmartTransactionConfig}, Result, }; +const 
MAX_PAIRING_INSTRUCTIONS: usize = 100; +const MAX_PAIR_CANDIDATES: usize = 4_950; +const MIN_REMAINING_BLOCKS_FOR_PAIRING: u64 = 25; + #[async_trait] #[allow(clippy::too_many_arguments)] pub trait TransactionBuilder: Send + Sync { @@ -58,6 +71,52 @@ impl EpochManagerTransactions { processed_hash_cache: cache, } } + + async fn should_attempt_pairing( + &self, + last_valid_block_height: u64, + state_nullify_count: usize, + ) -> bool { + let pair_candidates = pairing_candidate_count(state_nullify_count); + if !pairing_precheck_passes(state_nullify_count, pair_candidates) { + warn!( + "Skipping nullify pairing due to candidate explosion: count={}, pair_candidates={}", + state_nullify_count, pair_candidates + ); + return false; + } + + let conn = match self.pool.get_connection().await { + Ok(conn) => conn, + Err(e) => { + warn!( + "Skipping nullify pairing because RPC connection unavailable for block-height check: {}", + e + ); + return false; + } + }; + let current_block_height = match conn.get_block_height().await { + Ok(height) => height, + Err(e) => { + warn!( + "Skipping nullify pairing because block-height check failed: {}", + e + ); + return false; + } + }; + let remaining_blocks = last_valid_block_height.saturating_sub(current_block_height); + if !remaining_blocks_allows_pairing(remaining_blocks) { + warn!( + "Skipping nullify pairing near blockhash expiry: remaining_blocks={}", + remaining_blocks + ); + return false; + } + + true + } } #[async_trait] @@ -91,7 +150,7 @@ impl TransactionBuilder for EpochManagerTransactions { }) .collect(); - // Add items with short timeout (30 seconds) for processing + // Add items with a short timeout (15 seconds) for processing. 
for item in &work_items { let hash_str = bs58::encode(&item.queue_item_data.hash).into_string(); cache.add_with_timeout(&hash_str, Duration::from_secs(15)); @@ -116,7 +175,7 @@ impl TransactionBuilder for EpochManagerTransactions { .collect::>(); let mut transactions = vec![]; - let all_instructions = match fetch_proofs_and_create_instructions( + let prepared_instructions = match fetch_proofs_and_create_instructions( payer.pubkey(), *derivation, self.pool.clone(), @@ -143,11 +202,31 @@ impl TransactionBuilder for EpochManagerTransactions { }; let batch_size = config.batch_size.max(1) as usize; + let state_nullify_count = prepared_instructions + .iter() + .filter(|ix| matches!(ix, PreparedV1Instruction::StateNullify(_))) + .count(); + let allow_pairing = if batch_size >= 2 { + self.should_attempt_pairing(last_valid_block_height, state_nullify_count) + .await + } else { + false + }; + let instruction_batches = build_instruction_batches( + prepared_instructions, + batch_size, + allow_pairing, + payer, + recent_blockhash, + last_valid_block_height, + priority_fee, + config.compute_unit_limit, + )?; - for instruction_chunk in all_instructions.chunks(batch_size) { + for instruction_chunk in instruction_batches { let (transaction, _) = create_smart_transaction(CreateSmartTransactionConfig { payer: payer.insecure_clone(), - instructions: instruction_chunk.to_vec(), + instructions: instruction_chunk, recent_blockhash: *recent_blockhash, compute_unit_price: priority_fee, compute_unit_limit: config.compute_unit_limit, @@ -171,3 +250,274 @@ impl TransactionBuilder for EpochManagerTransactions { Ok((transactions, last_valid_block_height)) } } + +#[allow(clippy::too_many_arguments)] +fn build_instruction_batches( + prepared_instructions: Vec, + batch_size: usize, + allow_pairing: bool, + payer: &Keypair, + recent_blockhash: &Hash, + last_valid_block_height: u64, + priority_fee: Option, + compute_unit_limit: Option, +) -> Result>> { + let mut address_instructions = Vec::new(); + 
let mut state_nullify_instructions = Vec::new(); + for prepared in prepared_instructions { + match prepared { + PreparedV1Instruction::AddressUpdate(ix) => address_instructions.push(ix), + PreparedV1Instruction::StateNullify(ix) => state_nullify_instructions.push(ix), + } + } + + let mut batches = Vec::new(); + for chunk in address_instructions.chunks(batch_size) { + batches.push(chunk.to_vec()); + } + + if state_nullify_instructions.is_empty() { + return Ok(batches); + } + + let paired_batches = if batch_size >= 2 && allow_pairing { + pair_state_nullify_batches( + state_nullify_instructions, + payer, + recent_blockhash, + priority_fee, + compute_unit_limit, + )? + } else { + state_nullify_instructions + .into_iter() + .map(|ix| vec![ix.instruction]) + .collect() + }; + batches.extend(paired_batches); + Ok(batches) +} + +fn pair_state_nullify_batches( + state_nullify_instructions: Vec, + payer: &Keypair, + recent_blockhash: &Hash, + priority_fee: Option, + compute_unit_limit: Option, +) -> Result>> { + let n = state_nullify_instructions.len(); + if n < 2 { + return Ok(state_nullify_instructions + .into_iter() + .map(|ix| vec![ix.instruction]) + .collect()); + } + + let mut edges: Vec<(usize, usize, i32)> = Vec::new(); + for i in 0..n { + for j in (i + 1)..n { + if !pair_fits_transaction_size( + &state_nullify_instructions[i].instruction, + &state_nullify_instructions[j].instruction, + payer, + recent_blockhash, + priority_fee, + compute_unit_limit, + )? { + continue; + } + let overlap = state_nullify_instructions[i] + .proof_nodes + .iter() + .filter(|node| state_nullify_instructions[j].proof_nodes.contains(node)) + .count() as i32; + // Prioritize pair count first, then maximize proof overlap. 
+ let weight = 10_000 + overlap; + edges.push((i, j, weight)); + } + } + + if edges.is_empty() { + return Ok(state_nullify_instructions + .into_iter() + .map(|ix| vec![ix.instruction]) + .collect()); + } + + let mates = Matching::new(edges).max_cardinality().solve(); + let mut used = vec![false; n]; + let mut paired_batches: Vec<(u64, Vec)> = Vec::new(); + let mut single_batches: Vec<(u64, Vec)> = Vec::new(); + + for i in 0..n { + if used[i] { + continue; + } + let mate = mates.get(i).copied().unwrap_or(SENTINEL); + if mate != SENTINEL && mate > i && mate < n { + used[i] = true; + used[mate] = true; + let (left, right) = if state_nullify_instructions[i].leaf_index + <= state_nullify_instructions[mate].leaf_index + { + (i, mate) + } else { + (mate, i) + }; + let min_leaf = state_nullify_instructions[left].leaf_index; + paired_batches.push(( + min_leaf, + vec![ + state_nullify_instructions[left].instruction.clone(), + state_nullify_instructions[right].instruction.clone(), + ], + )); + } + } + + for i in 0..n { + if !used[i] { + single_batches.push(( + state_nullify_instructions[i].leaf_index, + vec![state_nullify_instructions[i].instruction.clone()], + )); + } + } + + paired_batches.sort_by_key(|(leaf, _)| *leaf); + single_batches.sort_by_key(|(leaf, _)| *leaf); + paired_batches.extend(single_batches); + Ok(paired_batches.into_iter().map(|(_, batch)| batch).collect()) +} + +fn pairing_candidate_count(n: usize) -> usize { + n.saturating_sub(1).saturating_mul(n) / 2 +} + +fn pairing_precheck_passes(state_nullify_count: usize, pair_candidates: usize) -> bool { + if state_nullify_count < 2 { + return false; + } + if state_nullify_count > MAX_PAIRING_INSTRUCTIONS { + return false; + } + pair_candidates <= MAX_PAIR_CANDIDATES +} + +fn remaining_blocks_allows_pairing(remaining_blocks: u64) -> bool { + remaining_blocks > MIN_REMAINING_BLOCKS_FOR_PAIRING +} + +fn pair_fits_transaction_size( + ix_a: &solana_program::instruction::Instruction, + ix_b: 
&solana_program::instruction::Instruction, + payer: &Keypair, + recent_blockhash: &Hash, + priority_fee: Option, + compute_unit_limit: Option, +) -> Result { + let mut instructions = Vec::with_capacity( + 2 + usize::from(priority_fee.is_some()) + usize::from(compute_unit_limit.is_some()), + ); + if let Some(price) = priority_fee { + instructions.push(ComputeBudgetInstruction::set_compute_unit_price(price)); + } + if let Some(limit) = compute_unit_limit { + instructions.push(ComputeBudgetInstruction::set_compute_unit_limit(limit)); + } + instructions.push(ix_a.clone()); + instructions.push(ix_b.clone()); + + let mut tx = Transaction::new_with_payer(&instructions, Some(&payer.pubkey())); + tx.message.recent_blockhash = *recent_blockhash; + tx.signatures = vec![ + solana_sdk::signature::Signature::default(); + tx.message.header.num_required_signatures as usize + ]; + + let tx_bytes = serialized_size(&tx)? as usize; + Ok(tx_bytes <= 1232) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn max_matching_prioritizes_cardinality() { + let edges = vec![(0usize, 1usize, 10_100i32), (1usize, 2usize, 10_090i32)]; + let mates = Matching::new(edges).max_cardinality().solve(); + let pairs = mates + .iter() + .enumerate() + .filter_map(|(i, mate)| { + if *mate != SENTINEL && *mate > i { + Some((i, *mate)) + } else { + None + } + }) + .collect::>(); + assert_eq!(pairs.len(), 1); + } + + #[test] + fn max_matching_handles_disconnected_graph() { + let edges = vec![(0usize, 1usize, 10_010i32), (2usize, 3usize, 10_005i32)]; + let mates = Matching::new(edges).max_cardinality().solve(); + let matched_vertices = mates.iter().filter(|mate| **mate != SENTINEL).count(); + assert_eq!(matched_vertices, 4); + } + + #[test] + fn max_matching_returns_unmatched_for_empty_edges() { + let mates = Matching::new(vec![]).max_cardinality().solve(); + assert!(mates.is_empty()); + } + + #[test] + fn pairing_candidate_count_matches_combination_formula() { + 
assert_eq!(pairing_candidate_count(0), 0); + assert_eq!(pairing_candidate_count(1), 0); + assert_eq!(pairing_candidate_count(2), 1); + assert_eq!(pairing_candidate_count(3), 3); + assert_eq!(pairing_candidate_count(10), 45); + } + + #[test] + fn pairing_precheck_enforces_instruction_and_candidate_limits() { + let max_count_by_candidate_limit = 100; // 100 * 99 / 2 = 4950 + assert!(!pairing_precheck_passes(1, pairing_candidate_count(1))); + assert!(pairing_precheck_passes(2, pairing_candidate_count(2))); + assert!(pairing_precheck_passes( + max_count_by_candidate_limit, + pairing_candidate_count(max_count_by_candidate_limit) + )); + assert!(!pairing_precheck_passes( + max_count_by_candidate_limit + 1, + pairing_candidate_count(max_count_by_candidate_limit + 1) + )); + assert!(pairing_precheck_passes( + MAX_PAIRING_INSTRUCTIONS, + pairing_candidate_count(MAX_PAIRING_INSTRUCTIONS) + )); + assert!(!pairing_precheck_passes( + MAX_PAIRING_INSTRUCTIONS + 1, + pairing_candidate_count(MAX_PAIRING_INSTRUCTIONS + 1) + )); + assert!(!pairing_precheck_passes(100, MAX_PAIR_CANDIDATES + 1)); + } + + #[test] + fn remaining_blocks_guard_is_strictly_greater_than_threshold() { + assert!(!remaining_blocks_allows_pairing( + MIN_REMAINING_BLOCKS_FOR_PAIRING - 1 + )); + assert!(!remaining_blocks_allows_pairing( + MIN_REMAINING_BLOCKS_FOR_PAIRING + )); + assert!(remaining_blocks_allows_pairing( + MIN_REMAINING_BLOCKS_FOR_PAIRING + 1 + )); + } +} diff --git a/program-tests/registry-test/tests/nullify_2_regression.rs b/program-tests/registry-test/tests/nullify_2_regression.rs new file mode 100644 index 0000000000..ef6fdfbb5f --- /dev/null +++ b/program-tests/registry-test/tests/nullify_2_regression.rs @@ -0,0 +1,238 @@ +use account_compression::state::QueueAccount; +use anchor_lang::InstructionData; +use forester_utils::account_zero_copy::{get_concurrent_merkle_tree, get_hash_set}; +use light_compressed_account::TreeType; +use light_hasher::Poseidon; +use light_program_test::{ + 
program_test::LightProgramTest, utils::assert::assert_rpc_error, ProgramTestConfig, +}; +use light_registry::{ + account_compression_cpi::sdk::{ + create_nullify_2_instruction, create_nullify_instruction, CreateNullify2InstructionInputs, + CreateNullifyInstructionInputs, + }, + errors::RegistryError, +}; +use light_test_utils::{e2e_test_env::init_program_test_env, Rpc}; +use serial_test::serial; +use solana_sdk::{ + instruction::Instruction, + pubkey::Pubkey, + signature::{Keypair, Signer}, +}; + +#[serial] +#[tokio::test] +async fn test_nullify_2_validation_and_success() { + let mut rpc = LightProgramTest::new(ProgramTestConfig::default_with_batched_trees(true)) + .await + .unwrap(); + rpc.indexer = None; + let env = rpc.test_accounts.clone(); + let forester = Keypair::new(); + rpc.airdrop_lamports(&forester.pubkey(), 1_000_000_000) + .await + .unwrap(); + let merkle_tree_keypair = Keypair::new(); + let nullifier_queue_keypair = Keypair::new(); + let cpi_context_keypair = Keypair::new(); + let (mut rpc, state_tree_bundle) = { + let mut e2e_env = init_program_test_env(rpc, &env, 50).await; + e2e_env.indexer.state_merkle_trees.clear(); + e2e_env.keypair_action_config.fee_assert = false; + e2e_env + .indexer + .add_state_merkle_tree( + &mut e2e_env.rpc, + &merkle_tree_keypair, + &nullifier_queue_keypair, + &cpi_context_keypair, + None, + Some(forester.pubkey()), + TreeType::StateV1, + ) + .await; + e2e_env + .compress_sol_deterministic(&forester, 1_000_000, None) + .await; + e2e_env + .transfer_sol_deterministic(&forester, &Pubkey::new_unique(), None) + .await + .unwrap(); + (e2e_env.rpc, e2e_env.indexer.state_merkle_trees[0].clone()) + }; + + let nullifier_queue = unsafe { + get_hash_set::(&mut rpc, state_tree_bundle.accounts.nullifier_queue).await + } + .unwrap(); + let mut queue_index = None; + let mut account_hash = None; + for i in 0..nullifier_queue.get_capacity() { + let bucket = nullifier_queue.get_bucket(i).unwrap(); + if let Some(bucket) = bucket { + if 
bucket.sequence_number.is_none() { + queue_index = Some(i as u16); + account_hash = Some(bucket.value_bytes()); + break; + } + } + } + let queue_index = queue_index.unwrap(); + let account_hash = account_hash.unwrap(); + let leaf_index = state_tree_bundle + .merkle_tree + .get_leaf_index(&account_hash) + .unwrap() as u64; + let proof = state_tree_bundle + .merkle_tree + .get_proof_of_leaf(leaf_index as usize, false) + .unwrap(); + let proof_depth = proof.len(); + let onchain_tree = + get_concurrent_merkle_tree::( + &mut rpc, + state_tree_bundle.accounts.merkle_tree, + ) + .await + .unwrap(); + let change_log_index = onchain_tree.changelog_index() as u64; + + let valid_ix = create_nullify_2_instruction( + CreateNullify2InstructionInputs { + authority: forester.pubkey(), + nullifier_queue: state_tree_bundle.accounts.nullifier_queue, + merkle_tree: state_tree_bundle.accounts.merkle_tree, + change_log_index, + leaves_queue_index: queue_index, + index: leaf_index, + proof: proof.try_into().unwrap(), + derivation: forester.pubkey(), + is_metadata_forester: true, + }, + 0, + ); + + let mut empty_proof_accounts_ix = valid_ix.clone(); + empty_proof_accounts_ix + .accounts + .truncate(empty_proof_accounts_ix.accounts.len() - proof_depth); + let result = rpc + .create_and_send_transaction(&[empty_proof_accounts_ix], &forester.pubkey(), &[&forester]) + .await; + assert_rpc_error(result, 0, RegistryError::EmptyProofAccounts.into()).unwrap(); + + let malformed_ix = Instruction { + program_id: light_registry::ID, + accounts: valid_ix.accounts.clone(), + data: light_registry::instruction::Nullify2 { + bump: 255, + change_log_indices: vec![change_log_index, change_log_index + 1], + leaves_queue_indices: vec![queue_index], + indices: vec![leaf_index], + } + .data(), + }; + let result = rpc + .create_and_send_transaction(&[malformed_ix], &forester.pubkey(), &[&forester]) + .await; + assert_rpc_error(result, 0, RegistryError::InvalidNullify2Inputs.into()).unwrap(); + + 
rpc.create_and_send_transaction(&[valid_ix], &forester.pubkey(), &[&forester]) + .await + .unwrap(); +} + +#[serial] +#[tokio::test] +async fn test_legacy_nullify_still_succeeds() { + let mut rpc = LightProgramTest::new(ProgramTestConfig::default_with_batched_trees(true)) + .await + .unwrap(); + rpc.indexer = None; + let env = rpc.test_accounts.clone(); + let forester = Keypair::new(); + rpc.airdrop_lamports(&forester.pubkey(), 1_000_000_000) + .await + .unwrap(); + let merkle_tree_keypair = Keypair::new(); + let nullifier_queue_keypair = Keypair::new(); + let cpi_context_keypair = Keypair::new(); + let (mut rpc, state_tree_bundle) = { + let mut e2e_env = init_program_test_env(rpc, &env, 50).await; + e2e_env.indexer.state_merkle_trees.clear(); + e2e_env.keypair_action_config.fee_assert = false; + e2e_env + .indexer + .add_state_merkle_tree( + &mut e2e_env.rpc, + &merkle_tree_keypair, + &nullifier_queue_keypair, + &cpi_context_keypair, + None, + Some(forester.pubkey()), + TreeType::StateV1, + ) + .await; + e2e_env + .compress_sol_deterministic(&forester, 1_000_000, None) + .await; + e2e_env + .transfer_sol_deterministic(&forester, &Pubkey::new_unique(), None) + .await + .unwrap(); + (e2e_env.rpc, e2e_env.indexer.state_merkle_trees[0].clone()) + }; + let nullifier_queue = unsafe { + get_hash_set::(&mut rpc, state_tree_bundle.accounts.nullifier_queue).await + } + .unwrap(); + let mut queue_index = None; + let mut account_hash = None; + for i in 0..nullifier_queue.get_capacity() { + let bucket = nullifier_queue.get_bucket(i).unwrap(); + if let Some(bucket) = bucket { + if bucket.sequence_number.is_none() { + queue_index = Some(i as u16); + account_hash = Some(bucket.value_bytes()); + break; + } + } + } + let queue_index = queue_index.unwrap(); + let account_hash = account_hash.unwrap(); + let leaf_index = state_tree_bundle + .merkle_tree + .get_leaf_index(&account_hash) + .unwrap() as u64; + let proof = state_tree_bundle + .merkle_tree + .get_proof_of_leaf(leaf_index 
as usize, false) + .unwrap(); + let onchain_tree = + get_concurrent_merkle_tree::( + &mut rpc, + state_tree_bundle.accounts.merkle_tree, + ) + .await + .unwrap(); + let change_log_index = onchain_tree.changelog_index() as u64; + + let legacy_ix = create_nullify_instruction( + CreateNullifyInstructionInputs { + authority: forester.pubkey(), + nullifier_queue: state_tree_bundle.accounts.nullifier_queue, + merkle_tree: state_tree_bundle.accounts.merkle_tree, + change_log_indices: vec![change_log_index], + leaves_queue_indices: vec![queue_index], + indices: vec![leaf_index], + proofs: vec![proof], + derivation: forester.pubkey(), + is_metadata_forester: true, + }, + 0, + ); + rpc.create_and_send_transaction(&[legacy_ix], &forester.pubkey(), &[&forester]) + .await + .unwrap(); +} diff --git a/programs/registry/src/account_compression_cpi/nullify.rs b/programs/registry/src/account_compression_cpi/nullify.rs index 818e2b43a8..5592b21cb0 100644 --- a/programs/registry/src/account_compression_cpi/nullify.rs +++ b/programs/registry/src/account_compression_cpi/nullify.rs @@ -3,7 +3,9 @@ use account_compression::{ }; use anchor_lang::prelude::*; -use crate::epoch::register_epoch::ForesterEpochPda; +use crate::{epoch::register_epoch::ForesterEpochPda, errors::RegistryError}; + +const NULLIFY_2_PROOF_ACCOUNTS_LEN: usize = 16; #[derive(Accounts)] pub struct NullifyLeaves<'info> { @@ -61,3 +63,69 @@ pub fn process_nullify( proofs, ) } + +pub(crate) fn extract_proof_nodes_from_remaining_accounts( + remaining_accounts: &[AccountInfo<'_>], +) -> Vec<[u8; 32]> { + remaining_accounts + .iter() + .map(|account_info| account_info.key().to_bytes()) + .collect() +} + +pub(crate) fn validate_nullify_2_inputs( + change_log_indices: &[u64], + leaves_queue_indices: &[u16], + indices: &[u64], + proof_accounts_len: usize, +) -> Result<()> { + if change_log_indices.len() != 1 || leaves_queue_indices.len() != 1 || indices.len() != 1 { + return err!(RegistryError::InvalidNullify2Inputs); + } + if 
proof_accounts_len == 0 { + return err!(RegistryError::EmptyProofAccounts); + } + if proof_accounts_len != NULLIFY_2_PROOF_ACCOUNTS_LEN { + return err!(RegistryError::InvalidProofAccountsLength); + } + Ok(()) +} + +#[cfg(test)] +mod tests { + use super::validate_nullify_2_inputs; + use crate::errors::RegistryError; + + #[test] + fn nullify_2_inputs_validate_happy_path() { + let result = validate_nullify_2_inputs(&[1], &[1], &[42], 16); + assert!(result.is_ok()); + } + + #[test] + fn nullify_2_inputs_reject_empty_proof_accounts() { + let result = validate_nullify_2_inputs(&[1], &[1], &[42], 0); + assert_eq!( + result.err().unwrap(), + RegistryError::EmptyProofAccounts.into() + ); + } + + #[test] + fn nullify_2_inputs_reject_vector_length_mismatch() { + let result = validate_nullify_2_inputs(&[1, 2], &[1], &[42], 16); + assert_eq!( + result.err().unwrap(), + RegistryError::InvalidNullify2Inputs.into() + ); + } + + #[test] + fn nullify_2_inputs_reject_invalid_proof_accounts_length() { + let result = validate_nullify_2_inputs(&[1], &[1], &[42], 15); + assert_eq!( + result.err().unwrap(), + RegistryError::InvalidProofAccountsLength.into() + ); + } +} diff --git a/programs/registry/src/account_compression_cpi/sdk.rs b/programs/registry/src/account_compression_cpi/sdk.rs index f002c35499..0e32f17282 100644 --- a/programs/registry/src/account_compression_cpi/sdk.rs +++ b/programs/registry/src/account_compression_cpi/sdk.rs @@ -9,7 +9,7 @@ use light_batched_merkle_tree::{ initialize_state_tree::InitStateTreeAccountsInstructionData, }; use light_system_program::program::LightSystemProgram; -use solana_sdk::instruction::Instruction; +use solana_sdk::instruction::{AccountMeta, Instruction}; use crate::utils::{ get_cpi_authority_pda, get_forester_epoch_pda_from_authority, get_protocol_config_pda_address, @@ -26,6 +26,18 @@ pub struct CreateNullifyInstructionInputs { pub is_metadata_forester: bool, } +pub struct CreateNullify2InstructionInputs { + pub authority: Pubkey, + pub 
nullifier_queue: Pubkey, + pub merkle_tree: Pubkey, + pub change_log_index: u64, + pub leaves_queue_index: u16, + pub index: u64, + pub proof: [[u8; 32]; 16], + pub derivation: Pubkey, + pub is_metadata_forester: bool, +} + pub fn create_nullify_instruction( inputs: CreateNullifyInstructionInputs, epoch: u64, @@ -62,6 +74,49 @@ pub fn create_nullify_instruction( } } +pub fn create_nullify_2_instruction( + inputs: CreateNullify2InstructionInputs, + epoch: u64, +) -> Instruction { + let register_program_pda = get_registered_program_pda(&crate::ID); + let registered_forester_pda = if inputs.is_metadata_forester { + None + } else { + Some(get_forester_epoch_pda_from_authority(&inputs.derivation, epoch).0) + }; + let (cpi_authority, bump) = get_cpi_authority_pda(); + let instruction_data = crate::instruction::Nullify2 { + bump, + change_log_indices: vec![inputs.change_log_index], + leaves_queue_indices: vec![inputs.leaves_queue_index], + indices: vec![inputs.index], + }; + + let base_accounts = crate::accounts::NullifyLeaves { + authority: inputs.authority, + registered_forester_pda, + registered_program_pda: register_program_pda, + nullifier_queue: inputs.nullifier_queue, + merkle_tree: inputs.merkle_tree, + log_wrapper: NOOP_PUBKEY.into(), + cpi_authority, + account_compression_program: account_compression::ID, + }; + let mut accounts = base_accounts.to_account_metas(Some(true)); + for node in inputs.proof { + accounts.push(AccountMeta::new_readonly( + Pubkey::new_from_array(node), + false, + )); + } + + Instruction { + program_id: crate::ID, + accounts, + data: instruction_data.data(), + } +} + #[derive(Clone, Debug, PartialEq)] pub struct CreateMigrateStateInstructionInputs { pub authority: Pubkey, @@ -545,3 +600,85 @@ pub fn create_rollover_batch_address_tree_instruction( data: instruction_data.data(), } } + +#[cfg(test)] +mod tests { + use anchor_lang::Discriminator; + + use super::*; + + #[test] + fn create_nullify_instruction_uses_legacy_payload() { + let 
authority = Pubkey::new_unique(); + let derivation = Pubkey::new_unique(); + let nullifier_queue = Pubkey::new_unique(); + let merkle_tree = Pubkey::new_unique(); + let proof = (0..16) + .map(|i| { + let mut node = [0u8; 32]; + node[0] = i as u8; + node + }) + .collect::>(); + let ix = create_nullify_instruction( + CreateNullifyInstructionInputs { + authority, + nullifier_queue, + merkle_tree, + change_log_indices: vec![7], + leaves_queue_indices: vec![11], + indices: vec![42], + proofs: vec![proof], + derivation, + is_metadata_forester: false, + }, + 1, + ); + + assert_eq!(ix.program_id, crate::ID); + assert_eq!(ix.accounts.len(), 8); + assert_eq!(&ix.data[..8], crate::instruction::Nullify::DISCRIMINATOR); + assert_eq!(ix.data.len(), 559); + } + + #[test] + fn create_nullify_2_instruction_uses_minimal_payload_and_remaining_accounts() { + let authority = Pubkey::new_unique(); + let derivation = Pubkey::new_unique(); + let nullifier_queue = Pubkey::new_unique(); + let merkle_tree = Pubkey::new_unique(); + let proof = (0..16) + .map(|i| { + let mut node = [0u8; 32]; + node[0] = i as u8; + node + }) + .collect::>(); + let ix = create_nullify_2_instruction( + CreateNullify2InstructionInputs { + authority, + nullifier_queue, + merkle_tree, + change_log_index: 7, + leaves_queue_index: 11, + index: 42, + proof: proof.clone().try_into().unwrap(), + derivation, + is_metadata_forester: false, + }, + 1, + ); + + assert_eq!(ix.program_id, crate::ID); + assert_eq!(ix.accounts.len(), 8 + 16); + for (account_meta, node) in ix.accounts[8..].iter().zip(proof.iter()) { + assert_eq!(account_meta.pubkey, Pubkey::new_from_array(*node)); + assert!(!account_meta.is_signer); + assert!(!account_meta.is_writable); + } + + assert_eq!(&ix.data[..8], crate::instruction::Nullify2::DISCRIMINATOR); + // 8-byte discriminator + 31-byte minimal payload. 
+ assert_eq!(ix.data.len(), 39); + } +} diff --git a/programs/registry/src/errors.rs b/programs/registry/src/errors.rs index 7c445d2ca3..a47293e823 100644 --- a/programs/registry/src/errors.rs +++ b/programs/registry/src/errors.rs @@ -38,6 +38,12 @@ pub enum RegistryError { InvalidTokenAccountData, #[msg("Indices array cannot be empty")] EmptyIndices, + #[msg("Proof accounts cannot be empty")] + EmptyProofAccounts, + #[msg("Nullify2 proof accounts length is invalid")] + InvalidProofAccountsLength, + #[msg("Nullify2 supports exactly one change, queue index, and leaf index")] + InvalidNullify2Inputs, #[msg("Failed to borrow account data")] BorrowAccountDataFailed, #[msg("Failed to serialize instruction data")] diff --git a/programs/registry/src/lib.rs b/programs/registry/src/lib.rs index a21b58cd4b..16a5d56548 100644 --- a/programs/registry/src/lib.rs +++ b/programs/registry/src/lib.rs @@ -11,6 +11,9 @@ use light_merkle_tree_metadata::merkle_tree::MerkleTreeMetadata; pub mod account_compression_cpi; pub mod errors; +use account_compression_cpi::nullify::{ + extract_proof_nodes_from_remaining_accounts, validate_nullify_2_inputs, +}; pub use account_compression_cpi::{ batch_append::*, batch_nullify::*, batch_update_address_tree::*, initialize_batched_address_tree::*, initialize_batched_state_tree::*, @@ -420,6 +423,40 @@ pub mod light_registry { ) } + pub fn nullify_2<'info>( + ctx: Context<'_, '_, '_, 'info, NullifyLeaves<'info>>, + bump: u8, + change_log_indices: Vec, + leaves_queue_indices: Vec, + indices: Vec, + ) -> Result<()> { + let metadata = ctx.accounts.merkle_tree.load()?.metadata; + check_forester( + &metadata, + ctx.accounts.authority.key(), + ctx.accounts.nullifier_queue.key(), + &mut ctx.accounts.registered_forester_pda, + DEFAULT_WORK_V1, + )?; + + validate_nullify_2_inputs( + &change_log_indices, + &leaves_queue_indices, + &indices, + ctx.remaining_accounts.len(), + )?; + let proof_nodes = 
extract_proof_nodes_from_remaining_accounts(ctx.remaining_accounts); + + process_nullify( + &ctx, + bump, + change_log_indices, + leaves_queue_indices, + indices, + vec![proof_nodes], + ) + } + #[allow(clippy::too_many_arguments)] pub fn update_address_merkle_tree( ctx: Context,