diff --git a/.gitignore b/.gitignore
index 1d1abe6..70edf98 100644
--- a/.gitignore
+++ b/.gitignore
@@ -4,3 +4,8 @@ node*.log
discussion.json
db/
+
+task.md
+implementation_plan.md
+github_issue_summary.md
+walkthrough.md
diff --git a/scripts/test_failure.sh b/scripts/test_failure.sh
index 87ff173..409d3ad 100755
--- a/scripts/test_failure.sh
+++ b/scripts/test_failure.sh
@@ -4,11 +4,19 @@ set -e
# Cleanup function
cleanup() {
echo "Stopping all nodes..."
-    pkill -f "cargo run --quiet --" || true
+    pkill -9 -f ockham || true
+    pkill -9 -f cargo || true
+    sleep 3
}
trap cleanup EXIT
-# Clean old logs
+
+# PRE-FLIGHT CLEANUP
+echo "Ensuring no previous nodes are running..."
+pkill -9 -f ockham || true
+pkill -9 -f cargo || true
+sleep 3
+
# Clean old logs and DB
rm -f node*.log
rm -rf ./db
@@ -43,32 +52,30 @@ echo "!!! KILLING NODE 3 (Leader View 3) !!!"
kill $PID3
-echo "Node 3 killed. Waiting for timeout and recovery (View 3 -> Timeout -> View 4)..."
-# View 3 is 30s timeout. So we wait >30s.
-sleep 45
+echo "Node 3 killed. Waiting for timeout and recovery (View 4+)..."
+
+# Polling Loop (Max 120s)
+MAX_RETRIES=60
+SUCCESS=0
+
+for i in $(seq 1 $MAX_RETRIES); do
+ MAX_VIEW=$(grep -o "View Advanced to [0-9]*" node*.log | awk '{print $NF}' | sort -n | tail -1)
+ if [ -z "$MAX_VIEW" ]; then MAX_VIEW=0; fi
+
+ echo "Wait $i/$MAX_RETRIES... Current View: $MAX_VIEW"
+
+ if [ "$MAX_VIEW" -ge 4 ]; then
+ echo "SUCCESS: Network recovered and advanced to View 4+ (View $MAX_VIEW)."
+ SUCCESS=1
+ break
+ fi
+ sleep 2
+done
echo "--- FINALIZED BLOCKS (Last 5) ---"
grep "EXPLICITLY FINALIZED VIEW" node*.log | tail -n 5
-echo ""
-echo "--- CONSENSUS HEALTH CHECK ---"
-# If we reached View > 4, it means we handled the timeout.
-# We can also check if we see "QC Formed for View 3" which was the timeout view.
-echo "Checking for View 3 QC..."
-if grep -q "QC Formed for View 3" node*.log; then
- echo "SUCCESS: Dummy QC for View 3 formed."
- grep "QC Formed for View 3" node*.log | head -n 1
-else
- echo "WARNING: Did not find explicit log for View 3 QC, but checking max view..."
-fi
-# Verify we reached View 4 or 5
-# Logs show "View Advanced to 3. Resetting Timer."
-# grep -o "View Advanced to [0-9]*" gives "View Advanced to 3"
-# awk '{print $NF}' gives "3"
-MAX_VIEW=$(grep -o "View Advanced to [0-9]*" node0.log | awk '{print $NF}' | sort -n | tail -1)
-echo "Max View Reached: $MAX_VIEW"
-
-if [ "$MAX_VIEW" -ge 4 ]; then
- echo "SUCCESS: Network recovered and advanced to View 4+."
+if [ $SUCCESS -eq 1 ]; then
+ echo "Test Passed!"
else
echo "FAILURE: Network stalled at View $MAX_VIEW."
exit 1
diff --git a/src/consensus.rs b/src/consensus.rs
index 15e8ac8..2880cf9 100644
--- a/src/consensus.rs
+++ b/src/consensus.rs
@@ -146,6 +146,14 @@ impl SimplexState {
// but let's save genesis as the "default" block.
storage.save_qc(&genesis_qc).unwrap();
+ let mut initial_stakes = HashMap::new();
+ for pk in &committee {
+ let pk_bytes = pk.0.to_bytes();
+ let hash = crate::types::keccak256(pk_bytes);
+ let address = crate::types::Address::from_slice(&hash[12..]);
+ initial_stakes.insert(address, crate::types::U256::from(5000u64));
+ }
+
let initial_state = ConsensusState {
view: 1,
finalized_height: 0,
@@ -155,7 +163,8 @@ impl SimplexState {
committee: committee.clone(),
pending_validators: vec![],
exiting_validators: vec![],
- stakes: HashMap::new(),
+ stakes: initial_stakes,
+ inactivity_scores: HashMap::new(),
};
storage.save_consensus_state(&initial_state).unwrap();
@@ -496,6 +505,8 @@ impl SimplexState {
// 0. Equivocation Check
if let Some(existing_vote) = view_votes.get(&vote.author)
&& existing_vote.block_hash != vote.block_hash
+ && existing_vote.block_hash != Hash::default()
+ && vote.block_hash != Hash::default()
{
log::warn!(
"Equivocation Detected from {:?} in View {}",
@@ -610,6 +621,10 @@ impl SimplexState {
actions.push(ConsensusAction::BroadcastBlock(block.clone()));
+ // Update last_voted_view to prevent double voting via on_proposal reflection
+ self.last_voted_view = block.view;
+ self.persist_state();
+
// Vote for own block
let block_hash = hash_data(&block);
let vote = self.create_vote(block.view, block_hash, VoteType::Notarize);
@@ -830,16 +845,26 @@ impl SimplexState {
.storage
.get_consensus_state()
.unwrap()
- .unwrap_or(ConsensusState {
- view: self.current_view,
- finalized_height: self.finalized_height,
- preferred_block: self.preferred_block,
- preferred_view: self.preferred_view,
- last_voted_view: self.last_voted_view,
- committee: self.committee.clone(),
- pending_validators: vec![],
- exiting_validators: vec![],
- stakes: HashMap::new(),
+ .unwrap_or_else(|| {
+ let mut stakes = HashMap::new();
+ for pk in &self.committee {
+ let pk_bytes = pk.0.to_bytes();
+ let hash = crate::types::keccak256(pk_bytes);
+ let address = crate::types::Address::from_slice(&hash[12..]);
+ stakes.insert(address, crate::types::U256::from(5000u64));
+ }
+ ConsensusState {
+ view: self.current_view,
+ finalized_height: self.finalized_height,
+ preferred_block: self.preferred_block,
+ preferred_view: self.preferred_view,
+ last_voted_view: self.last_voted_view,
+ committee: self.committee.clone(),
+ pending_validators: vec![],
+ exiting_validators: vec![],
+ stakes,
+ inactivity_scores: HashMap::new(),
+ }
});
// Update fields we manage
diff --git a/src/storage.rs b/src/storage.rs
index 5e2074e..908d8fa 100644
--- a/src/storage.rs
+++ b/src/storage.rs
@@ -87,6 +87,7 @@ pub struct ConsensusState {
pub pending_validators: Vec<(PublicKey, View)>,
pub exiting_validators: Vec<(PublicKey, View)>,
pub stakes: HashMap<Address, U256>,
+    pub inactivity_scores: HashMap<PublicKey, u64>,
}
/// Account Information stored in the Global State
diff --git a/src/vm.rs b/src/vm.rs
index dd6e711..2b18426 100644
--- a/src/vm.rs
+++ b/src/vm.rs
@@ -95,33 +95,25 @@ impl Executor {
let hash = crate::types::keccak256(pk_bytes);
let address = Address::from_slice(&hash[12..]);
- let mut slashed_amount = U256::from(1000u64); // Fixed Slash Amount
+ let slashed_amount = U256::from(1000u64); // Fixed Slash Amount
- if let Some(mut info) = db.basic(address).unwrap() {
- if info.balance < slashed_amount {
- slashed_amount = info.balance; // Burn all
- }
- info.balance -= slashed_amount;
+ if let Ok(Some(mut state)) = db.get_consensus_state() {
+ if let Some(stake) = state.stakes.get_mut(&address) {
+ if *stake < slashed_amount {
+ *stake = U256::ZERO;
+ } else {
+ *stake -= slashed_amount;
+ }
- // Commit Balance Update
- let new_info = crate::storage::AccountInfo {
- nonce: info.nonce,
- balance: info.balance,
- code_hash: Hash(info.code_hash.0), // Revm to Internal Hash
- code: info.code.map(|c| c.original_bytes()),
- };
- db.commit_account(address, new_info).unwrap();
- log::warn!(
- "Slashed Validator {:?} amount {:?}",
- address,
- slashed_amount
- );
-
- // 4. Remove from Committee if low balance (Force Remove)
- let min_stake = U256::from(2000u64);
- #[allow(clippy::collapsible_if)]
- if info.balance < min_stake {
- if let Ok(Some(mut state)) = db.get_consensus_state() {
+ log::warn!(
+ "Slashed Validator {:?} amount {:?}",
+ address,
+ slashed_amount
+ );
+
+ // 4. Remove from Committee if low stake
+ let min_stake = U256::from(2000u64);
+ if *stake < min_stake {
// Check Pending
if let Some(pos) = state
.pending_validators
@@ -129,8 +121,6 @@ impl Executor {
.position(|(pk, _)| *pk == offender)
{
state.pending_validators.remove(pos);
- // Also refund stake if any?
- // Logic: validator must maintain min_stake to stay pending.
log::warn!(
"Validator Removed from Pending (Low Stake): {:?}",
offender
@@ -138,21 +128,106 @@ impl Executor {
}
// Check Active
if let Some(pos) = state.committee.iter().position(|x| *x == offender) {
- // Trigger Exit?
- // For simplicity, just remove from committee now?
- // Ideally should be "Exiting" state.
state.committee.remove(pos);
log::warn!(
"Validator Removed from Committee (Low Stake): {:?}",
offender
);
}
- // Check Exiting (Already leaving, but maybe accelerate?)
- // No need, just let them exit.
- db.save_consensus_state(&state).unwrap();
+ }
+ db.save_consensus_state(&state).unwrap();
+ } else {
+ log::warn!(
+ "Validator {:?} has no stake entry found for address {:?}",
+ offender,
+ address
+ );
+ }
+ }
+ }
+
+ // 0.5 Process Liveness (Leader Slashing)
+ if let Ok(Some(mut state)) = db.get_consensus_state() {
+ let mut changed = false;
+
+ // 1. Reward Current Leader (Author)
+ if let Some(score) = state.inactivity_scores.get_mut(&block.author) {
+ if *score > 0 {
+ *score -= 1;
+ changed = true;
+ }
+ } else {
+ // Initialize if not present (optimization: only if we need to track?)
+ }
+
+ // 2. Penalize Failed Leader (if Timeout QC)
+ let qc = &block.justify;
+ if qc.block_hash == Hash::default() && qc.view > 0 {
+ // Timeout detected for qc.view
+ let committee_len = state.committee.len();
+ if committee_len > 0 {
+ let failed_leader_idx = (qc.view as usize) % committee_len;
+ // Safety check index
+ if let Some(failed_leader) = state.committee.get(failed_leader_idx).cloned() {
+ log::warn!(
+ "Timeout QC for View {}. Penalizing Leader {:?}",
+ qc.view,
+ failed_leader
+ );
+
+ // Increment Score
+ let score = state
+ .inactivity_scores
+ .entry(failed_leader.clone())
+ .or_insert(0);
+ *score += 1;
+ let current_score = *score;
+ changed = true;
+
+ // Immediate Slash (Incremental)
+ let penalty = U256::from(10u64);
+ let pk_bytes = failed_leader.0.to_bytes();
+ let hash = crate::types::keccak256(pk_bytes);
+ let address = Address::from_slice(&hash[12..]);
+
+ if let Some(stake) = state.stakes.get_mut(&address) {
+ if *stake < penalty {
+ *stake = U256::ZERO;
+ } else {
+ *stake -= penalty;
+ }
+ changed = true;
+ } else {
+ log::warn!(
+ "Validator {:?} has no stake entry found for address {:?}",
+ failed_leader,
+ address
+ );
+ }
+
+ // Threshold Check
+ if current_score > 50 {
+ log::warn!(
+ "Validator {:?} exceeded inactivity threshold ({}). Removing from committee.",
+ failed_leader,
+ current_score
+ );
+ if let Some(pos) =
+ state.committee.iter().position(|x| *x == failed_leader)
+ {
+ state.committee.remove(pos);
+ // Reset score
+ state.inactivity_scores.remove(&failed_leader);
+ changed = true;
+ }
+ }
}
}
}
+
+ if changed {
+ db.save_consensus_state(&state).unwrap();
+ }
}
for tx in &block.payload {
diff --git a/tests/liveness_test.rs b/tests/liveness_test.rs
new file mode 100644
index 0000000..e7ecb9e
--- /dev/null
+++ b/tests/liveness_test.rs
@@ -0,0 +1,168 @@
+use ockham::crypto::{Hash, PrivateKey, PublicKey};
+use ockham::storage::Storage;
+use ockham::types::{Block, QuorumCertificate, U256};
+use std::sync::Arc;
+use std::sync::Mutex;
+
+#[test]
+fn test_liveness_slashing() {
+ // 1. Setup Committee (4 Nodes)
+ let keys: Vec<(PublicKey, PrivateKey)> = (0..4)
+ .map(|i| ockham::crypto::generate_keypair_from_id(i as u64))
+ .collect();
+    let committee: Vec<PublicKey> = keys.iter().map(|k| k.0.clone()).collect();
+
+ // Node 0 is our reference, but we are testing the VM logic which is shared
+ let _my_id = keys[0].0.clone();
+ let _my_key = keys[0].1.clone();
+
+ // Target Victim: Node 1 (Offender)
+ let victim_idx = 1;
+ let victim_id = keys[victim_idx].0.clone();
+
+ let storage = Arc::new(ockham::storage::MemStorage::new());
+ let _tx_pool = Arc::new(ockham::tx_pool::TxPool::new(storage.clone()));
+
+ // Initialize Victim Balance (1000 units)
+ let victim_pk_bytes = victim_id.0.to_bytes();
+ let victim_hash = ockham::types::keccak256(victim_pk_bytes);
+ let victim_addr = ockham::types::Address::from_slice(&victim_hash[12..]);
+
+ let initial_balance = U256::from(1000u64);
+ let account = ockham::storage::AccountInfo {
+ nonce: 0,
+ balance: initial_balance,
+ code_hash: Hash(ockham::types::keccak256([]).into()),
+ code: None,
+ };
+ storage.save_account(&victim_addr, &account).unwrap();
+
+ let state_manager = Arc::new(Mutex::new(ockham::state::StateManager::new(
+ storage.clone(),
+ None,
+ )));
+ let executor = ockham::vm::Executor::new(
+ state_manager.clone(),
+ ockham::types::DEFAULT_BLOCK_GAS_LIMIT,
+ );
+
+ // Initialize State
+ let initial_state = ockham::storage::ConsensusState {
+ view: 1,
+ finalized_height: 0,
+ preferred_block: Hash::default(),
+ preferred_view: 0,
+ last_voted_view: 0,
+ committee: committee.clone(),
+ pending_validators: vec![],
+ exiting_validators: vec![],
+ stakes: {
+ let mut m = std::collections::HashMap::new();
+ m.insert(victim_addr, U256::from(1000u64));
+ m
+ },
+ inactivity_scores: std::collections::HashMap::new(),
+ };
+ storage.save_consensus_state(&initial_state).unwrap();
+
+ // 2. Simulate Timeout of View 1 (Leader: Node 1)
+ // View 1 -> 1 % 4 = 1. So Node 1 is Leader of View 1.
+ // We create a Block in View 2 (Leader: Node 2) that justifies View 1 with a Timeout QC.
+
+ let timeout_view = 1;
+ let timeout_qc = QuorumCertificate {
+ view: timeout_view,
+ block_hash: Hash::default(), // ZeroHash = Timeout
+ signature: ockham::crypto::Signature::default(),
+ signers: vec![],
+ };
+
+ // Block Author: Node 2
+ let author_idx = 2;
+ let block = Block::new(
+ keys[author_idx].0.clone(),
+ 2, // View
+ Hash::default(), // Parent
+ timeout_qc,
+ Hash::default(),
+ Hash::default(),
+ vec![],
+ U256::ZERO,
+ 0,
+ vec![],
+ Hash::default(),
+ );
+
+ // 3. Execute Block
+ let mut block_to_exec = block.clone();
+ executor.execute_block(&mut block_to_exec).unwrap();
+
+ // 4. Verify Slashing
+ {
+ let db = state_manager.lock().unwrap();
+ // Check Stake
+ let state = db.get_consensus_state().unwrap().unwrap();
+ let stake = state.stakes.get(&victim_addr).expect("Stake should exist");
+ assert_eq!(*stake, U256::from(990u64), "Stake should be slashed by 10");
+
+ // Check Score
+ let score = state
+ .inactivity_scores
+ .get(&victim_id)
+ .expect("Score should exist");
+ assert_eq!(*score, 1, "Score should be 1");
+ }
+
+ // 5. Reward Check (Node 2 should decrement, but it's 0 so stays 0)
+ // Let's set Node 2 score to 5 first.
+ {
+ let db = state_manager.lock().unwrap();
+ let mut state = db.get_consensus_state().unwrap().unwrap();
+ state
+ .inactivity_scores
+ .insert(keys[author_idx].0.clone(), 5);
+ db.save_consensus_state(&state).unwrap();
+ }
+
+ // Execute again (same block reuse is fine for logic testing)
+ executor.execute_block(&mut block_to_exec).unwrap();
+
+ {
+ let db = state_manager.lock().unwrap();
+ let state = db.get_consensus_state().unwrap().unwrap();
+ let score = state.inactivity_scores.get(&keys[author_idx].0).unwrap();
+ assert_eq!(*score, 4, "Author score should decrement");
+
+ let victim_score = state.inactivity_scores.get(&victim_id).unwrap();
+ assert_eq!(*victim_score, 2, "Victim score should increment again");
+
+ let stake = state.stakes.get(&victim_addr).unwrap();
+ assert_eq!(*stake, U256::from(980u64), "Stake slashed again");
+ }
+
+ // 6. Threshold Removal
+ // Repeat until score > 50
+ // Current score 2. Need 49 more loops.
+ for _ in 0..50 {
+ executor.execute_block(&mut block_to_exec).unwrap();
+ }
+
+ {
+ let db = state_manager.lock().unwrap();
+ let state = db.get_consensus_state().unwrap().unwrap();
+
+ // Check if removed from committee
+ assert!(
+ !state.committee.contains(&victim_id),
+ "Victim should be removed from committee"
+ );
+
+ // Check if score reset
+ assert!(
+ state.inactivity_scores.get(&victim_id).is_none(),
+ "Score should be clear"
+ );
+ }
+
+ println!("Liveness Slashing Test Passed!");
+}
diff --git a/tests/rpc_test.rs b/tests/rpc_test.rs
index 7f99ba3..d47d98a 100644
--- a/tests/rpc_test.rs
+++ b/tests/rpc_test.rs
@@ -19,6 +19,7 @@ async fn test_rpc_get_status() {
pending_validators: vec![],
exiting_validators: vec![],
stakes: HashMap::new(),
+ inactivity_scores: HashMap::new(),
};
storage.save_consensus_state(&state).unwrap();
@@ -76,6 +77,7 @@ async fn test_rpc_get_block() {
pending_validators: vec![],
exiting_validators: vec![],
stakes: HashMap::new(),
+ inactivity_scores: HashMap::new(),
};
storage.save_consensus_state(&state).unwrap();
diff --git a/tests/slashing_test.rs b/tests/slashing_test.rs
index 365f441..b8843d0 100644
--- a/tests/slashing_test.rs
+++ b/tests/slashing_test.rs
@@ -58,6 +58,14 @@ fn test_slashing_flow() {
ockham::types::DEFAULT_BLOCK_GAS_LIMIT,
);
+ // Initialize Stakes for Offender
+ {
+ let mut db = state_manager.lock().unwrap();
+ let mut state = db.get_consensus_state().unwrap().unwrap();
+ state.stakes.insert(offender_addr, U256::from(5000u64));
+ db.save_consensus_state(&state).unwrap();
+ }
+
// 2. Create Equivocation Votes (View 2)
let view = 2;
let block_a_hash = Hash([1u8; 32]);
@@ -168,16 +176,13 @@ fn test_slashing_flow() {
executor.execute_block(&mut block_to_exec).unwrap();
- // Check balance
+ // Check Stake
let mut db = validator.executor.state.lock().unwrap();
- let account = db.basic(offender_addr).unwrap().unwrap();
+ let state = db.get_consensus_state().unwrap().unwrap();
+ let stake = state.stakes.get(&offender_addr).unwrap();
// Slashed amount is 1000. Initial 5000. Should be 4000.
- assert_eq!(
- account.balance,
- U256::from(4000u64),
- "Balance should be slashed"
- );
+ assert_eq!(*stake, U256::from(4000u64), "Stake should be slashed");
println!("Slashing Test Passed!");
}