5 changes: 5 additions & 0 deletions .gitignore
@@ -4,3 +4,8 @@ node*.log
discussion.json

db/

task.md
implementation_plan.md
github_issue_summary.md
walkthrough.md
56 changes: 32 additions & 24 deletions scripts/test_failure.sh
@@ -4,11 +4,20 @@ set -e
# Cleanup function
cleanup() {
echo "Stopping all nodes..."
pkill -f "cargo run --quiet --" || true
pkill -9 -f ockham || true
pkill -9 -f cargo || true
sleep 3
}
trap cleanup EXIT

# Clean old logs
trap cleanup EXIT

# PRE-FLIGHT CLEANUP
echo "Ensuring no previous nodes are running..."
pkill -9 -f ockham || true
pkill -9 -f cargo || true
sleep 3

# Clean old logs and DB
rm -f node*.log
rm -rf ./db
@@ -43,32 +52,31 @@ echo "!!! KILLING NODE 3 (Leader View 3) !!!"
kill $PID3
echo "Node 3 killed. Waiting for timeout and recovery (View 3 -> Timeout -> View 4)..."

# View 3 is 30s timeout. So we wait >30s.
sleep 45
echo "Node 3 killed. Waiting for timeout and recovery (View 4+)..."

# Polling Loop (Max 120s)
MAX_RETRIES=60
SUCCESS=0

for i in $(seq 1 $MAX_RETRIES); do
MAX_VIEW=$(grep -o "View Advanced to [0-9]*" node*.log | awk '{print $NF}' | sort -n | tail -1)
if [ -z "$MAX_VIEW" ]; then MAX_VIEW=0; fi

echo "Wait $i/$MAX_RETRIES... Current View: $MAX_VIEW"

if [ "$MAX_VIEW" -ge 4 ]; then
echo "SUCCESS: Network recovered and advanced to View 4+ (View $MAX_VIEW)."
SUCCESS=1
break
fi
sleep 2
done

echo "--- FINALIZED BLOCKS (Last 5) ---"
grep "EXPLICITLY FINALIZED VIEW" node*.log | tail -n 5

echo ""
echo "--- CONSENSUS HEALTH CHECK ---"
# If we reached View >= 4, it means we handled the timeout.
# We can also check if we see "QC Formed for View 3" which was the timeout view.
echo "Checking for View 3 QC..."
if grep -q "QC Formed for View 3" node*.log; then
echo "SUCCESS: Dummy QC for View 3 formed."
grep "QC Formed for View 3" node*.log | head -n 1
else
echo "WARNING: Did not find explicit log for View 3 QC, but checking max view..."
fi
# Verify we reached View 4 or 5
# Logs show "View Advanced to 3. Resetting Timer."
# grep -o "View Advanced to [0-9]*" gives "View Advanced to 3"
# awk '{print $NF}' gives "3"
MAX_VIEW=$(grep -o "View Advanced to [0-9]*" node0.log | awk '{print $NF}' | sort -n | tail -1)
echo "Max View Reached: $MAX_VIEW"

if [ "$MAX_VIEW" -ge 4 ]; then
echo "SUCCESS: Network recovered and advanced to View 4+."
if [ $SUCCESS -eq 1 ]; then
echo "Test Passed!"
else
echo "FAILURE: Network stalled at View $MAX_VIEW."
exit 1
47 changes: 36 additions & 11 deletions src/consensus.rs
@@ -146,6 +146,14 @@ impl SimplexState {
// but let's save genesis as the "default" block.
storage.save_qc(&genesis_qc).unwrap();

let mut initial_stakes = HashMap::new();
for pk in &committee {
let pk_bytes = pk.0.to_bytes();
let hash = crate::types::keccak256(pk_bytes);
let address = crate::types::Address::from_slice(&hash[12..]);
initial_stakes.insert(address, crate::types::U256::from(5000u64));
}

let initial_state = ConsensusState {
view: 1,
finalized_height: 0,
@@ -155,7 +163,8 @@ committee: committee.clone(),
committee: committee.clone(),
pending_validators: vec![],
exiting_validators: vec![],
stakes: HashMap::new(),
stakes: initial_stakes,
inactivity_scores: HashMap::new(),
};
storage.save_consensus_state(&initial_state).unwrap();

@@ -496,6 +505,8 @@ impl SimplexState {
// 0. Equivocation Check
if let Some(existing_vote) = view_votes.get(&vote.author)
&& existing_vote.block_hash != vote.block_hash
&& existing_vote.block_hash != Hash::default()
&& vote.block_hash != Hash::default()
{
log::warn!(
"Equivocation Detected from {:?} in View {}",
@@ -610,6 +621,10 @@ impl SimplexState {

actions.push(ConsensusAction::BroadcastBlock(block.clone()));

// Update last_voted_view to prevent double voting via on_proposal reflection
self.last_voted_view = block.view;
self.persist_state();

// Vote for own block
let block_hash = hash_data(&block);
let vote = self.create_vote(block.view, block_hash, VoteType::Notarize);
@@ -830,16 +845,26 @@ impl SimplexState {
.storage
.get_consensus_state()
.unwrap()
.unwrap_or(ConsensusState {
view: self.current_view,
finalized_height: self.finalized_height,
preferred_block: self.preferred_block,
preferred_view: self.preferred_view,
last_voted_view: self.last_voted_view,
committee: self.committee.clone(),
pending_validators: vec![],
exiting_validators: vec![],
stakes: HashMap::new(),
.unwrap_or_else(|| {
let mut stakes = HashMap::new();
for pk in &self.committee {
let pk_bytes = pk.0.to_bytes();
let hash = crate::types::keccak256(pk_bytes);
let address = crate::types::Address::from_slice(&hash[12..]);
stakes.insert(address, crate::types::U256::from(5000u64));
}
ConsensusState {
view: self.current_view,
finalized_height: self.finalized_height,
preferred_block: self.preferred_block,
preferred_view: self.preferred_view,
last_voted_view: self.last_voted_view,
committee: self.committee.clone(),
pending_validators: vec![],
exiting_validators: vec![],
stakes,
inactivity_scores: HashMap::new(),
}
});

// Update fields we manage
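Both the constructor change above and the `persist_state` fallback derive a validator's account address from the last 20 bytes of `keccak256` over the public-key bytes and seed it with the fixed 5000 initial stake. Below is a minimal sketch of that shared derivation, assuming the `PublicKey`, `Address`, `U256`, and `keccak256` items in `src/types.rs` behave as the diff suggests; the helper names are hypothetical and not part of this change:

```rust
use std::collections::HashMap;
// Assumed to match the items used in the diff (src/types.rs).
use crate::types::{keccak256, Address, PublicKey, U256};

/// Hypothetical helper: Ethereum-style address for a validator key,
/// i.e. the last 20 bytes of keccak256(public key bytes).
fn validator_address(pk: &PublicKey) -> Address {
    let hash = keccak256(pk.0.to_bytes());
    Address::from_slice(&hash[12..])
}

/// Hypothetical helper: seed every committee member with the fixed
/// 5000 initial stake used by both call sites in this diff.
fn default_stakes(committee: &[PublicKey]) -> HashMap<Address, U256> {
    committee
        .iter()
        .map(|pk| (validator_address(pk), U256::from(5000u64)))
        .collect()
}
```

Factoring the duplicated loop into a helper like this would keep the constructor and the `persist_state` fallback from drifting apart.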
1 change: 1 addition & 0 deletions src/storage.rs
@@ -87,6 +87,7 @@ pub struct ConsensusState {
pub pending_validators: Vec<(PublicKey, View)>,
pub exiting_validators: Vec<(PublicKey, View)>,
pub stakes: HashMap<Address, U256>,
pub inactivity_scores: HashMap<PublicKey, u64>,
}

/// Account Information stored in the Global State
141 changes: 108 additions & 33 deletions src/vm.rs
@@ -95,64 +95,139 @@ impl Executor {
let hash = crate::types::keccak256(pk_bytes);
let address = Address::from_slice(&hash[12..]);

let mut slashed_amount = U256::from(1000u64); // Fixed Slash Amount
let slashed_amount = U256::from(1000u64); // Fixed Slash Amount

if let Some(mut info) = db.basic(address).unwrap() {
if info.balance < slashed_amount {
slashed_amount = info.balance; // Burn all
}
info.balance -= slashed_amount;
if let Ok(Some(mut state)) = db.get_consensus_state() {
if let Some(stake) = state.stakes.get_mut(&address) {
if *stake < slashed_amount {
*stake = U256::ZERO;
} else {
*stake -= slashed_amount;
}

// Commit Balance Update
let new_info = crate::storage::AccountInfo {
nonce: info.nonce,
balance: info.balance,
code_hash: Hash(info.code_hash.0), // Revm to Internal Hash
code: info.code.map(|c| c.original_bytes()),
};
db.commit_account(address, new_info).unwrap();
log::warn!(
"Slashed Validator {:?} amount {:?}",
address,
slashed_amount
);

// 4. Remove from Committee if low balance (Force Remove)
let min_stake = U256::from(2000u64);
#[allow(clippy::collapsible_if)]
if info.balance < min_stake {
if let Ok(Some(mut state)) = db.get_consensus_state() {
log::warn!(
"Slashed Validator {:?} amount {:?}",
address,
slashed_amount
);

// 4. Remove from Committee if low stake
let min_stake = U256::from(2000u64);
if *stake < min_stake {
// Check Pending
if let Some(pos) = state
.pending_validators
.iter()
.position(|(pk, _)| *pk == offender)
{
state.pending_validators.remove(pos);
// Also refund stake if any?
// Logic: validator must maintain min_stake to stay pending.
log::warn!(
"Validator Removed from Pending (Low Stake): {:?}",
offender
);
}
// Check Active
if let Some(pos) = state.committee.iter().position(|x| *x == offender) {
// Trigger Exit?
// For simplicity, just remove from committee now?
// Ideally should be "Exiting" state.
state.committee.remove(pos);
log::warn!(
"Validator Removed from Committee (Low Stake): {:?}",
offender
);
}
// Check Exiting (Already leaving, but maybe accelerate?)
// No need, just let them exit.
db.save_consensus_state(&state).unwrap();
}
db.save_consensus_state(&state).unwrap();
} else {
log::warn!(
"Validator {:?} has no stake entry found for address {:?}",
offender,
address
);
}
}
}

// 0.5 Process Liveness (Leader Slashing)
if let Ok(Some(mut state)) = db.get_consensus_state() {
let mut changed = false;

// 1. Reward Current Leader (Author)
if let Some(score) = state.inactivity_scores.get_mut(&block.author) {
if *score > 0 {
*score -= 1;
changed = true;
}
} else {
// Initialize if not present (optimization: only if we need to track?)
}

// 2. Penalize Failed Leader (if Timeout QC)
let qc = &block.justify;
if qc.block_hash == Hash::default() && qc.view > 0 {
// Timeout detected for qc.view
let committee_len = state.committee.len();
if committee_len > 0 {
let failed_leader_idx = (qc.view as usize) % committee_len;
// Safety check index
if let Some(failed_leader) = state.committee.get(failed_leader_idx).cloned() {
log::warn!(
"Timeout QC for View {}. Penalizing Leader {:?}",
qc.view,
failed_leader
);

// Increment Score
let score = state
.inactivity_scores
.entry(failed_leader.clone())
.or_insert(0);
*score += 1;
let current_score = *score;
changed = true;

// Immediate Slash (Incremental)
let penalty = U256::from(10u64);
let pk_bytes = failed_leader.0.to_bytes();
let hash = crate::types::keccak256(pk_bytes);
let address = Address::from_slice(&hash[12..]);

if let Some(stake) = state.stakes.get_mut(&address) {
if *stake < penalty {
*stake = U256::ZERO;
} else {
*stake -= penalty;
}
changed = true;
} else {
log::warn!(
"Validator {:?} has no stake entry found for address {:?}",
failed_leader,
address
);
}

// Threshold Check
if current_score > 50 {
log::warn!(
"Validator {:?} exceeded inactivity threshold ({}). Removing from committee.",
failed_leader,
current_score
);
if let Some(pos) =
state.committee.iter().position(|x| *x == failed_leader)
{
state.committee.remove(pos);
// Reset score
state.inactivity_scores.remove(&failed_leader);
changed = true;
}
}
}
}
}

if changed {
db.save_consensus_state(&state).unwrap();
}
}

for tx in &block.payload {
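The liveness path above treats a `justify` QC whose `block_hash` equals the default (all-zero) hash as a timeout QC and charges the leader of the timed-out view, picked round-robin as `view % committee.len()`. Here is a minimal sketch of that index math, using simplified stand-in types rather than the project's real `QuorumCertificate` and `Hash` definitions:

```rust
// Stand-in types for illustration only; the real definitions live elsewhere in the crate.
#[derive(Default, PartialEq)]
struct Hash([u8; 32]);

struct QuorumCertificate {
    view: u64,
    block_hash: Hash,
}

/// Returns the committee index of the leader to penalize, if the QC is a timeout QC.
fn failed_leader_index(qc: &QuorumCertificate, committee_len: usize) -> Option<usize> {
    // An all-zero block hash marks a dummy/timeout QC, mirroring the check in vm.rs.
    if qc.block_hash == Hash::default() && qc.view > 0 && committee_len > 0 {
        Some(qc.view as usize % committee_len)
    } else {
        None
    }
}

fn main() {
    // Assuming a 4-node committee, a timeout QC for view 3 points at index 3,
    // the node killed by scripts/test_failure.sh.
    let qc = QuorumCertificate { view: 3, block_hash: Hash::default() };
    assert_eq!(failed_leader_index(&qc, 4), Some(3));
}
```

The threshold check then removes a leader once its inactivity score exceeds 50, so repeated timeouts eventually eject a persistently offline validator rather than slashing it indefinitely.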