diff --git a/CHANGELOG.md b/CHANGELOG.md
index 6dc8e0ede..4676d01ff 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,6 +1,26 @@
+## Diamond Node Software 4.0.2
+
+
+### New behavior for validator nodes
+- [Autoshutdown if a Node becomes a regular Node](https://github.com/DMDcoin/diamond-node/issues/322)
+- [Remove empty blocks during key gen phases behaviour](https://github.com/DMDcoin/diamond-node/issues/327)
+- [Service Transaction cleanup (garbage collect)](https://github.com/DMDcoin/diamond-node/issues/172)
+
+### RPC
+- [Gas price from contracts](https://github.com/DMDcoin/diamond-node/issues/159)
+
+### Further improvements
+- [FIXED: received transactions are getting pooled if announced by another peer](https://github.com/DMDcoin/diamond-node/issues/304)
+- [FIXED: dropped transactions are getting pooled](https://github.com/DMDcoin/diamond-node/issues/303)
+- [FIXED: key generation can panic if faulty validators write malicious parts](https://github.com/DMDcoin/diamond-node/issues/100)
+- [FIXED: already included transactions are refetched from peers](https://github.com/DMDcoin/diamond-node/issues/196)
+- [FIXED: not staked nodes write log entries about service transactions](https://github.com/DMDcoin/diamond-node/issues/323)
+- [Graceful Node Shutdown: increase to 15 seconds](https://github.com/DMDcoin/diamond-node/issues/321)
+
+
 ## Diamond Node Software 4.0.1
 
-First hotfix
+OPTIONAL: First hotfix
 
 Mitigates the transaction spam caused by flaws in the transaction management of report disconnectivity transactions.
 - [Reduce Intervals for connectivity checks](https://github.com/DMDcoin/diamond-node/issues/313)
diff --git a/Cargo.lock b/Cargo.lock
index f41aa956e..8cc7e6cb1 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -850,7 +850,7 @@ dependencies = [
 
 [[package]]
 name = "diamond-node"
-version = "4.0.1"
+version = "4.0.2"
 dependencies = [
  "ansi_term 0.10.2",
  "atty",
@@ -3579,7 +3579,7 @@ dependencies = [
 
 [[package]]
 name = "parity-version"
-version = "4.0.1"
+version = "4.0.2"
 dependencies = [
  "parity-bytes",
  "rlp",
diff --git a/Cargo.toml b/Cargo.toml
index edeb127ca..1e225d0df 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -2,7 +2,7 @@
 description = "Diamond Node"
 name = "diamond-node"
 # NOTE Make sure to update util/version/Cargo.toml as well
-version = "4.0.1"
+version = "4.0.2"
 license = "GPL-3.0"
 authors = [
     "bit.diamonds developers",
diff --git a/bin/oe/run.rs b/bin/oe/run.rs
index 92dc4feb5..c58504c86 100644
--- a/bin/oe/run.rs
+++ b/bin/oe/run.rs
@@ -723,7 +723,7 @@ impl RunningClient {
             .name("diamond-node-force-quit".to_string())
             .spawn(move || {
-                let duration_soft = 5;
+                let duration_soft = 15;
                 // we make a force quit if after 90 seconds, if this shutdown routine
                 std::thread::sleep(Duration::from_secs(duration_soft));
                 warn!(target: "shutdown", "shutdown not happened within {duration_soft} seconds, starting force exiting the process.");
diff --git a/crates/concensus/miner/src/pool/queue.rs b/crates/concensus/miner/src/pool/queue.rs
index 3034123d4..dc9fd9e6f 100644
--- a/crates/concensus/miner/src/pool/queue.rs
+++ b/crates/concensus/miner/src/pool/queue.rs
@@ -41,6 +41,8 @@ use crate::pool::{
     verifier, PendingOrdering, PendingSettings, PrioritizationStrategy,
 };
 
+use super::VerifiedTransaction;
+
 type Listener = (
     LocalTransactionsList,
     (listener::Notifier, listener::Logger),
@@ -413,6 +415,17 @@ impl TransactionQueue {
             .collect()
     }
 
+    /// Performs garbage collection on the pool of this `TransactionQueue` for free service transactions.
+    /// Removes transactions that are not valid anymore.
+    /// The process executes listener calls.
+    pub fn garbage_collect bool>(
+        &self,
+        service_transaction_check: F,
+    ) {
+        let mut pool = self.pool.write();
+        pool.garbage_collect(service_transaction_check);
+    }
+
     /// Computes unordered set of pending hashes.
     ///
     /// Since strict nonce-checking is not required, you may get some false positive future transactions as well.
diff --git a/crates/ethcore/service/src/service.rs b/crates/ethcore/service/src/service.rs
index 9885ea874..67319b6f6 100644
--- a/crates/ethcore/service/src/service.rs
+++ b/crates/ethcore/service/src/service.rs
@@ -233,7 +233,9 @@ impl IoHandler for ClientIoHandler {
             ClientIoMessage::Execute(ref exec) => {
                 (*exec.0)(&self.client);
             }
-            _ => {} // ignore other messages
+            ClientIoMessage::NewChainHead => {
+                self.client.garbage_collect_in_queue();
+            }
         }
     }
 }
diff --git a/crates/ethcore/src/client/client.rs b/crates/ethcore/src/client/client.rs
index e4cc85d3f..48291d278 100644
--- a/crates/ethcore/src/client/client.rs
+++ b/crates/ethcore/src/client/client.rs
@@ -306,6 +306,10 @@ pub struct Client {
 
     shutdown: Arc,
 
+    /// Block number and block hash of the latest GC run.
+    /// This information is used to avoid double garbage collection.
+    garbage_collect_latest_block: Mutex<(u64, H256)>,
+
     statistics: ClientStatistics,
 }
 
@@ -842,6 +846,8 @@ impl Importer {
             warn!("Failed to prune ancient state data: {}", e);
         }
 
+        client.schedule_garbage_collect_in_queue();
+
         route
     }
 
@@ -1107,6 +1113,7 @@ impl Client {
             importer,
             config,
             shutdown,
+            garbage_collect_latest_block: Mutex::new((0, H256::zero())),
             statistics,
         });
 
@@ -1540,6 +1547,52 @@ impl Client {
         }
     }
 
+    /// Schedule garbage collection of invalid service transactions from the transaction queue based on the latest block.
+    pub fn schedule_garbage_collect_in_queue(&self) {
+        let m = ClientIoMessage::execute(|c| c.garbage_collect_in_queue());
+        if let Err(e) = self.io_channel.read().send(m) {
+            error!(target: "client", "Failed to schedule garbage collection in the transaction queue: {:?}", e);
+        }
+    }
+
+    /// Garbage collect invalid service transactions from the transaction queue based on the latest block header.
+    pub fn garbage_collect_in_queue(&self) {
+        let machine = self.engine().machine();
+
+        match &self.block_header_decoded(BlockId::Latest) {
+            Some(block_header) => {
+                {
+                    // scope for mutex.
+                    let mut last_gc = self.garbage_collect_latest_block.lock();
+
+                    if block_header.number() == last_gc.0 && block_header.hash() == last_gc.1 {
+                        // already GCed for this block, or a GC is ongoing.
+                        // we can return here.
+                        return;
+                    }
+
+                    // we treat an ongoing GC as done, to avoid blocking the message channel.
+                    last_gc.0 = block_header.number();
+                    last_gc.1 = block_header.hash();
+                }
+
+                // an accepted race condition hides here:
+                // the latest block could change during a long-running GC.
+                // this could be avoided by more complex GC logic,
+                // but the GC blocks the tx queue, so it has to be blazing fast.
+                self.importer.miner.collect_garbage(|tx|
+                    match machine.verify_transaction(tx.signed(), block_header, self) {
+                        Ok(_) => true,
+                        Err(e) => {
+                            trace!(target: "client", "collected garbage transaction from {:?}: {:?} reason: {:?}", tx.signed().sender(), tx.signed().hash, e);
+                            false
+                        },
+                    });
+            }
+            None => {}
+        }
+    }
+
     fn check_garbage(&self) {
         self.chain.read().collect_garbage();
         self.importer.block_queue.collect_garbage();
diff --git a/crates/ethcore/src/engines/hbbft/contracts/keygen_history.rs b/crates/ethcore/src/engines/hbbft/contracts/keygen_history.rs
index ee41a17ef..a4d5abd73 100644
--- a/crates/ethcore/src/engines/hbbft/contracts/keygen_history.rs
+++ b/crates/ethcore/src/engines/hbbft/contracts/keygen_history.rs
@@ -156,12 +156,25 @@ pub fn acks_of_address(
         if serialized_ack.is_empty() {
             return Err(CallError::ReturnValueInvalid);
         }
-        let deserialized_ack: Ack = bincode::deserialize(&serialized_ack).unwrap();
-        let outcome = skg
-            .handle_ack(vmap.get(&address).unwrap(), deserialized_ack)
-            .unwrap();
+        let deserialized_ack: Ack = match bincode::deserialize(&serialized_ack) {
+            Ok(ack) => ack,
+            Err(e) => {
+                error!(target: "engine", "Failed to deserialize Ack #{} for address {}: {:?}", n, address, e);
+                return Err(CallError::ReturnValueInvalid);
+            }
+        };
+
+        let outcome = match skg.handle_ack(vmap.get(&address).unwrap(), deserialized_ack) {
+            Ok(s) => s,
+            Err(e) => {
+                error!(target: "engine", "Failed to handle Ack #{} for address {}: {:?}", n, address, e);
+                return Err(CallError::ReturnValueInvalid);
+            }
+        };
+
         if let AckOutcome::Invalid(fault) = outcome {
-            panic!("Expected Ack Outcome to be valid. {:?}", fault);
+            error!(target: "engine", "Invalid Ack Outcome #{} for address {}: {:?}", n, address, fault);
+            return Err(CallError::ReturnValueInvalid);
         }
     }
diff --git a/crates/ethcore/src/engines/hbbft/contracts/staking.rs b/crates/ethcore/src/engines/hbbft/contracts/staking.rs
index 84094ccc5..58b0ee653 100644
--- a/crates/ethcore/src/engines/hbbft/contracts/staking.rs
+++ b/crates/ethcore/src/engines/hbbft/contracts/staking.rs
@@ -35,11 +35,6 @@ pub fn get_posdao_epoch_start(
     call_const_staking!(c, staking_epoch_start_block)
 }
 
-pub fn start_time_of_next_phase_transition(client: &dyn EngineClient) -> Result {
-    let c = BoundContract::bind(client, BlockId::Latest, *STAKING_CONTRACT_ADDRESS);
-    call_const_staking!(c, start_time_of_next_phase_transition)
-}
-
 pub fn candidate_min_stake(client: &dyn EngineClient) -> Result {
     let c = BoundContract::bind(client, BlockId::Latest, *STAKING_CONTRACT_ADDRESS);
     call_const_staking!(c, candidate_min_stake)
diff --git a/crates/ethcore/src/engines/hbbft/hbbft_early_epoch_end_manager.rs b/crates/ethcore/src/engines/hbbft/hbbft_early_epoch_end_manager.rs
index 398246ab4..3134d2fcb 100644
--- a/crates/ethcore/src/engines/hbbft/hbbft_early_epoch_end_manager.rs
+++ b/crates/ethcore/src/engines/hbbft/hbbft_early_epoch_end_manager.rs
@@ -112,7 +112,7 @@ impl HbbftEarlyEpochEndManager {
             signing_address: signing_address.clone(),
         };
 
-        info!(target: "engine", "early-epoch-end: HbbftEarlyEpochEndManager created. start_time {now:?}, start_block: {epoch_start_block}");
+        trace!(target: "engine", "early-epoch-end: HbbftEarlyEpochEndManager created. start_time {now:?}, start_block: {epoch_start_block}");
 
         return Some(result);
     }
diff --git a/crates/ethcore/src/engines/hbbft/hbbft_engine.rs b/crates/ethcore/src/engines/hbbft/hbbft_engine.rs
index c3eca3362..2d93b169b 100644
--- a/crates/ethcore/src/engines/hbbft/hbbft_engine.rs
+++ b/crates/ethcore/src/engines/hbbft/hbbft_engine.rs
@@ -51,7 +51,6 @@ use super::{
     NodeId,
     contracts::{
         keygen_history::{all_parts_acks_available, initialize_synckeygen},
-        staking::start_time_of_next_phase_transition,
         validator_set::{ValidatorType, get_pending_validators, is_pending_validator},
     },
     contribution::{unix_now_millis, unix_now_secs},
@@ -329,9 +328,6 @@ impl TransitionHandler {
 
         // If the minimum block time has passed we are ready to trigger new blocks.
         if timer_duration == Duration::from_secs(0) {
-            // Always create blocks if we are in the keygen phase.
-            self.engine.start_hbbft_epoch_if_next_phase();
-
             // If the maximum block time has been reached we trigger a new block in any case.
             if self.max_block_time_remaining(client.clone()) == Duration::from_secs(0) {
                 self.engine.start_hbbft_epoch(client);
@@ -1104,27 +1100,6 @@ impl HoneyBadgerBFT {
         self.client.read().as_ref().and_then(Weak::upgrade)
     }
 
-    fn start_hbbft_epoch_if_next_phase(&self) {
-        // experimental deactivation of empty blocks.
-        // see: https://github.com/DMDcoin/diamond-node/issues/160
-
-        match self.client_arc() {
-            None => return,
-            Some(client) => {
-                // Get the next phase start time
-                let genesis_transition_time = match start_time_of_next_phase_transition(&*client) {
-                    Ok(time) => time,
-                    Err(_) => return,
-                };
-
-                // If current time larger than phase start time, start a new block.
-                if genesis_transition_time.as_u64() < unix_now_secs() {
-                    self.start_hbbft_epoch(client);
-                }
-            }
-        }
-    }
-
     fn replay_cached_messages(&self) -> Option<()> {
         let client = self.client_arc()?;
 
@@ -1236,12 +1211,12 @@ impl HoneyBadgerBFT {
             return Ok(());
         }
 
-        self.hbbft_peers_service
-            .channel()
-            .send(HbbftConnectToPeersMessage::AnnounceAvailability)?;
-
-        self.hbbft_peers_service
-            .send_message(HbbftConnectToPeersMessage::AnnounceOwnInternetAddress)?;
+        if self.is_staked() {
+            self.hbbft_peers_service
+                .send_message(HbbftConnectToPeersMessage::AnnounceAvailability)?;
+            self.hbbft_peers_service
+                .send_message(HbbftConnectToPeersMessage::AnnounceOwnInternetAddress)?;
+        }
 
         if self.should_connect_to_validator_set() {
             // we just keep those variables here, because we need them in the early_epoch_end_manager.
diff --git a/crates/ethcore/src/engines/hbbft/hbbft_state.rs b/crates/ethcore/src/engines/hbbft/hbbft_state.rs
index 725e5b75f..3bdb78c20 100644
--- a/crates/ethcore/src/engines/hbbft/hbbft_state.rs
+++ b/crates/ethcore/src/engines/hbbft/hbbft_state.rs
@@ -209,10 +209,7 @@ impl HbbftState {
         // apply DAO updates here.
         // update the current minimum gas price.
-        match get_minimum_gas_from_permission_contract(
-            client.as_ref(),
-            BlockId::Number(self.current_posdao_epoch_start_block),
-        ) {
+        match get_minimum_gas_from_permission_contract(client.as_ref(), BlockId::Latest) {
             Ok(min_gas) => {
                 *current_minimum_gas_price.lock() = Some(min_gas);
             }
 
@@ -222,10 +219,29 @@ impl HbbftState {
 
         if sks.is_none() {
-            info!(target: "engine", "We are not part of the HoneyBadger validator set - running as regular node.");
+            info!(target: "engine", "We are not part of the HoneyBadger validator set - Running as regular node.");
             peers_service
                 .send_message(HbbftConnectToPeersMessage::DisconnectAllValidators)
                 .ok()?;
+
+            if self.is_validator() {
+                let is_syncing = if let Some(full) = client.as_full_client() {
+                    full.is_major_syncing()
+                } else {
+                    info!(target: "engine", "Node was a validator: syncing state cannot be determined because the client is not a full client. (https://github.com/DMDcoin/diamond-node/issues/322)");
+                    return Some(());
+                };
+
+                if is_syncing {
+                    debug!(target: "engine", "Node was a validator and became a regular node, but we are syncing; not shutting down the node as defined in https://github.com/DMDcoin/diamond-node/issues/322.");
+                } else {
+                    info!(target: "engine", "Node was a validator and became a regular node. Shutting down the node as defined in https://github.com/DMDcoin/diamond-node/issues/322.");
+                    // for unit tests this is no problem; demand_shutdown won't do anything in a unit test.
+                    // e2e tests need adaptation.
+                    // this gracefully shuts down a node that was a validator before but is not anymore.
+                    client.demand_shutdown();
+                }
+            }
             return Some(());
         }
diff --git a/crates/ethcore/src/engines/hbbft/test/mod.rs b/crates/ethcore/src/engines/hbbft/test/mod.rs
index 4c935ff7a..e95a4c576 100644
--- a/crates/ethcore/src/engines/hbbft/test/mod.rs
+++ b/crates/ethcore/src/engines/hbbft/test/mod.rs
@@ -1,12 +1,11 @@
 use super::{
     contracts::{
         staking::{
-            get_posdao_epoch, start_time_of_next_phase_transition,
+            get_posdao_epoch,
             tests::{create_staker, is_pool_active},
         },
         validator_set::{is_pending_validator, mining_by_staking_address},
     },
-    contribution::unix_now_secs,
     test::hbbft_test_client::{HbbftTestClient, create_hbbft_client, create_hbbft_clients},
 };
 use crate::{client::traits::BlockInfo, types::ids::BlockId};
@@ -129,12 +128,6 @@ fn test_epoch_transition() {
     // To avoid performing external transactions with the MoC we create and fund a random address.
     let transactor: KeyPair = Random.generate();
 
-    let genesis_transition_time = start_time_of_next_phase_transition(moc.client.as_ref())
-        .expect("start_time_of_next_phase_transition call must succeed");
-
-    // Genesis block is at time 0, current unix time must be much larger.
-    assert!(genesis_transition_time.as_u64() < unix_now_secs());
-
     // We should not be in the pending validator set at the genesis block.
     assert!(
         !is_pending_validator(moc.client.as_ref(), &moc.address())
diff --git a/crates/ethcore/src/miner/miner.rs b/crates/ethcore/src/miner/miner.rs
index ee2135764..5df562f2c 100644
--- a/crates/ethcore/src/miner/miner.rs
+++ b/crates/ethcore/src/miner/miner.rs
@@ -420,6 +420,17 @@ impl Miner {
         self.service_transaction_checker.clone()
     }
 
+    /// Performs garbage collection of the pool for free service transactions.
+    /// Removes transactions that are not valid anymore.
+    /// The process executes listener calls.
+    pub fn collect_garbage bool>(
+        &self,
+        service_transaction_filter: F,
+    ) {
+        self.transaction_queue
+            .garbage_collect(service_transaction_filter);
+    }
+
     /// Retrieves an existing pending block iff it's not older than given block number.
     ///
     /// NOTE: This will not prepare a new pending block if it's not existing.
diff --git a/crates/ethcore/sync/src/chain/handler.rs b/crates/ethcore/sync/src/chain/handler.rs
index a4a74f0ba..0c4e92ea5 100644
--- a/crates/ethcore/sync/src/chain/handler.rs
+++ b/crates/ethcore/sync/src/chain/handler.rs
@@ -884,10 +884,11 @@ impl SyncHandler {
             return Ok(());
         }
 
-        if io
-            .chain()
-            .transaction_if_readable(&hash, &deadline.time_left())
-            .is_none()
+        if !sync.lately_received_transactions.contains(&hash)
+            && io
+                .chain()
+                .transaction_if_readable(&hash, &deadline.time_left())
+                .is_none()
         {
             sync.peers
                 .get_mut(&peer_id)
diff --git a/crates/ethcore/sync/src/chain/mod.rs b/crates/ethcore/sync/src/chain/mod.rs
index 3a204b3c1..2053653fe 100644
--- a/crates/ethcore/sync/src/chain/mod.rs
+++ b/crates/ethcore/sync/src/chain/mod.rs
@@ -758,6 +758,8 @@ pub struct ChainSync {
     statistics: SyncPropagatorStatistics,
     /// memorizing currently pooled transaction to reduce the number of pooled transaction requests.
     asking_pooled_transaction_overview: PooledTransactionOverview,
+    /// memorized lately received transactions to avoid requesting them again.
+    lately_received_transactions: H256FastSet,
 }
 
 #[derive(Debug, Default)]
@@ -849,6 +851,7 @@ impl ChainSync {
             new_transactions_stats_period: config.new_transactions_stats_period,
             statistics: SyncPropagatorStatistics::new(),
             asking_pooled_transaction_overview: PooledTransactionOverview::new(),
+            lately_received_transactions: H256FastSet::default(),
         };
         sync.update_targets(chain);
         sync
@@ -952,6 +955,9 @@ impl ChainSync {
             trace!(target: "sync", "Received {:?}", txs.iter().map(|t| t.hash).map(|t| t.0).collect::>());
         }
 
+        self.lately_received_transactions
+            .extend(txs.iter().map(|tx| tx.hash()));
+
         // Remove imported txs from all request queues
         let imported = txs.iter().map(|tx| tx.hash()).collect::();
         for (pid, peer_info) in &mut self.peers {
@@ -1024,6 +1030,7 @@ impl ChainSync {
         // Reactivate peers only if some progress has been made
         // since the last sync round of if starting fresh.
         self.active_peers = self.peers.keys().cloned().collect();
+        self.lately_received_transactions.clear();
         debug!(target: "sync", "resetting sync state to {:?}", self.state);
     }
 
@@ -1267,6 +1274,7 @@ impl ChainSync {
         debug!(target: "sync",
             "sync_peer: {} force {} state: {:?}", peer_id, force, self.state
         );
+
         if !self.active_peers.contains(&peer_id) {
             trace!(target: "sync", "Skipping deactivated peer {}", peer_id);
             return;
@@ -1439,7 +1447,20 @@ impl ChainSync {
         // and if we have nothing else to do, get the peer to give us at least some of announced but unfetched transactions
         let mut to_send = H256FastSet::default();
         if let Some(peer) = self.peers.get_mut(&peer_id) {
-            // info: this check should do nothing, if everything is tracked correctly,
+            // remove lately received transactions from the unfetched list.
+            // depending on which collection is larger, we choose the appropriate method.
+            if self.lately_received_transactions.len() > 0 {
+                if peer.unfetched_pooled_transactions.len()
+                    > self.lately_received_transactions.len()
+                {
+                    self.lately_received_transactions.iter().for_each(|h| {
+                        peer.unfetched_pooled_transactions.remove(h);
+                    });
+                } else {
+                    peer.unfetched_pooled_transactions
+                        .retain(|h| !self.lately_received_transactions.contains(h));
+                }
+            }
 
             if peer.asking_pooled_transactions.is_empty() {
                 // todo: we might just request the same transactions from multiple peers here, at the same time.
diff --git a/crates/transaction-pool/Cargo.toml b/crates/transaction-pool/Cargo.toml
index db2478e95..a04fb955a 100644
--- a/crates/transaction-pool/Cargo.toml
+++ b/crates/transaction-pool/Cargo.toml
@@ -8,7 +8,6 @@
 authors = ["Dragan Rakita
diff --git a/crates/transaction-pool/src/pool.rs b/crates/transaction-pool/src/pool.rs
--- a/crates/transaction-pool/src/pool.rs
+++ b/crates/transaction-pool/src/pool.rs
-use log::{trace, warn};
+use log::{debug, trace, warn};
 use std::{
     collections::{hash_map, BTreeSet, HashMap},
     slice,
@@ -236,6 +236,45 @@ where
         }
     }
 
+    /// Performs garbage collection of the pool for free service transactions (zero gas price transactions).
+    /// Only the lowest nonces are checked.
+    /// The injected `should_keep_function` decides which transactions to keep.
+    /// The process executes listener calls for invalid transactions.
+    pub fn garbage_collect bool>(&mut self, should_keep_function: F) {
+        if self.transactions.is_empty() {
+            return;
+        }
+
+        let mut txs_to_remove = Vec::::new();
+
+        for sender in self.transactions.iter() {
+            for tx in sender.1.iter_transactions() {
+                if tx.transaction.has_zero_gas_price() {
+                    if !should_keep_function(&tx.transaction) {
+                        txs_to_remove.push(tx.hash().clone());
+                    }
+                } else {
+                    // if the next transaction does not have a zero gas price,
+                    // we stop scanning this sender.
+                    break;
+                };
+            }
+        }
+
+        if txs_to_remove.is_empty() {
+            return;
+        }
+
+        debug!(
+            "Garbage collection: removing {} invalid service transactions from the pool.",
+            txs_to_remove.len()
+        );
+
+        for tx in txs_to_remove.iter() {
+            self.remove(tx, true);
+        }
+    }
+
     /// Updates state of the pool statistics if the transaction was added to a set.
     fn finalize_insert(&mut self, new: &Transaction, old: Option<&Transaction>) {
         self.mem_usage += new.mem_usage();
diff --git a/crates/util/version/Cargo.toml b/crates/util/version/Cargo.toml
index be1b4000e..8737eca0a 100644
--- a/crates/util/version/Cargo.toml
+++ b/crates/util/version/Cargo.toml
@@ -1,7 +1,7 @@
 [package]
 name = "parity-version"
 # NOTE: this value is used for OpenEthereum version string (via env CARGO_PKG_VERSION)
-version = "4.0.1"
+version = "4.0.2"
 authors = [
     "bit.diamonds developers",
     "OpenEthereum developers",
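
For reference, the service-transaction garbage collection introduced in this patch threads a "should keep" closure from `Client::garbage_collect_in_queue` through `Miner::collect_garbage` and `TransactionQueue::garbage_collect` down to the pool, which only inspects the zero-gas-price prefix of each sender's queue, and a `(block number, block hash)` guard ensures the collection runs at most once per block. The sketch below mirrors that flow under simplified assumptions; `Tx`, `Pool`, `garbage_collect_once`, and the `[u8; 32]` hash stand-in are illustrative placeholders, not the crate's real types or API.

```rust
use std::sync::Mutex;

struct Tx {
    hash: u64,
    gas_price: u64,
}

struct Pool {
    // per-sender queues, ordered by nonce
    by_sender: Vec<Vec<Tx>>,
}

impl Pool {
    /// Remove zero-gas-price ("service") transactions the caller no longer accepts.
    /// Only the zero-priced prefix of each sender queue is checked; the scan stops
    /// at the first transaction with a non-zero gas price.
    fn garbage_collect<F: Fn(&Tx) -> bool>(&mut self, should_keep: F) {
        let mut to_remove = Vec::new();
        for sender in &self.by_sender {
            for tx in sender {
                if tx.gas_price == 0 {
                    if !should_keep(tx) {
                        to_remove.push(tx.hash);
                    }
                } else {
                    // first priced transaction reached: stop scanning this sender
                    break;
                }
            }
        }
        for hash in to_remove {
            for sender in &mut self.by_sender {
                sender.retain(|tx| tx.hash != hash);
            }
        }
    }
}

/// Guard mirroring `garbage_collect_latest_block`: run the GC at most once per block.
fn garbage_collect_once(
    last_gc: &Mutex<(u64, [u8; 32])>,
    number: u64,
    hash: [u8; 32],
    pool: &mut Pool,
) {
    {
        let mut guard = last_gc.lock().unwrap();
        if guard.0 == number && guard.1 == hash {
            // already collected for this block (or a collection is in flight)
            return;
        }
        *guard = (number, hash);
    }
    // stand-in for re-verifying each service transaction against the latest block
    pool.garbage_collect(|tx| tx.hash % 2 == 0);
}

fn main() {
    let mut pool = Pool {
        by_sender: vec![vec![
            Tx { hash: 2, gas_price: 0 },
            Tx { hash: 3, gas_price: 0 },
            Tx { hash: 4, gas_price: 1 },
        ]],
    };
    let last_gc = Mutex::new((0u64, [0u8; 32]));
    garbage_collect_once(&last_gc, 1, [1u8; 32], &mut pool);
    // tx 3 (zero gas price, rejected by the filter) was removed; tx 2 and tx 4 remain
    assert_eq!(pool.by_sender[0].len(), 2);
    // a second call for the same block is a no-op thanks to the guard
    garbage_collect_once(&last_gc, 1, [1u8; 32], &mut pool);
}
```

Recording the guard state before the scan, as the patch does, deliberately treats an in-flight collection as done so the IO message channel is never blocked waiting on a slow GC.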